| prompt (string, 19 to 879k chars) | completion (string, 3 to 53.8k chars) | api (string, 8 to 59 chars) |
|---|---|---|
import os
import time
import numpy as np
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from decorator import decorator
from scipy import integrate
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
from .transform import arcmin2rad, fwhm2sigma
import yaml
from pprint import pprint
from contextlib import contextmanager
#########################################################
# CMB and Noise Power Spectra
#########################################################
# ------------------
# CMB Power Spectrum
# ------------------
def load_Cl_Planck2018(lmin=0):
"""
load Cl from camb generated Dl file
Parameters
----------
    lmin: min ell number (must be 0; see the assertion below)
Returns
-------
Cls
available keys in Cls : L, TT, EE, BB, TE
"""
assert lmin == 0, "lmin=0 cannot be changed. It is only to indicate explicitly that the " \
"returned results will start from lmin=0.\n" \
"If you want to get the Cl in a custom ell range use utilities.get_CMB_Cl."
Cl_fname = os.path.join(os.path.dirname(__file__),
"Cl_Planck2018_camb.npz")
Cls = np.load(Cl_fname)
# L = Cls['ell']
# Cl_TT = Cls['tt']
# Cl_EE = Cls['ee']
# Cl_BB = Cls['bb']
# Cl_TE = Cls['te']
return Cls
def get_CMB_Cl(lmax, lmin=0, mode="TT", return_ell=False, uK=False):
"""
load Cl from camb generated Dl file
Parameters
----------
lmax: int
max ell number
lmin: int
min ell number
mode: str
CMB mode to return (e.g. "TT", "EE", etc)
return_ell: bool
        if True, returns the corresponding ell array as well
    uK: bool
        if True, the returned Cl is converted to uK^2 (default is K^2)
Returns
-------
Cl [K^2]
or
ell, Cl [K^2]
available keys in Cls : L, TT, EE, BB, TE
"""
Cl_fname = os.path.join(os.path.dirname(__file__),
"Cl_Planck2018_camb.npz")
Cls = load_Cl_Planck2018()
L = Cls['L'][lmin:lmax+1]
Cl = Cls[mode][lmin:lmax+1]
if uK:
Cl *= 1E12
if return_ell:
return L, Cl
else:
return Cl
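# Example usage (illustrative sketch, not part of the original module): fetch the
# Planck 2018 TT spectrum up to ell = 2000 in uK^2 together with its ell array.
# Assumes the bundled Cl_Planck2018_camb.npz file sits next to this module.
#
#   ell, cl_tt = get_CMB_Cl(lmax=2000, mode="TT", return_ell=True, uK=True)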
# --------------------
# Noise Power Spectrum
# --------------------
def _compute_Nl(sigma_n, lmax, lmin=0, fwhm=None, apply_beam=False, uK=False, return_ell=False):
"""
    compute the instrumental noise power spectrum (K^2 by default, uK^2 if uK=True)
Parameters
----------
sigma_n [uK-arcmin]:
noise level in uK-arcmin
        can be a scalar or an array for multiple channels
the length must match that of fwhm
lmax:
maximum ell mode in the power spectrum
lmin:
minimum ell mode in the power spectrum
fwhm [arcmin]:
beam fwhm in arcmins
can be scalar or an array for multiple channels
apply_beam:
if True, deconvolves the noise with beam
uK: bool
if True, the returned Nl will be in uK^2 units
return_ell: bool
if True, returns the corresponding ell array as well
Returns
-------
Nl [K^2]
or
ell, Nl [K^2]
"""
# make sure input is an array
if np.isscalar(sigma_n):
sigma_n = [sigma_n]
#convert sigma_n to radians
sigma_n = arcmin2rad(np.asarray(sigma_n))
# set up ell
L = np.arange(lmin, lmax + 1)
# determine number of channels
n_channels = len(sigma_n)
# calculate w^(-1) noise power spectrum prefactor
w_inverse = sigma_n ** 2
# calculate the noise power spectrum N_l (no beam) for each channel
Nl_channel = np.array([w_inverse[channel] * np.ones(L.shape) for channel in range(n_channels)])
# apply beam to the noise power spectrum
if apply_beam:
        if fwhm is not None:
# convert scalar fwhm to list for consistency
if np.isscalar(fwhm):
fwhm = [fwhm]
# convert fwhm to radians
fwhm = arcmin2rad(np.asarray(fwhm))
# check the length and dimensions of the input
assert len(fwhm) == len(sigma_n), "fwhm and sigma_n must have the same length"
assert np.ndim(fwhm) == np.ndim(sigma_n) == 1
# convert fwhm to sigma
            sigma_theta = fwhm ** 2 / 8 / np.log(2)
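            # Sketch of how the beam deconvolution typically continues from here
            # (an assumption based on the standard Knox formula, not the original code):
            # each channel's white-noise spectrum is multiplied by exp(l(l+1)*sigma_b^2),
            # where sigma_b^2 = FWHM^2 / (8 ln 2) is the sigma_theta computed above.
            #
            #   beam_factor = np.array([np.exp(L * (L + 1) * s2) for s2 in sigma_theta])
            #   Nl_channel = Nl_channel * beam_factor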
import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn
from pyoptmat import ode, models, flowrules, hardening, utility, damage
from pyoptmat.temperature import ConstantParameter as CP
torch.set_default_tensor_type(torch.DoubleTensor)
torch.autograd.set_detect_anomaly(True)
def differ(mfn, p0, eps=1.0e-6):
v0 = mfn(p0).numpy()
puse = p0.numpy()
result = np.zeros(puse.shape)
for ind, val in np.ndenumerate(puse):
dp = np.abs(val) * eps
if dp < eps:
dp = eps
pcurr = np.copy(puse)
pcurr[ind] += dp
v1 = mfn(torch.tensor(pcurr)).numpy()
result[ind] = (v1 - v0) / dp
return result
def simple_diff(fn, p0):
res = []
for i in range(len(p0)):
def mfn(pi):
ps = [pp for pp in p0]
ps[i] = pi
return fn(ps)
res.append(differ(mfn, p0[i]))
return res
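# Illustrative check of the finite-difference helpers above (hypothetical example, not
# part of the original test suite). For f(a, b) = sum(a * b) the exact gradients are
# df/da = b and df/db = a, which simple_diff should recover numerically:
#
#   p = [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])]
#   num_grads = simple_diff(lambda ps: torch.sum(ps[0] * ps[1]), p)
#   # num_grads[0] is approximately [3., 4.] and num_grads[1] approximately [1., 2.]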
class CommonGradient:
def test_gradient_strain(self):
bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
res = torch.norm(
bmodel.solve_strain(self.times, self.strains, self.temperatures)
)
res.backward()
grad = self.extract_grad(bmodel)
ngrad = simple_diff(
lambda p: torch.norm(
self.model_fn(p).solve_strain(
self.times, self.strains, self.temperatures
)
),
self.p,
)
for i, (p1, p2) in enumerate(zip(grad, ngrad)):
print(i, p1, p2)
self.assertTrue(np.allclose(p1, p2, rtol=1e-4))
def test_gradient_stress(self):
bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
res = torch.norm(
bmodel.solve_stress(self.times, self.stresses, self.temperatures)
)
res.backward()
grad = self.extract_grad(bmodel)
ngrad = simple_diff(
lambda p: torch.norm(
self.model_fn(p).solve_stress(
self.times, self.stresses, self.temperatures
)
),
self.p,
)
# Skipping the first step helps with noise issues
for i, (p1, p2) in enumerate(zip(grad[1:], ngrad[1:])):
print(i, p1, p2)
self.assertTrue(np.allclose(p1, p2, rtol=1e-4, atol=1e-7))
class TestPerfectViscoplasticity(unittest.TestCase, CommonGradient):
def setUp(self):
self.ntime = 10
self.nbatch = 10
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.p = [self.E, self.n, self.eta]
self.model_fn = lambda p: models.ModelIntegrator(
models.InelasticModel(
CP(p[0]), flowrules.PerfectViscoplasticity(CP(p[1]), CP(p[2]))
),
use_adjoint=False,
)
self.extract_grad = lambda m: [
m.model.E.pvalue.grad.numpy(),
m.model.flowrule.n.pvalue.grad.numpy(),
m.model.flowrule.eta.pvalue.grad.numpy(),
]
self.times = torch.transpose(
torch.tensor(
np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
),
1,
0,
)
self.strains = torch.transpose(
torch.tensor(
np.array(
[np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
)
),
1,
0,
)
self.stresses = torch.transpose(
torch.tensor(
np.array(
[np.linspace(0, 100.0, self.ntime) for i in range(self.nbatch)]
)
),
1,
0,
)
self.temperatures = torch.zeros_like(self.strains)
class TestIsotropicOnly(unittest.TestCase, CommonGradient):
def setUp(self):
self.ntime = 10
self.nbatch = 10
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.R = torch.tensor(100.0)
self.d = torch.tensor(5.1)
self.s0 = torch.tensor(10.0)
self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d]
self.model_fn = lambda p: models.ModelIntegrator(
models.InelasticModel(
CP(p[0]),
flowrules.IsoKinViscoplasticity(
CP(p[1]),
CP(p[2]),
CP(p[3]),
hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
hardening.NoKinematicHardeningModel(),
),
),
use_adjoint=False,
)
self.extract_grad = lambda m: [
m.model.E.pvalue.grad.numpy(),
m.model.flowrule.n.pvalue.grad.numpy(),
m.model.flowrule.eta.pvalue.grad.numpy(),
m.model.flowrule.s0.pvalue.grad.numpy(),
m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
]
self.times = torch.transpose(
torch.tensor(
np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
),
1,
0,
)
self.strains = torch.transpose(
torch.tensor(
np.array(
[np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
)
),
1,
0,
)
self.stresses = torch.transpose(
torch.tensor(
np.array(
[np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
)
),
1,
0,
)
self.temperatures = torch.zeros_like(self.strains)
class TestHardeningViscoplasticity(unittest.TestCase, CommonGradient):
def setUp(self):
self.ntime = 10
self.nbatch = 10
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.R = torch.tensor(100.0)
self.d = torch.tensor(5.1)
self.C = torch.tensor(1000.0)
self.g = torch.tensor(10.0)
self.s0 = torch.tensor(10.0)
self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d, self.C, self.g]
self.model_fn = lambda p: models.ModelIntegrator(
models.InelasticModel(
CP(p[0]),
flowrules.IsoKinViscoplasticity(
CP(p[1]),
CP(p[2]),
CP(p[3]),
hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
hardening.FAKinematicHardeningModel(CP(p[6]), CP(p[7])),
),
),
use_adjoint=False,
)
self.extract_grad = lambda m: [
m.model.E.pvalue.grad.numpy(),
m.model.flowrule.n.pvalue.grad.numpy(),
m.model.flowrule.eta.pvalue.grad.numpy(),
m.model.flowrule.s0.pvalue.grad.numpy(),
m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
]
self.times = torch.transpose(
torch.tensor(
np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
),
1,
0,
)
self.strains = torch.transpose(
torch.tensor(
np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
from fv3fit.keras._models.normalizer import LayerStandardScaler
import numpy as np
import pytest
import tempfile
import tensorflow as tf
@pytest.fixture(params=["standard"])
def scaler(request):
if request.param == "standard":
return LayerStandardScaler()
else:
raise NotImplementedError()
@pytest.mark.parametrize("n_samples, n_features", [(10, 1), (10, 5)])
def test_normalize_layer_properties(scaler, n_samples, n_features):
    X = np.random.uniform(0, 10, size=[n_samples, n_features])
import data_analysis as ed
import integration_ME as ME
import matplotlib.pyplot as plt
from matplotlib import gridspec, cm
import numpy as np
from tqdm import tqdm
import pandas as pd
import seaborn as sns
from scipy.stats import gaussian_kde
import models as md
Nmax = ed.Nmax
sbcolorcycle=sns.color_palette("Set1",n_colors=9,desat=0.8) # Setting Colour palette
sns.set_palette("Set1",n_colors=9,desat=0.8)
sbcolorcyclebright=sns.color_palette("bright")
sbcolorcycledark=sns.color_palette("dark")
sbcolorcyclemuted=sns.color_palette("muted")
sns.set_style("ticks") # Set plot properties
sns.despine()
colorcycle=["#e41a1c", #RED
"#377eb8","#4daf4a", #BLUE, GREEN
"#ff7f00","#984ea3", #ORANGE, PURPLE
"#999999","#f781bf","#a65628","#ffff33"] #GREY, PINK, BROWN
def plottrajectory(model = 'BergU', bayes = 'SMC', gamma = 300, source = 'file',
condition = 'final', bestpar_idx = 'MAP', resurrection = 'withzeros', subfolder = 'smc/'):
# plots the experimental trajectories, their statistics and the prediction of the model for a given set of parameters
    # if 'source' == pars, the parameters are the array pars; if 'file', they are extracted from the fitting output
    # if 'bestpar_idx' == 'MAP', a kde is used to calculate the MAP and plot that trajectory; otherwise use an int to select a dataset
# model : what model from models.py to use
# bayes : currently only using 'SMC' - sequential monte carlo
# gamma : value of the load
# condition: what iteration of the SMC to choose from. 'final' returns the last one
    # resurrection: choose alignment of resurrection traces; can be 'withzeros' or 'nozeros'
stride_plots = 1 # stride even more to have less crammed correlation plots
condition = str(condition)
fontsize = 15
if resurrection == 'nozeros':
str_res = 'statnum_resurrection_nozeros'
str_common_vector_res = 'common_time_vector_resurrection_nozeros'
str_longest_vector_res = 'longest_time_vector_resurrection_nozeros'
str_mean_res = 'mean_resurrection_nozeros'
elif resurrection == 'withzeros' :
str_res = 'statnum_resurrection_withzeros'
str_common_vector_res = 'common_time_vector_resurrection_withzeros'
str_longest_vector_res = 'longest_time_vector_resurrection_withzeros'
str_mean_res = 'mean_resurrection_withzeros'
if md.model_properties[model]['integrator_name']=='Berg':
N_dimension = Nmax+1
elif md.model_properties[model]['integrator_name']=='WeakStrong':
parnames = []
N_dimension = int((Nmax+1)*(Nmax+2)/2)
data1 = None
lklh_list = np.array([])
if source == 'file':
if bayes == 'SMC': # only option in the last implementation
# gamma_file = gamma
# if model in ['BergU','BergO','BergOO','WeakStrongU_fixstrong','WeakStrongU','WeakStrongU_doublefix']:
gamma_file = 300
data1_sorted = np.loadtxt('{}pars_{}_MF_{}_{}.out'.format(subfolder,model,gamma_file,condition))
if bestpar_idx == 'MAP': # calculate MAP
kde = gaussian_kde(data1_sorted.T, bw_method = 1.0)
localdensity = kde.evaluate(data1_sorted.T)
sortedidxs = localdensity.argsort()
data1_sorted = data1_sorted[sortedidxs]
localdensity_sorted = localdensity[sortedidxs]
bestpars = data1_sorted[-1]
worstpars = data1_sorted[0]
print("Best pars for the stochastic model are: {}".format(bestpars))
print("Worst pars for the stochastic model are: {}".format(worstpars))
distance = 0
gammas = [300,500,1300]
integrator_name = md.model_properties[model]['integrator_name']
sto_trajs = False
resurrectionzeros = md.model_properties[model]['resurrectionzeros']
for xgamma in gammas:
integration_pars = md.prior_to_integrator(bestpars,model,xgamma)
distance += ed.DistanceLikelihood(xgamma, integrator_name, integration_pars,
return_trajs = False, sto_trajs = False,resurrectionzeros = resurrectionzeros)
bestpars = md.prior_to_integrator(bestpars,model,gamma)
print("Distance at MAP is: {}".format(distance))
elif bestpar_idx == 'score': # calculate the score of all the points and select the best one (this might take time)
gammas = [300,500,1300]
distances = []
integrator_name = md.model_properties[model]['integrator_name']
resurrectionzeros = md.model_properties[model]['resurrectionzeros']
distances = np.loadtxt('{}distances_{}_MF_{}_{}.out'.format(subfolder,model,gamma_file,condition))
# for par in tqdm(data1_sorted):
# distance = 0
# for xgamma in gammas:
# integration_pars = md.prior_to_integrator(par,model,xgamma)
# distance += ed.DistanceLikelihood(xgamma, integrator_name, integration_pars,
# return_trajs = False, sto_trajs = False,resurrectionzeros = resurrectionzeros)
# distances.append(distance)
bestidx = np.argmin(distances)
worstidx = np.argmax(distances)
bestpars = md.prior_to_integrator(data1_sorted[bestidx],model,gamma)
worstpars = md.prior_to_integrator(data1_sorted[worstidx],model,gamma)
print("Best sample distance is: {}".format(distances[bestidx]))
print("For parameter set:", data1_sorted[bestidx])
print("Worst sample distance is: {}".format(distances[worstidx]))
else:
bestpars = data1_sorted[bestpar_idx]
bestpars = md.prior_to_integrator(bestpars,model,gamma)
print("Particular point chosen : {}".format(bestpars))
else:
print('source', source)
bestpars = md.prior_to_integrator(source,model,gamma)
print("Using custom parameter set (in sampling space):", source)
distance = 0
gammas = [300,500,1300]
integrator_name = md.model_properties[model]['integrator_name']
resurrectionzeros = md.model_properties[model]['resurrectionzeros']
for xgamma in gammas:
integration_pars = md.prior_to_integrator(source,model,xgamma)
distance += ed.DistanceLikelihood(xgamma, integrator_name, integration_pars,
return_trajs = False, sto_trajs = False,resurrectionzeros = resurrectionzeros)
print("With distance: ", distance)
## preparing figures and axes
fig = plt.figure(figsize = [15,5])
spec = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace = 0.01)
axes = []
for expcondition in range(3):
axes.append(plt.subplot(spec[expcondition]))
if expcondition > 0 :
axes[expcondition].yaxis.set_ticklabels([])
################################ Plotting trajectories
# Getting the stochastic trajectories
if md.model_properties[model]['integrator_name'] == 'Berg':
A = ME.Berg_Matrix(*bestpars, N=Nmax)
Aext = ME.GetEigenElements(A) # to avoid inverting the matrix constantly
P0_before_stall = ME.Equilibrium(Aext, eigen_given = True)
P0_resurrection = np.zeros(Nmax+1)
P0_resurrection[0] = 1
elif md.model_properties[model]['integrator_name'] == 'WeakStrong':
A = ME.WeakStrong_Matrix(*bestpars, N=Nmax)
Aext = ME.GetEigenElements(A) # to avoid inverting the matrix constantly
P0_before_stall = ME.Equilibrium(Aext, eigen_given = True)
params_stall = bestpars[:]
        params_stall[1] = 0 # forbid detachment
A_stall = ME.WeakStrong_Matrix(*params_stall, N=Nmax)
Aext_stall = ME.GetEigenElements(A_stall)
P_eq_stall = ME.Normalize(ME.Equilibrium(Aext_stall,eigen_given = True))
M = len(P0_before_stall) # number of possible states
P0_resurrection = np.zeros(M)
        P0_resurrection[0] = 1 # the first state (the empty state) is the only populated one
## Due to the fact that the initial condition for stalls is not fixed, the statistics are computed using all the trajectories
Pt_release_cumulative = np.zeros((len(ed.data_light_stats[gamma]['longest_time_vector_release']),N_dimension))
N_release = 0
Pt_resurrection_cumulative = np.zeros((len(ed.data_light_stats[gamma][str_longest_vector_res]),N_dimension))
# each one will contain the mean and var of the different release traces (due to different initial conditions)
mean_release = np.array([]).reshape(0,len(ed.data_light_stats[gamma]['longest_time_vector_release']))
mean_release_weak = np.array([]).reshape(0,len(ed.data_light_stats[gamma]['longest_time_vector_release']))
mean_release_strong = np.array([]).reshape(0,len(ed.data_light_stats[gamma]['longest_time_vector_release']))
var_release = np.array([]).reshape(0,len(ed.data_light_stats[gamma]['longest_time_vector_release']))
mean_resurrection = np.array([]).reshape(0,len(ed.data_light_stats[gamma][str_longest_vector_res]))
mean_resurrection_weak = np.array([]).reshape(0,len(ed.data_light_stats[gamma][str_longest_vector_res]))
    mean_resurrection_strong = np.array([]).reshape(0,len(ed.data_light_stats[gamma][str_longest_vector_res]))
# encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, multiple-statements, attribute-defined-outside-init, logging-not-lazy, arguments-differ, line-too-long, redefined-builtin, singleton-comparison, no-self-use, bad-continuation
import logging
from scipy.spatial.distance import euclidean as ed
from numpy import apply_along_axis, argmin, argmax, sum, full, inf, asarray, mean, where, sqrt
from NiaPy.util import fullArray
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
__all__ = ['KrillHerdV1', 'KrillHerdV2', 'KrillHerdV3', 'KrillHerdV4', 'KrillHerdV11']
class KrillHerd(Algorithm):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerd', 'KH']
@staticmethod
def typeParameters(): return {
'NP': lambda x: isinstance(x, int) and x > 0,
'N_max': lambda x: isinstance(x, (int, float)) and x > 0,
'V_f': lambda x: isinstance(x, (int, float)) and x > 0,
'D_max': lambda x: isinstance(x, (int, float)) and x > 0,
'C_t': lambda x: isinstance(x, (int, float)) and x > 0,
'W_n': lambda x: isinstance(x, (int, float)) and x > 0,
'W_f': lambda x: isinstance(x, (int, float)) and x > 0,
'd_s': lambda x: isinstance(x, (int, float)) and x > 0,
'nn': lambda x: isinstance(x, int) and x > 0,
'Cr': lambda x: isinstance(x, float) and 0 <= x <= 1,
'Mu': lambda x: isinstance(x, float) and 0 <= x <= 1,
'epsilon': lambda x: isinstance(x, float) and 0 < x < 1
}
def setParameters(self, NP=50, N_max=0.01, V_f=0.02, D_max=0.002, C_t=0.93, W_n=0.42, W_f=0.38, d_s=2.63, nn=5, Cr=0.2, Mu=0.05, epsilon=1e-31, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments:**
NP {integer} -- Number of krill herds in population
N_max {real} -- maximum induced speed
V_f {real} -- foraging speed
		D_max {real} -- maximum diffusion speed
		C_t {real} -- constant $\in [0, 2]$
		W_n {real} or {array} -- inertia weights of the motion induced from neighbors $\in [0, 1]$
		W_f {real} or {array} -- inertia weights of the motion induced from foraging $\in [0, 1]$
		d_s {real} -- maximum euclidean distance for neighbors
		nn {integer} -- maximum number of neighbors for the neighbors effect
		Cr {real} -- Crossover rate
		Mu {real} -- Mutation rate
		epsilon {real} -- small number used to avoid division by zero
"""
self.N, self.N_max, self.V_f, self.D_max, self.C_t, self.W_n, self.W_f, self.d_s, self.nn, self._Cr, self._Mu, self.epsilon = NP, N_max, V_f, D_max, C_t, W_n, W_f, d_s, nn, Cr, Mu, epsilon
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def initWeights(self, task): return fullArray(self.W_n, task.D), fullArray(self.W_f, task.D)
def sensRange(self, ki, KH): return sum([ed(KH[ki], KH[i]) for i in range(self.N)]) / (self.nn * self.N)
def getNeigbors(self, i, ids, KH):
N = list()
for j in range(self.N):
if j != i and ids > ed(KH[i], KH[j]): N.append(j)
return N
def funX(self, x, y): return ((y - x) + self.epsilon) / (ed(y, x) + self.epsilon)
def funK(self, x, y, b, w): return ((x - y) + self.epsilon) / ((w - b) + self.epsilon)
def induceNeigborsMotion(self, i, n, W, KH, KH_f, ikh_b, ikh_w, task):
Ni = self.getNeigbors(i, self.sensRange(i, KH), KH)
Nx, Nf, f_b, f_w = KH[Ni], KH_f[Ni], KH_f[ikh_b], KH_f[ikh_w]
alpha_l = sum(asarray([self.funK(KH_f[i], j, f_b, f_w) for j in Nf]) * asarray([self.funX(KH[i], j) for j in Nx]).T)
alpha_t = 2 * (1 + self.rand() * task.Iters / task.nGEN)
return self.N_max * (alpha_l + alpha_t) + W * n
def induceFragingMotion(self, i, x, x_f, f, W, KH, KH_f, ikh_b, ikh_w, task):
beta_f = 2 * (1 - task.Iters / task.nGEN) * self.funK(KH_f[i], x_f, KH_f[ikh_b], KH_f[ikh_w]) * self.funX(KH[i], x) if KH_f[ikh_b] < KH_f[i] else 0
beta_b = self.funK(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) * self.funX(KH[i], KH[ikh_b])
return self.V_f * (beta_f + beta_b) + W * f
def inducePhysicalDiffusion(self, task): return self.D_max * (1 - task.Iters / task.nGEN) * self.uniform(-1, 1, task.D)
def deltaT(self, task): return self.C_t * sum(task.bcRange())
def crossover(self, x, xo, Cr): return [xo[i] if self.rand() < Cr else x[i] for i in range(len(x))]
def mutate(self, x, x_b, Mu):
return [x[i] if self.rand() < Mu else (x_b[i] + self.rand()) for i in range(len(x))]
def getFoodLocation(self, KH, KH_f, task):
x_food = task.repair(asarray([sum(KH[:, i] / KH_f) for i in range(task.D)]) / sum(1 / KH_f), rnd=self.Rand)
x_food_f = task.eval(x_food)
return x_food, x_food_f
def Mu(self, xf, yf, xf_best, xf_worst): return self._Mu / (self.funK(xf, yf, xf_best, xf_worst) + 1e-31)
def Cr(self, xf, yf, xf_best, xf_worst): return self._Cr * self.funK(xf, yf, xf_best, xf_worst)
def runTask(self, task):
KH, N, F, x, x_fit = self.uniform(task.Lower, task.Upper, [self.N, task.D]), full(self.N, .0), full(self.N, .0), None, task.optType.value * inf
W_n, W_f = self.initWeights(task)
while not task.stopCondI():
KH_f = apply_along_axis(task.eval, 1, KH)
ikh_b, ikh_w = argmin(KH_f), argmax(KH_f)
if KH_f[ikh_b] < x_fit: x, x_fit = KH[ikh_b], KH_f[ikh_b]
x_food, x_food_f = self.getFoodLocation(KH, KH_f, task)
if x_food_f < x_fit: x, x_fit = x_food, x_food_f
N = asarray([self.induceNeigborsMotion(i, N[i], W_n, KH, KH_f, ikh_b, ikh_w, task) for i in range(self.N)])
F = asarray([self.induceFragingMotion(i, x_food, x_food_f, F[i], W_f, KH, KH_f, ikh_b, ikh_w, task) for i in range(self.N)])
D = asarray([self.inducePhysicalDiffusion(task) for i in range(self.N)])
KH_n = KH + (self.deltaT(task) * (N + F + D))
Cr = asarray([self.Cr(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) for i in range(self.N)])
KH_n = asarray([self.crossover(KH_n[i], KH[i], Cr[i]) for i in range(self.N)])
Mu = asarray([self.Mu(KH_f[i], KH_f[ikh_b], KH_f[ikh_b], KH_f[ikh_w]) for i in range(self.N)])
KH_n = asarray([self.mutate(KH_n[i], KH[ikh_b], Mu[i]) for i in range(self.N)])
KH = apply_along_axis(task.repair, 1, KH_n, rnd=self.Rand)
return x, x_fit
class KrillHerdV4(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV4', 'KHv4']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Cr']
del d['Mu']
del d['epsilon']
return d
def setParameters(self, NP=50, N_max=0.01, V_f=0.02, D_max=0.002, C_t=0.93, W_n=0.42, W_f=0.38, d_s=2.63, **ukwargs): KrillHerd.setParameters(self, NP, N_max, V_f, D_max, C_t, W_n, W_f, d_s, 4, 0.2, 0.05, 1e-31, **ukwargs)
class KrillHerdV1(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV1', 'KHv1']
@staticmethod
def typeParameters(): return KrillHerdV4.typeParameters()
def crossover(self, x, xo, Cr): return x
def mutate(self, x, x_b, Mu): return x
class KrillHerdV2(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV2', 'KHv2']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Mu']
return d
def mutate(self, x, x_b, Mu): return x
class KrillHerdV3(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:** http://www.sciencedirect.com/science/article/pii/S1007570412002171
**Reference paper:** <NAME>, <NAME>, Krill herd: A new bio-inspired optimization algorithm, Communications in Nonlinear Science and Numerical Simulation, Volume 17, Issue 12, 2012, Pages 4831-4845, ISSN 1007-5704, https://doi.org/10.1016/j.cnsns.2012.05.010.
"""
Name = ['KrillHerdV3', 'KHv3']
@staticmethod
def typeParameters():
d = KrillHerd.typeParameters()
del d['Cr']
return d
def crossover(self, x, xo, Cr): return x
class KrillHerdV11(KrillHerd):
r"""Implementation of krill herd algorithm.
**Algorithm:** Krill Herd Algorithm
**Date:** 2018
**Authors:** <NAME>
**License:** MIT
**Reference URL:**
**Reference paper:**
"""
Name = ['KrillHerdV11', 'KHv11']
def ElitistSelection(self, KH, KH_f, KHo, KHo_f):
		ipb = where(KHo_f >= KH_f)
import numpy as np
from .. import lasso, group_lasso, strong_rules, warm_start
from ..basil import basil_inner_loop, basil
from ...tests.decorators import set_seed_for_test
@set_seed_for_test()
def test_lasso_agreement(n=200,p=50):
'''
check to see if it agrees with lasso path
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
Y += np.dot(X, betaX) + np.random.standard_normal(n)
groups = np.arange(p)
group_lasso1 = group_lasso.gaussian(X,
Y,
groups)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-12)
weights = np.ones(p)
lasso2 = lasso.gaussian(X,
Y,
weights)
sol2 = strong_rules(lasso2,
lagrange_sequence,
(lasso2.solution, lasso2.grad_solution),
inner_tol=1.e-12)
beta1 = sol1['beta']
beta2 = sol2['beta']
np.testing.assert_allclose(beta1, beta2)
@set_seed_for_test()
def test_path_subsample(n=200,p=50):
'''
compare a subsample path to the full path on subsampled data
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
Y += np.dot(X, betaX) + np.random.standard_normal(n)
cases = range(n//2)
groups = ['a', 'a', 'a', 'b', 'b']
for i in range(9):
groups.extend([str(i)]*5)
np.random.shuffle(groups)
group_lasso1 = group_lasso.gaussian(X,
Y,
groups)
group_lasso1 = group_lasso1.subsample(cases)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-10)
beta1 = sol1['beta']
group_lasso2 = group_lasso.gaussian(X[cases],
Y[cases],
groups)
sol2 = strong_rules(group_lasso2,
lagrange_sequence,
(group_lasso2.solution, group_lasso2.grad_solution),
inner_tol=1.e-10)
beta2 = sol2['beta']
np.testing.assert_allclose(beta1, beta2, rtol=1.e-3)
@set_seed_for_test()
def test_path():
'''
run a basic path algorithm
'''
n, p = 200, 50
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
np.random.shuffle(betaX)
Y += np.dot(X, betaX) + np.random.standard_normal(n)
groups = ['a', 'a', 'a', 'b', 'b']
for i in range(9):
groups.extend([str(i)]*5)
np.random.shuffle(groups)
group_lasso1 = group_lasso.gaussian(X,
Y,
groups)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-5)
beta1 = sol1['beta']
@set_seed_for_test()
def test_unpenalized(n=200, p=50):
'''
run a basic path algorithm with some unpenalized variables
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
Y += np.dot(X, betaX) + np.random.standard_normal(n)
groups = ['a', 'a', 'a', 'b', 'b']
for i in range(9):
groups.extend([str(i)]*5)
weights = dict([(g,1) for g in np.unique(groups)])
weights['2'] = weights['3'] = 0
weights['a'] = weights['b'] = 2
group_lasso1 = group_lasso.gaussian(X,
Y,
groups,
weights=weights)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-5)
beta1 = sol1['beta']
@set_seed_for_test()
def test_elastic_net(n=200, p=50):
'''
run a basic elastic net path algorithm
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
np.random.shuffle(betaX)
Y += np.dot(X, betaX) + np.random.standard_normal(n)
enet = np.zeros(X.shape[1])
enet[4:7] = 0
groups = ['a', 'a', 'a', 'b', 'b']
for i in range(9):
groups.extend([str(i)]*5)
group_lasso1 = group_lasso.gaussian(X,
Y,
groups,
alpha=0.5,
elastic_net_param=enet)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-5)
beta1 = sol1['beta']
@set_seed_for_test()
def test_elastic_net_unpenalized(n=200, p=50):
'''
run a basic elastic net path algorithm with unpenalized
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = [3,4,5]
Y += np.dot(X, betaX) + np.random.standard_normal(n)
enet = np.ones(X.shape[1])
enet[4:8] = 0
groups = ['a', 'a', 'a', 'b', 'b']
for i in range(9):
groups.extend([str(i)]*5)
weights = dict([(g,1) for g in np.unique(groups)])
weights['2'] = weights['3'] = 0
weights['a'] = weights['b'] = 2
group_lasso1 = group_lasso.gaussian(X,
Y,
groups,
weights=weights,
alpha=0.5,
elastic_net_param=enet)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=23) # initialized at "null" model
sol1 = strong_rules(group_lasso1,
lagrange_sequence,
(group_lasso1.solution, group_lasso1.grad_solution),
inner_tol=1.e-5)
beta1 = sol1['beta']
@set_seed_for_test()
def test_basil_inner_loop(n=1000,p=600):
'''
test one run of the BASIL inner loop
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = np.array([3,4,5]) * np.sqrt(n)
Y += np.dot(X, betaX) + np.random.standard_normal(n)
enet = np.ones(X.shape[1])
enet[4:8] = 0
groups = ['a', 'a', 'a', 'b', 'b']
i = 0
while True:
groups.extend([str(i)]*5)
i += 1
if len(groups) >= p:
break
groups = groups[:p]
weights = dict([(g,1) for g in np.unique(groups)])
weights['2'] = weights['3'] = 0
weights['a'] = weights['b'] = 2
group_lasso1 = group_lasso.gaussian(X,
Y,
groups,
weights=weights,
alpha=0.5,
elastic_net_param=enet)
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=100) # initialized at "null" model
sol1 = basil_inner_loop(group_lasso1,
lagrange_sequence[:50],
(group_lasso1.solution.copy(), group_lasso1.grad_solution.copy()),
inner_tol=1.e-14,
step_nvar=10)
lagrange1, beta1, grad1, active1 = sol1
print(np.array(beta1).shape, 'chunk of path')
print(active1, 'active')
@set_seed_for_test()
def test_basil(n=200,p=100):
'''
run BASIL
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = np.array([3,4,5]) * np.sqrt(n)
Y += np.dot(X, betaX) + np.random.standard_normal(n)
groups = ['a', 'a', 'a', 'b', 'b']
i = 0
while True:
groups.extend([str(i)]*5)
i += 1
if len(groups) >= p:
break
groups = groups[:p]
weights = dict([(g,1) for g in np.unique(groups)])
weights['a'] = weights['b'] = 2
group_lasso1 = group_lasso.gaussian(X,
Y,
groups,
weights=weights)
print(group_lasso1.penalty)
print(np.linalg.norm(group_lasso1.solution))
lagrange_sequence = group_lasso.default_lagrange_sequence(group_lasso1.penalty,
group_lasso1.grad_solution,
nstep=100) # initialized at "null" model
sol1 = basil(group_lasso1,
lagrange_sequence,
(group_lasso1.solution.copy(), group_lasso1.grad_solution.copy()),
inner_tol=1.e-14,
step_nvar=10,
step_lagrange=20)
group_lasso2 = group_lasso.gaussian(X,
Y,
groups,
weights=weights)
print(group_lasso2.penalty)
print(np.linalg.norm(group_lasso2.solution))
sol2 = warm_start(group_lasso2,
lagrange_sequence,
(group_lasso2.solution.copy(), group_lasso2.grad_solution.copy()),
inner_tol=1.e-14)['beta']
assert(np.linalg.norm(sol1 - sol2) / np.linalg.norm(sol2) < 1.e-4)
@set_seed_for_test()
def test_basil_unpenalized(n=200,p=100):
'''
run BASIL w/ unpenalized variables
'''
X = np.random.standard_normal((n,p))
Y = np.random.standard_normal(n)
betaX = np.zeros(p)
betaX[:3] = np.array([3,4,5]) * np.sqrt(n)
Y += np.dot(X, betaX) + np.random.standard_normal(n)
    enet = np.ones(X.shape[1])
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 9 13:57:18 2019
@author: <NAME>
"""
'''
In this script I tried some code for implementing
1. convolution
2. pooling
3. back convolution
4. back pooling
for "Single/Multi Channel" inputs like MNIST by using ony Nnmpy
'''
'''
output[i, j] = np.sum(im_region * self.filters, axis=(1, 2)) #For a 2D * 3D @axis
'''
import numpy as np
#X = np.array([[[1,0,-1],[1,0,-1],[1,0,-1]] , [[1,0,-1],[1,0,-1],[1,0,-1]] , [[1,0,-1],[1,0,-1],[1,0,-1]]])
#Y = np.array([[[3,0,1,2,7,4],[1,5,8,9,3,1],[2,7,2,5,1,3],[0,1,3,1,7,8],[4,2,1,6,2,8],[2,4,5,2,3,9]],[[3,0,1,2,7,4],[1,5,8,9,3,1],[2,7,2,5,1,3],[0,1,3,1,7,8],[4,2,1,6,2,8],[2,4,5,2,3,9]],[[3,0,1,2,7,4],[1,5,8,9,3,1],[2,7,2,5,1,3],[0,1,3,1,7,8],[4,2,1,6,2,8],[2,4,5,2,3,9]]])
X = np.array([[1,0,-1],[1,0,-1],[1,0,-1]])
Y = np.array([[3,0,1,2,7,4],[1,5,8,9,3,1],[2,7,2,5,1,3],[0,1,3,1,7,8],[4,2,1,6,2,8],[2,4,5,2,3,9]])
k,l=X.shape
i,j=Y.shape
Op1 = np.zeros((i-k+1,i-k+1))
#---------------single layer Conv 1st filter----------------------------------------------------
for b in range(j-l+1):
for a in range(i-k+1):
        Op1[b,a] = np.sum(np.multiply(X,Y[b:k+b,a:a+3]))
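#---------------single layer 2x2 max pooling (illustrative sketch, not the original author's code)---------------
# Assuming a stride of 2 and non-overlapping 2x2 windows on the feature map Op1,
# max pooling keeps the largest activation in each window:
#pool = np.zeros((Op1.shape[0]//2, Op1.shape[1]//2))
#for b in range(pool.shape[0]):
#    for a in range(pool.shape[1]):
#        pool[b,a] = np.max(Op1[2*b:2*b+2, 2*a:2*a+2])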
import numpy as np
import xarray as xr
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
from ..graph import Graph
from ...core.utils import as_id_array
from ..ugrid import (MESH_ATTRS, update_node_coords, update_nodes_at_link,
update_links_at_patch)
def remove_bad_patches(max_node_spacing, nodes_at_patch, neighbors_at_patch):
from .ext.delaunay import remove_tris
max_node_dist = np.ptp(nodes_at_patch, axis=1)
bad_patches = as_id_array(np.where(max_node_dist > max_node_spacing)[0])
if len(bad_patches) > 0:
remove_tris(nodes_at_patch, neighbors_at_patch, bad_patches)
nodes_at_patch = nodes_at_patch[:-len(bad_patches), :]
neighbors_at_patch = neighbors_at_patch[:-len(bad_patches), :]
return nodes_at_patch, neighbors_at_patch
def setup_links_and_patches(node_y_and_x, max_node_spacing=None):
from .ext.delaunay import _setup_links_at_patch, remove_tris
delaunay = Delaunay(list(zip(node_y_and_x[1], node_y_and_x[0])))
    nodes_at_patch = np.asarray(delaunay.simplices, dtype=int)
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 07:24:33 2020
@author: Ciaran
"""
import numpy as np
import Metrica_Viz as mviz
import scipy
import scipy.signal as signal
def remove_player_velocities(team):
    # remove player velocities and acceleration measures that are already in the 'team' dataframe
columns = [c for c in team.columns if c.split('_')[-1] in ['vx','vy','ax','ay','speed','acceleration']] # Get the player ids
team = team.drop(columns=columns)
return team
def lastrow_calc_player_velocities(team, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12):
""" calc_player_velocities( tracking_data )
    Calculate player velocities in x & y direction, and total player speed at each timestamp of the tracking data
Parameters
-----------
team: the tracking DataFrame for home or away team
smoothing: boolean variable that determines whether velocity measures are smoothed. Default is True.
filter: type of filter to use when smoothing the velocities. Default is Savitzky-Golay, which fits a polynomial of order 'polyorder' to the data within each window
window: smoothing window size in # of frames
        polyorder: order of the polynomial for the Savitzky-Golay filter. Default is 1 - a linear fit to the velocity, so gradient is the acceleration
        maxspeed: the maximum speed that a player can realistically achieve (in meters/second). Speed measures that exceed maxspeed are tagged as outliers and set to NaN.
    Returns
-----------
team : the tracking DataFrame with columns for speed in the x & y direction and total speed added
"""
# remove any velocity data already in the dataframe
team = remove_player_velocities(team)
# Get the player ids
player_ids = np.unique( [ c[:-2] for c in team.columns if c[:3] in ['att','def'] ] )
# Calculate the timestep from one frame to the next. Should always be 0.05 within the same half
dt = team['Time [s]'].diff()
# estimate velocities for players in team
maxspeed = 12
smoothing=True
#filter_='Savitzky-Golay'
for player in player_ids: # cycle through players individually
        # difference player positions in timestep dt to get unsmoothed estimate of velocity
vx = team[player+"_x"].diff() / dt
vy = team[player+"_y"].diff() / dt
if maxspeed>0:
# remove unsmoothed data points that exceed the maximum speed (these are most likely position errors)
raw_speed = np.sqrt( vx**2 + vy**2 )
vx[ raw_speed>maxspeed ] = np.nan
vy[ raw_speed>maxspeed ] = np.nan
if smoothing:
#if filter_=='Savitzky-Golay':
# calculate velocity
# vx = signal.savgol_filter(vx,window_length=7,polyorder=1)
# vy = signal.savgol_filter(vy,window_length=7,polyorder=1)
#elif filter_=='moving average':
ma_window = np.ones( window ) / window
# calculate velocity
vx = np.convolve( vx , ma_window, mode='same' )
vy = np.convolve( vy , ma_window, mode='same' )
# put player speed in x,y direction, and total speed back in the data frame
team[player + "_vx"] = vx
team[player + "_vy"] = vy
team[player + "_speed"] = np.sqrt( vx**2 + vy**2 )
return team
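# Example usage (illustrative, with hypothetical column names; not part of the original module):
# the function expects a 'Time [s]' column plus '<player>_x' / '<player>_y' columns whose names
# start with 'att' or 'def', e.g.
#
#   import pandas as pd
#   team = pd.DataFrame({'Time [s]': np.arange(0, 1, 0.04),
#                        'att_1_x': np.linspace(0, 10, 25),
#                        'att_1_y': np.zeros(25)})
#   team = lastrow_calc_player_velocities(team, smoothing=True, window=7)
#   # adds 'att_1_vx', 'att_1_vy' and 'att_1_speed' columns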
class player(object):
"""
player() class
Class defining a player object that stores position, velocity, time-to-intercept and pitch control contributions for a player
__init__ Parameters
-----------
pid: id (jersey number) of player
team: row of tracking data for team
teamname: team name "Home" or "Away"
params: Dictionary of model parameters (default model parameters can be generated using default_model_params() )
methods include:
-----------
simple_time_to_intercept(r_final): time take for player to get to target position (r_final) given current position
probability_intercept_ball(T): probability player will have controlled ball at time T given their expected time_to_intercept
"""
# player object holds position, velocity, time-to-intercept and pitch control contributions for each player
def __init__(self,pid,team,teamname,params):
self.id = pid
self.teamname = teamname
self.playername = "%s_%s_" % (teamname,pid)
self.vmax = params['max_player_speed'] # player max speed in m/s. Could be individualised
self.reaction_time = params['reaction_time'] # player reaction time in 's'. Could be individualised
self.tti_sigma = params['tti_sigma'] # standard deviation of sigmoid function (see Eq 4 in Spearman, 2018)
self.get_position(team)
self.get_velocity(team)
self.PPCF = 0. # initialise this for later
def get_position(self,team):
self.position = np.array( [ team[self.playername+'x'], team[self.playername+'y'] ] )
self.inframe = not np.any( np.isnan(self.position) )
def get_velocity(self,team):
self.velocity = np.array( [ team[self.playername+'vx'], team[self.playername+'vy'] ] )
if np.any( np.isnan(self.velocity) ):
self.velocity = np.array([0.,0.])
def simple_time_to_intercept(self, r_final):
self.PPCF = 0. # initialise this for later
# Time to intercept assumes that the player continues moving at current velocity for 'reaction_time' seconds
# and then runs at full speed to the target position.
r_reaction = self.position + self.velocity*self.reaction_time
self.time_to_intercept = self.reaction_time + np.linalg.norm(r_final-r_reaction)/self.vmax
return self.time_to_intercept
def probability_intercept_ball(self,T):
# probability of a player arriving at target location at time 'T' given their expected time_to_intercept (time of arrival), as described in Spearman 2018
        f = 1/(1. + np.exp( -np.pi/np.sqrt(3.0)/self.tti_sigma * (T-self.time_to_intercept) ) )
        return f
import copy
import logging.config
import os
import pickle
# for Logging handling
import sys
import time
import numpy as np
from numpy.linalg import LinAlgError
from scipy.optimize import minimize
import model
logger = logging.getLogger(__name__)
def nonzero_indices(a):
"""Get an index with non-zero element.
Parameters
----------
a : numpy.ndarray
array
Returns
-------
np.nonzero() : numpy.ndarray
Index with non-zero element
"""
return (np.nonzero(a)[0])
def create_directory(dir_name):
"""create directory
Parameters
----------
dir_name : str(file path)
create directory name
Returns
-------
None
"""
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
else:
pass
def calc_diff(C_pre, C_pos, t_pre, t_pos, rss_pre, rss_pos):
"""calculate difference
Parameters
----------
C_pre : numpy.ndarray
initialize control points
C_pos : numpy.ndarray
control points
t_pre : numpy.ndarray
initialize parameters
t_pos : numpy.ndarray
parameters
rss_pre : int
initialize rss
rss_pos : int
rss
Returns
-------
np.abs() : numpy.ndarray
absolute value
"""
if t_pre.shape[1] > t_pos.shape[1]:
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
else:
t_pre = np.c_[t_pre, 1 - np.sum(t_pre, axis=1)]
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
ratio_sum = 0
for key in C_pre:
ratio_sum += np.linalg.norm(C_pre[key] - C_pos[key]) / np.linalg.norm(
C_pre[key])
diff = rss_pre - rss_pos
logger.debug("{} {} {}".format(rss_pre, rss_pos, diff))
return (np.abs(diff))
def calc_gd_igd(dd1, dd2):
"""Calculate gd and igd.
Parameters
----------
dd1 : numpy.ndarray
estimated bezier simplex sample
dd2 : numpy.ndarray
validation data
Returns
-------
gd : float
Generational Distance
igd : float
Inverted Generational Distance
"""
gd = 0
igd = 0
for i in range(dd2.shape[0]):
d2 = dd2[i, :]
tmp = dd1 - d2
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
gd += v
for i in range(dd1.shape[0]):
d1 = dd1[i, :]
tmp = dd2 - d1
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
igd += v
return (gd / dd2.shape[0], igd / dd1.shape[0])
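# Worked example (illustrative, not part of the original module): for the estimated set
# {(0,0), (1,1)} and the validation set {(0,1), (1,0)}, every point of one set lies at
# L1 distance 1 from the nearest point of the other, so both GD and IGD equal 1.
#
#   gd, igd = calc_gd_igd(np.array([[0., 0.], [1., 1.]]),
#                         np.array([[0., 1.], [1., 0.]]))
#   # gd == 1.0 and igd == 1.0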
class BorgesPastvaTrainer:
"""Polynomial Regression Trainer.
Attributes
----------
    dimSpace : int
        dimension of the ambient space
    dimSimplex : int
        dimension of the simplex
    degree : int
        degree of the Bezier simplex (order of the control points)
"""
def __init__(self, dimSpace, degree, dimSimplex):
"""Borges Pastva Trainer initialize.
Parameters
----------
        dimSpace : int
            dimension of the ambient space
        degree : int
            degree of the Bezier simplex (order of the control points)
        dimSimplex : int
            dimension of the simplex
Returns
----------
None
"""
        self.dimSpace = dimSpace  # dimension of the ambient space
        self.dimSimplex = dimSimplex  # dimension of the bezier simplex
        self.degree = degree  # degree of the bezier simplex (order of the control points)
self.bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
def initialize_control_point(self, data):
"""Initialize control point.
Parameters
----------
data : list
test data
Returns
----------
C : dict
control point
"""
bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
C = bezier_simplex.initialize_control_point(data)
return (C)
def gradient(self, c, t):
"""Calculate gradient.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
g : float
gradient
"""
g = {}
x = {}
for d in range(self.dimSimplex - 1):
x[d] = np.zeros(self.dimSpace)
for d in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[d][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d, d1=None)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for d in x:
g[(d, )] = x[d]
return (g)
def hessian(self, c, t):
"""Calculate hessian.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
h : dict
hessian matrix
"""
h = {}
x = {}
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
x[(d1, d2)] = np.zeros(self.dimSpace)
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[(d1, d2)][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d1, d1=d2)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for (d1, d2) in x:
h[(d1, d2)] = x[(d1, d2)]
return (h)
def initialize_parameter(self, c, data):
"""Initialize parameter.
Parameters
----------
c : dict
control point
data : numpy.ndarray
sample points
Returns
----------
tt_ : numpy.ndarray
nearest parameter of each sample points
xx_ : numpy.ndarray
nearest points on the current bezier simplex
"""
tt, xx = self.bezier_simplex.meshgrid(c)
tt_ = np.empty([data.shape[0], self.dimSimplex])
xx_ = np.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
a = data[i, :]
tmp = xx - a
norm = np.linalg.norm(tmp, axis=1)
amin = np.argmin(norm)
tt_[i, :] = tt[amin, :]
xx_[i, :] = xx[amin, :]
return (tt_, xx_)
def inner_product(self, c, t, x):
"""Inner product.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
f : numpy.ndarray
point
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
f = np.array(np.zeros(self.dimSimplex - 1))
for d in range(self.dimSimplex - 1):
f[d] = sum(g[(d, )][i] * (b[i] - x[i])
for i in range(self.dimSpace))
return (f)
def inner_product_jaccobian(self, c, t, x):
"""Inner product(jaccobian).
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
j : numpy.ndarray
jaccobian matrix
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
h = self.hessian(c, t)
j = np.zeros([self.dimSimplex - 1, self.dimSimplex - 1])
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
j[d1, d2] = sum(h[(d1, d2)][i] * (b[i] - x[i]) +
g[(d1, )][i] * g[(d2, )][i]
for i in range(self.dimSpace))
return (j)
def newton_method(self, c, t_init, x, newton_itr=20, tolerance=10**(-5)):
"""Newton method.
Parameters
----------
c : dict
control point
t_init : list
parameter
x : numpy.ndarray
point
newton_itr : int
iterate value
tolerance : int
tolerance
Returns
----------
t_k : numpy.ndarray
output point
"""
t_k = copy.deepcopy(t_init)
for k in range(newton_itr):
f = self.inner_product(c, t_k, x)
if np.linalg.norm(f) > tolerance:
j = self.inner_product_jaccobian(c, t_k, x)
# for Logging handling
try:
d = np.linalg.solve(j, f)
except LinAlgError as e:
logger.critical("{0}".format(e))
logger.critical("The arguments are shown below")
logger.critical(j)
logger.critical(f)
sys.exit()
t_k = t_k - d
else:
break
return (t_k)
def projection_onto_simplex(self, t):
"""Projection onto simplex.
Parameters
----------
t : list
parameter
Returns
----------
res : numpy.ndarray
parameter
"""
if np.min(t) >= 0 and np.sum(t) <= 1:
return (t)
else:
tmp = np.append(t, 1 - np.sum(t))
def l2norm(x):
return (np.linalg.norm(x - tmp))
cons = []
for i in range(self.dimSimplex):
                cons = cons + [{'type': 'ineq', 'fun': lambda x, i=i: x[i]}]
cons = cons + [{'type': 'eq', 'fun': lambda x: -np.sum(x) + 1}]
res = minimize(l2norm, x0=tmp, constraints=cons)
return (res.x[0:self.dimSimplex - 1])
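    # Note (illustrative sketch, not part of the original class): the same projection onto
    # the probability simplex can be done in closed form, without scipy.optimize, using the
    # sorting-based algorithm of Duchi et al. (2008) on the full barycentric vector:
    #
    #   def _project_full_simplex(v):
    #       u = np.sort(v)[::-1]
    #       css = np.cumsum(u)
    #       rho = np.nonzero(u * np.arange(1, len(v) + 1) > (css - 1))[0][-1]
    #       theta = (css[rho] - 1.0) / (rho + 1)
    #       return np.maximum(v - theta, 0.0)
    #
    #   # res = _project_full_simplex(np.append(t, 1 - np.sum(t)))[:self.dimSimplex - 1]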
def update_parameter(self, c, t_mat, data):
"""Projection onto simplex.
Parameters
----------
c : dict
control point
t_mat : list
parameter
data : list
test data
Returns
----------
tt_ : numpy.ndarray
parameter
xx_ : numpy.ndarray
points
"""
tt_ = np.empty([data.shape[0], self.dimSimplex - 1])
xx_ = np.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
x = data[i]
t = t_mat[i][0:self.dimSimplex - 1]
t_hat = self.newton_method(c, t, x)
t_hat2 = self.projection_onto_simplex(t_hat)
x_hat = self.bezier_simplex.sampling(c, t_hat2)
tt_[i] = t_hat2
xx_[i] = x_hat
return (tt_, xx_)
def normal_equation(self, t_mat, data, c, indices_all, indices_fix):
"""Normal equation.
Parameters
----------
t_mat : list
parameter
data : list
test data
c : dict
control point
indices_all : list
all index
indices_fix : list
fix index
Returns
----------
mat_l : numpy.ndarray
output points
mat_r : numpy.ndarray
output points
"""
mat_r = np.empty([t_mat.shape[0], len(indices_all) - len(indices_fix)])
mat_l = copy.deepcopy(data)
for i in range(t_mat.shape[0]):
jj = 0
for j in range(len(indices_all)):
key = indices_all[j]
if key not in indices_fix:
mat_r[i, jj] = self.bezier_simplex.monomial_diff(
multi_index=key, d0=None,
d1=None)(*t_mat[i, 0:self.dimSimplex - 1])
jj += 1
if key in indices_fix:
mat_l[i, :] = mat_l[i] - self.bezier_simplex.monomial_diff(
multi_index=key, d0=None, d1=None)(
*t_mat[i, 0:self.dimSimplex - 1]) * c[key]
return (mat_l, mat_r)
def update_control_point(self, t_mat, data, c, indices_all, indices_fix):
"""Normal equation.
Parameters
----------
t_mat : list
parameter
data : list
test data
c : dict
control point
indices_all : list
all index
indices_fix : list
fix index(control point)
Returns
----------
        dic_c : dict
            updated control points
"""
dic_c = {}
for key in indices_all:
dic_c[key] = np.empty(self.dimSpace)
mat_l, mat_r = self.normal_equation(t_mat, data, c, indices_all,
indices_fix)
for i in range(data.shape[1]):
y = mat_l[:, i]
# for Logging handling
try:
c_hat = np.linalg.solve(np.dot(mat_r.T, mat_r),
np.dot(mat_r.T, y))
except LinAlgError as e:
logger.critical("{0}".format(e))
logger.critical("The arguments are shown below")
logger.critical(np.dot(mat_r.T, mat_r))
logger.critical(np.dot(mat_r.T, y))
sys.exit()
jj = 0
for j in range(len(indices_all)):
key = indices_all[j]
if key in indices_fix:
dic_c[key][i] = c[key][i]
if key not in indices_fix:
dic_c[key][i] = c_hat[jj]
jj += 1
return (dic_c)
def train(self,
data,
result_dir='',
flag_write_meshgrid=1,
C_init=None,
indices_fix=None,
max_iteration=30,
tolerance=10**(-4),
data_val=None):
"""Borges Pastva Training.
Parameters
----------
data : list
test data
result_dir : str(file path)
directory name
flag_write_meshgrid : int
fragment
C_init : dict
control point
indices_fix : list
fix index
max_iteration : int
max iteration
tolerance : int
tolerance
data_val
all data
Returns
----------
C_pos : numpy.ndarray
output points
"""
create_directory(result_dir)
create_directory(result_dir + '/control_points')
create_directory(result_dir + '/meshgrid')
start = time.time()
# concat data
if isinstance(data, dict):
logger.debug("input data is dictionary!!!")
index = 0
for key in data:
if len(key) == 1:
data[key] = data[key].reshape((1, self.dimSpace))
if index == 0:
data_array = data[key]
else:
data_array = np.r_[data_array, data[key]]
index = index + 1
data = data_array
else:
logger.debug("input data is ndarray!!!")
logger.debug("datashape{}".format(data.shape))
# initialize parameter
C_pre = copy.deepcopy(C_init)
tt_init, xx_pre = self.initialize_parameter(c=C_pre, data=data)
tt_pre = tt_init
rss_pre = 100000
for itr in range(max_iteration):
self.bezier_simplex.write_control_point(
C=C_pre,
filename=result_dir + '/control_points/control_point_itr_' +
'{0:03d}'.format(itr))
if flag_write_meshgrid == 1:
self.bezier_simplex.write_meshgrid(C=C_pre,
filename=result_dir +
'/meshgrid/meshgrid_itr_' +
'{0:03d}'.format(itr))
# update t
tt_pos, xx_pos = self.update_parameter(c=C_pre,
t_mat=tt_pre,
data=data)
# update control points
C_pos = self.update_control_point(t_mat=tt_pos,
data=data,
c=C_pre,
indices_all=list(C_pre.keys()),
indices_fix=indices_fix)
# calc rss
            rss_pos = np.linalg.norm(data - xx_pos)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kameleon_rks.tools.convergence_stats import autocorr
def pdf_grid(Xs, Ys, est):
D = np.zeros((len(Xs), len(Ys)))
G = np.zeros(D.shape)
# this is in-efficient, log_pdf_multiple on a 2d array is faster
for i, x in enumerate(Xs):
for j, y in enumerate(Ys):
point = np.array([x, y])
D[j, i] = est.log_pdf(point)
G[j, i] = np.linalg.norm(est.grad(point))
return D, G
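# Sketch of the vectorised alternative hinted at in the comment above (assumes the
# estimator exposes log_pdf_multiple for an (n, 2) array of points; not original code):
#
#   XX, YY = np.meshgrid(Xs, Ys)
#   points = np.column_stack([XX.ravel(), YY.ravel()])
#   D_fast = est.log_pdf_multiple(points).reshape(len(Ys), len(Xs))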
def visualise_fit_2d(est, X, Xs=None, Ys=None):
# visualise found fit
plt.figure()
if Xs is None:
Xs = np.linspace(-5, 5)
if Ys is None:
Ys = np.linspace(-5, 5)
D, G = pdf_grid(Xs, Ys, est)
plt.subplot(121)
visualise_array(Xs, Ys, D, X)
plt.title("log pdf")
plt.subplot(122)
visualise_array(Xs, Ys, G, X)
plt.title("gradient norm")
plt.tight_layout()
def visualise_array(Xs, Ys, A, samples=None):
im = plt.imshow(A, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
if samples is not None:
plt.plot(samples[:, 0], samples[:, 1], 'bx')
plt.ylim([Ys.min(), Ys.max()])
plt.xlim([Xs.min(), Xs.max()])
def visualise_trajectory(Qs, acc_probs, log_pdf_q, D, log_pdf=None, Z=None, log_domain=True):
assert Qs.ndim == 2
plot_density = log_pdf is not None and D == 2
plt.figure(figsize=(10, 12))
plt.subplot(411)
# plot density if given and dimension is 2
if plot_density:
        Xs = np.linspace(-30, 30, 75)
import matplotlib.pyplot as plt
import cv2
import imutils
import numpy as np
import os
cap = cv2.VideoCapture('./example.mp4')
def dist(i,j):
return ( (i[0]-j[0])**2 + (i[1]-j[1])**2 )**0.5
def actuate(x,y):
if(0<=x<=200):
signal(1)
    if(200<=x<=400):
signal(2)
if(400<=x<=600):
signal(3)
if(600<=x<=800):
signal(4)
def signal(x):
return 0
def oneframe(ix):
ret,frame = cap.read()
#plt.imshow(frame)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
image = frame
(regions, _) = hog.detectMultiScale(image, winStride=(4, 4), padding=(4, 4), scale=1.05)
hpts = [(i[0] + int(i[2]/2), i[1] + int(i[-1]/2) ) for i in regions]
close = []
far = []
near = []
dists = []
for i in range(len(hpts)):
for j in range(len(hpts)):
if(hpts[i]!=hpts[j]):
dists.append(dist(hpts[i],hpts[j]))
if(dist(hpts[i],hpts[j])<100):
close.append(i)
if(dist(hpts[i],hpts[j])>100 and dist(hpts[i],hpts[j])<400):
near.append(i)
if(dist(hpts[i],hpts[j])>500):
far.append(i)
    close,near,far = np.unique(close),np.unique(near),np.unique(far)
import numpy as np
class MutiplicativeCascade:
def __init__(self, k_max, M, randomize=False):
self.k_max = k_max
self.M = M
self.randomize = randomize
self.data = []
def cascade(self):
y = self._cascade_recursively(1, 1, 1, self.k_max, self.M, self.randomize)
x = np.linspace(0, 1, num=len(y), endpoint=False)
y = np.insert(y, 0, 0)
x = np.append(x, 1)
self.data = np.stack([x, y], axis=1)
def _cascade_recursively(self, x, y, k, k_max, M, randomize=False):
"""
:param x: width of current cell
:param y: height of current cell
:param k: current branch of the recursion tree
:param k_max: max depth of the recursion tree
:param M: array m0, m1, ..., mb where sum(M) = 1
:param randomize: whether or not to shuffle M before assigning mass to child cells. See page 13 of "A Multifractal Model of Asset Returns" 1997
:return: [y, ...] corresponding to multiplicative cascade y coordinates
"""
a = x * y
x_next = x / len(M)
M_shuffle = np.copy(M)
if randomize:
np.random.shuffle(M_shuffle)
else:
M_shuffle = M_shuffle
        y_i = np.array([])
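# A hedged usage sketch for the class above (the recursion body is truncated
# here, so this assumes cascade() fills self.data as the docstring describes):
# a binomial cascade with mass splits M = [0.6, 0.4], as in the 1997
# "Multifractal Model of Asset Returns" construction cited in the docstring.
if __name__ == "__main__":
    mc = MutiplicativeCascade(k_max=8, M=[0.6, 0.4], randomize=True)
    mc.cascade()
    x_coords, masses = mc.data[:, 0], mc.data[:, 1]
    print(x_coords.shape, masses.shape)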
#!/usr/bin/env python
"""
@authors: <NAME>, <NAME>
Date Created: 9/24/2011
"""
from __future__ import division, print_function
from future.utils import iteritems, viewitems
from builtins import int
import os
import sys
import subprocess
import time
from copy import copy
import multiprocessing as mpr
import argparse
import fnmatch
from collections import OrderedDict
# MapPy
try:
from . import raster_tools
except:
import raster_tools
from . import utils
from .errors import logger
from .helpers import _iteration_parameters
# Numpy
try:
import numpy as np
except ImportError:
raise ImportError('NumPy must be installed')
# Numexpr
try:
import numexpr as ne
ne.set_num_threads(mpr.cpu_count())
numexpr_installed = True
except:
numexpr_installed = False
# Carray
# try:
# import carray as ca
# carray_installed = True
# except:
# carray_installed = False
# GDAL
try:
from osgeo import gdal
from osgeo.gdalconst import *
except ImportError:
raise ImportError('GDAL must be installed')
# Scikit-image
try:
from skimage.exposure import rescale_intensity
except ImportError:
raise ImportError('Scikit-image must be installed')
try:
import deprecation
except ImportError:
raise ImportError('deprecation must be installed (pip install deprecation)')
old_settings = np.seterr(all='ignore')
class SensorInfo(object):
"""
A class to hold sensor names, wavelengths, and equations.
"""
def __init__(self):
self.sensors = utils.SUPPORTED_SENSORS
self.band_orders = utils.SENSOR_BAND_DICT
# The wavelengths needed to compute the index.
# The wavelengths are loaded in order, so the
# order should match the equations in
# ``self.equations``.
self.wavelength_lists = utils.VI_WAVELENGTHS
# The vegetation index equations. The arrays are
# loaded from ``self.wavelength_lists``. For example,
# ``array01`` of 'ARVI' would be the 'blue' wavelength.
self.equations = \
{'ARVI': '((array03 / scale_factor) - ((array02 / scale_factor) - '
'y*((array01 / scale_factor) - (array02 / scale_factor)))) / '
'((array03 / scale_factor) + ((array02 / scale_factor) - '
'y*((array01 / scale_factor) - (array02 / scale_factor))))',
'BRIGHT': '((array01 / scale_factor)**2 + (array02 / scale_factor)**2 + (array03 / scale_factor)**2 + (array04 / scale_factor)**2)**0.5',
'CBI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'CIRE': '((array02 / scale_factor) / (array01 / scale_factor)) - 1.',
'EVI': 'g * (((array03 / scale_factor) - (array02 / scale_factor)) / '
'((array03 / scale_factor) + (c1 * (array02 / scale_factor)) - '
'(c2 * (array01 / scale_factor)) + L))',
'EVI2': 'g * (((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + L + (c1 * (array01 / scale_factor))))',
'IPVI': '(array02 / scale_factor) / ((array02 / scale_factor) + (array01 / scale_factor))',
'MSAVI': '((2 * array02 + 1) - ((((2 * array02 + 1)**2) - (8 * (array02 - array01)))**.5)) / 2',
'GNDVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'MNDWI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'NDSI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'NDBAI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'NBRI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'NDII': '(array03 - array02 + array01) / (array03 + array02 + array01)',
'NDVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'RENDVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'NDWI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'PNDVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'RBVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'GBVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'ONDVI': '(4. / pi) * arctan(((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor)))',
'SATVI': '((((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor) + L)) * (1. + L)) - '
'((array03 / scale_factor) / 2.)',
'SAVI': '(((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor) + L)) * (1. + L)',
'OSAVI': 'arctan(((((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor) + L)) * (1. + L)) / 1.5) * 2.',
'SVI': '(array02 / scale_factor) / (array01 / scale_factor)',
'TNDVI': 'sqrt((((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))) * .5)',
'TVI': 'sqrt((((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))) + .5)',
'TWVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'YNDVI': '((array02 / scale_factor) - (array01 / scale_factor)) / '
'((array02 / scale_factor) + (array01 / scale_factor))',
'VCI': '(((array02 - array01) / (array02 + array01)) - min_ndvi) / (max_ndvi - min_ndvi)',
'VISMU': '((array01 / scale_factor) + (array02 / scale_factor) + (array03 / scale_factor)) / 3.',
'WI': '(array01 / scale_factor) + (array02 / scale_factor)'}
# The data ranges for scaling, but only
# used if the output storage type is not
# equal to 'float32'.
self.data_ranges = {'ARVI': (),
'BRIGHT': (0.0, 1.0),
'CBI': (-1.0, 1.0),
'CIRE': (-1.0, 1.0),
'EVI': (0., 1.0),
'EVI2': (0., 1.0),
'IPVI': (),
'MSAVI': (),
'GNDVI': (-1.0, 1.0),
'MNDWI': (-1.0, 1.0),
'NDSI': (-1.0, 1.0),
'NDBAI': (-1.0, 1.0),
'NBRI': (-1.0, 1.0),
'NDII': (-1.0, 1.0),
'NDVI': (-1.0, 1.0),
'RENDVI': (-1.0, 1.0),
'NDWI': (-1.0, 1.0),
'PNDVI': (-1.0, 1.0),
'RBVI': (-1.0, 1.0),
'GBVI': (-1.0, 1.0),
'ONDVI': (),
'SATVI': (),
'SAVI': (),
'OSAVI': (),
'SVI': (),
'TNDVI': (),
'TVI': (),
'YNDVI': (-1.0, 1.0),
'TWVI': (-1, 1),
'VCI': (),
'VISMU': (0., 1.0),
'WI': (0.0, 1.0)}
def list_expected_band_order(self, sensor):
# Return the dictionary sorted by values
self.expected_band_order = OrderedDict(sorted(list(iteritems(self.band_orders[sensor])), key=lambda sbo: sbo[1]))
logger.info('\nExpected band order for {}:\n'.format(sensor))
logger.info(' WAVELENGTH Band')
logger.info(' ---------- ----')
sp = ' '
for w, b in viewitems(self.expected_band_order):
gap_string = ''
gap_len = 12 - len(w)
for gx in range(0, gap_len):
gap_string += sp
logger.info(' {}{}{:d}'.format(w.upper(), gap_string, b))
print('')
def list_indice_options(self, sensor):
"""
Lists the vegetation indices that can be computed from the given sensor.
Args:
sensor (str): The sensor.
"""
if sensor not in self.sensors:
raise NameError('{} not a sensor option. Choose one of {}'.format(sensor, ', '.join(self.sensors)))
self.sensor_indices = []
# A list of wavelengths in the
# current sensor.
sensor_wavelengths = list(self.band_orders[sensor])
# All of the vegetation index wavelengths must
# be in the sensor wavelength.
for veg_index, indice_wavelengths in viewitems(self.wavelength_lists):
if set(indice_wavelengths).issubset(sensor_wavelengths):
self.sensor_indices.append(veg_index)
class VegIndicesEquations(SensorInfo):
"""
A class to compute vegetation indices
Args:
image_array (ndarray)
no_data (Optional[int]): The output 'no data' value. Overflows and NaNs are filled with ``no_data``.
Default is 0.
in_no_data (Optional[int]): The input 'no data' value.
chunk_size (Optional[int]): The chunk size to determine whether to use ``ne.evaluate``. Default is -1, or
use ``numexpr``.
mask_array (Optional[2d array]): A mask where anything equal to 255 is background. Default is None.
"""
def __init__(self, image_array, no_data=0, in_no_data=0, chunk_size=-1, mask_array=None):
self.image_array = np.float32(image_array)
self.no_data = no_data
self.in_no_data = in_no_data
self.chunk_size = chunk_size
self.mask_array = mask_array
SensorInfo.__init__(self)
try:
self.array_dims, self.array_rows, self.array_cols = image_array.shape
except:
raise ValueError('The input array must be at least 3d.')
def rescale_range(self, array2rescale, in_range=()):
if self.out_type > 3:
raise ValueError('The output type cannot be greater than 3.')
if self.out_type == 2:
if in_range:
array2rescale_ = np.uint8(rescale_intensity(array2rescale,
in_range=in_range,
out_range=(0, 254)))
else:
array2rescale_ = np.uint8(rescale_intensity(array2rescale, out_range=(0, 254)))
elif self.out_type == 3:
if in_range:
array2rescale_ = np.uint16(rescale_intensity(array2rescale,
in_range=in_range,
out_range=(0, 10000)))
else:
array2rescale_ = np.uint16(rescale_intensity(array2rescale, out_range=(0, 10000)))
return np.where(array2rescale == self.no_data, self.no_data, array2rescale_)
def compute(self, vi_index, out_type=1, scale_factor=1.0, **kwargs):
"""
Args:
vi_index (str): The vegetation index to compute.
out_type (Optional[int]): This controls the output scaling. Default is 1, or return 'as is'. Choices
are [1, 2, 3].
1 = raw values (float32)
2 = scaled (byte)
3 = scaled (uint16)
scale_factor (Optional[float]): A scale factor to divide the inputs by. Default is 1.
Example:
>>> from mappy.features import VegIndicesEquations
>>>
>>> # Create a fake 2-band array.
            >>> image_stack = np.random.randn(2, 100, 100).astype('float32')
>>>
>>> # Setup the vegetation index object.
>>> vie = VegIndicesEquations(image_stack)
>>>
>>> # Calculate the NDVI vegetation index.
>>> ndvi = vie.compute('NDVI')
"""
self.vi_index = vi_index
self.out_type = out_type
self.n_bands = len(self.wavelength_lists[self.vi_index.upper()])
# Use ``numexpr``.
if self.chunk_size == -1:
if vi_index.lower() == 'twvi':
imcopy = self.image_array.copy()
if kwargs:
self.image_array = imcopy[:2]
self.vi_index = 'evi2'
evi2 = self.run_index(scale_factor, **kwargs)
self.image_array = imcopy[1:]
self.vi_index = 'ndsi'
ndsi = self.run_index(scale_factor, **kwargs)
else:
self.image_array = imcopy[:2]
self.vi_index = 'evi2'
evi2 = self.run_index(scale_factor)
self.image_array = imcopy[1:]
self.vi_index = 'ndsi'
ndsi = self.run_index(scale_factor)
ndsi = rescale_intensity(ndsi, in_range=(-1, 1), out_range=(0, 1))
self.image_array = np.stack((evi2, ndsi))
self.vi_index = 'twvi'
if kwargs:
return self.run_index(scale_factor, **kwargs)
else:
return self.run_index(scale_factor)
else:
vi_functions = {'ARVI': self.ARVI,
'BRIGHT': self.BRIGHT,
'CBI': self.CBI,
'CIre': self.CIre,
'EVI': self.EVI,
'EVI2': self.EVI2,
'IPVI': self.IPVI,
'GNDVI': self.GNDVI,
'MNDWI': self.MNDWI,
'MSAVI': self.MSAVI,
'NDSI': self.NDSI,
'NDBAI': self.NDBAI,
'NBRI': self.NBR,
'NDVI': self.NDVI,
'RENDVI': self.RENDVI,
'ONDVI': self.ONDVI,
'NDWI': self.NDWI,
'PNDVI': self.PNDVI,
'RBVI': self.RBVI,
'GBVI': self.GBVI,
'SATVI': self.SATVI,
'SAVI': self.SAVI,
'OSAVI': self.OSAVI,
'SVI': self.SVI,
'TNDVI': self.TNDVI,
'TVI': self.TVI,
'TWVI': self.TWVI,
'YNDVI': self.YNDVI,
'VCI': self.VCI,
'WI': self.WI}
if self.vi_index.upper() not in vi_functions:
raise NameError('{} is not a vegetation index option.'.format(self.vi_index))
vi_function = vi_functions[self.vi_index.upper()]
if kwargs:
return vi_function(kwargs)
else:
return vi_function()
def run_index(self, scale_factor, y=1., g=2.5, L=1., min_ndvi=-1, max_ndvi=1, **kwargs):
# EVI defaults
if self.vi_index.upper() == 'EVI' and not kwargs:
c1 = 6.0
c2 = 7.5
elif self.vi_index.upper() == 'EVI2' and not kwargs:
c1 = 2.4
no_data = self.no_data
in_no_data = self.in_no_data
pi = np.pi
# Setup a mask
if isinstance(self.mask_array, np.ndarray):
mask_array = self.mask_array
mask_equation = 'where(mask_array == 1, no_data, index_array)'
if self.n_bands == 2:
if self.image_array.shape[0] != 2:
logger.error(' The input array should have {:d} dimensions.'.format(self.n_bands))
raise ValueError
array01 = self.image_array[0]
array02 = self.image_array[1]
if not isinstance(self.mask_array, np.ndarray):
mask_equation = 'where((array01 == in_no_data) | (array02 == in_no_data), no_data, index_array)'
elif self.n_bands == 3:
if self.image_array.shape[0] != 3:
logger.error(' The input array should have {:d} dimensions.'.format(self.n_bands))
raise ValueError
array01 = self.image_array[0]
array02 = self.image_array[1]
array03 = self.image_array[2]
if not isinstance(self.mask_array, np.ndarray):
mask_equation = 'where((array01 == in_no_data) | (array02 == in_no_data) | (array03 == in_no_data), no_data, index_array)'
else:
logger.error(' The input array needs 2 or 3 bands.')
raise ValueError
index_array = ne.evaluate(self.equations[self.vi_index.upper()])
if self.vi_index.upper() == 'WI':
index_array = np.where(index_array > 0.5, 0, 1.0 - (index_array / 0.5))
d_range = self.data_ranges[self.vi_index.upper()]
if d_range:
if d_range[0] == -9999:
scale_data = False
else:
scale_data = True
# Clip lower and upper bounds.
index_array = ne.evaluate('where(index_array < {:f}, {:f}, index_array)'.format(d_range[0], d_range[0]))
index_array = ne.evaluate('where(index_array > {:f}, {:f}, index_array)'.format(d_range[1], d_range[1]))
# if self.out_type != 1:
# index_array += abs(d_range[0])
else:
scale_data = False
if scale_data:
if self.data_ranges[self.vi_index.upper()]:
if self.out_type == 2:
index_array = np.uint8(self.rescale_range(index_array, in_range=d_range))
elif self.out_type == 3:
index_array = np.uint16(self.rescale_range(index_array, in_range=d_range))
else:
if self.out_type == 2:
index_array = np.uint8(self.rescale_range(index_array, in_range=(0, 10000)))
elif self.out_type == 3:
index_array = np.uint16(index_array)
index_array[np.isinf(index_array) | np.isnan(index_array)] = self.no_data
index_array = ne.evaluate(mask_equation)
return index_array
def ARVI(self, y=1):
"""
Atmospherically Resistant Vegetation Index (ARVI)
Equation:
(nir - rb) / (nir + rb)
where, rb = red - y(blue - red)
            where, y = gamma value (weighting factor depending on aerosol type), (0.7 to 1.3)
"""
try:
blue = self.image_array[0]
red = self.image_array[1]
nir = self.image_array[2]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
rb1 = np.multiply(np.subtract(blue, red), y)
rb = np.subtract(red, rb1)
        arvi = self.main_index(rb, nir)
arvi[(blue == 0) | (red == 0) | (nir == 0)] = self.no_data
arvi[np.isinf(arvi) | np.isnan(arvi)] = self.no_data
if self.out_type > 1:
arvi = self.rescale_range(arvi)
return arvi
def BRIGHT(self):
try:
green = self.image_array[0]
red = self.image_array[1]
nir = self.image_array[2]
midir = self.image_array[3]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
bright = np.sqrt(green**2 + red**2 + nir**2 + midir**2)
bright[(green == 0) | (red == 0) | (nir == 0) | (midir == 0)] = self.no_data
bright[np.isinf(bright) | np.isnan(bright)] = self.no_data
if self.out_type > 1:
bright = self.rescale_range(bright)
return bright
def CBI(self):
"""
Coastal-Blue Index
Equation:
CBI = (blue - cblue) / (blue + cblue)
"""
try:
cblue = self.image_array[0]
blue = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
cbi = self.main_index(cblue, blue)
cbi[(cblue == 0) | (blue == 0)] = self.no_data
cbi[np.isinf(cbi) | np.isnan(cbi)] = self.no_data
if self.out_type > 1:
cbi = self.rescale_range(cbi, in_range=(-1., 1.))
return cbi
def CIre(self):
"""
Chlorophyll Index red-edge (CIre)
References:
Clevers, J.G.P.W. & <NAME>. (2013) Remote estimation of crop and grass chlorophyll and
nitrogen content using red-edge bands on Sentinel-2 and -3. International Journal of Applied
Earth Observation and Geoinformation, 23, 344-351.
"""
try:
rededge = self.image_array[0]
rededge3 = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
ci_re = np.subtract(np.divide(rededge3, rededge), 1.)
ci_re[(rededge == 0) | (rededge3 == 0)] = self.no_data
ci_re[np.isinf(ci_re) | np.isnan(ci_re)] = self.no_data
if self.out_type > 1:
ci_re = self.rescale_range(ci_re, in_range=(0., 1.))
return ci_re
def EVI(self, c1=6., c2=7.5, g=2.5, L=1.):
"""
Enhanced Vegetation Index (EVI)
Equation:
g * [ nir - Red
------------------------------
nir + C1 * Red - C2 * Blue + L
]
C1 = 6
C2 = 7.5
L = 1
g = 2.5
References:
Huete et al. (2002) Overview of the radiometric and biophysical performance of the
MODIS vegetation indices. Remote Sensing of Environment, 83, 195-213.
"""
try:
blue = self.image_array[0]
red = self.image_array[1]
nir = self.image_array[2]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
top = np.subtract(nir, red)
red_c1 = np.multiply(c1, red)
blue_c2 = np.multiply(c2, blue)
bottom = np.add(np.add(np.subtract(red_c1, blue_c2), nir), L)
evi = np.divide(top, bottom)
evi = np.multiply(evi, g)
evi[(blue == 0) | (red == 0) | (nir == 0)] = self.no_data
evi[np.isinf(evi) | np.isnan(evi)] = self.no_data
if self.out_type > 1:
evi = self.rescale_range(evi, in_range=(0., 1.))
return evi
def EVI2(self, c1=2.4, g=2.5, L=1.):
"""
Enhanced Vegetation Index (EVI2)
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>. 2008. "Development of a
two-band enhanced vegetation index without a blue band." Remote Sensing of Environment 112: 3833-3845.
Equation:
g * [ nir - Red
---------------------
nir + (C1 * Red) + 1
]
c1 = 2.4
g = 2.5
"""
try:
red = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
top = np.subtract(nir, red)
bottom = np.add(np.add(np.multiply(red, c1), nir), L)
evi2 = np.divide(top, bottom)
evi2 = np.multiply(evi2, g)
evi2[(red == 0) | (nir == 0)] = self.no_data
evi2[np.isinf(evi2) | np.isnan(evi2)] = self.no_data
if self.out_type > 1:
evi2 = self.rescale_range(evi2, in_range=(0., 1.))
return evi2
def IPVI(self):
"""
Equation:
IPVI = nir / (nir + red)
"""
try:
red = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
bottom = np.add(nir, red)
ipvi = np.divide(nir, bottom)
ipvi[(red == 0) | (nir == 0)] = self.no_data
ipvi[np.isinf(ipvi) | np.isnan(ipvi)] = self.no_data
if self.out_type > 1:
ipvi = self.rescale_range(ipvi)
return ipvi
def MSAVI(self):
"""
Modified Soil Adjusted Vegetation Index (MSAVI2)
Equation:
((2 * nir + 1) - sqrt(((2 * nir + 1)^2) - (8 * (nir - Red)))) / 2
"""
try:
red = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
topR1 = np.add(np.multiply(nir, 2.), 1.)
topR2 = np.power(topR1, 2.)
topR4 = np.multiply(np.subtract(nir, red), 8.)
topR5 = np.subtract(topR2, topR4)
topR6 = np.sqrt(topR5)
msavi = np.subtract(topR1, topR6)
msavi = np.divide(msavi, 2.)
msavi[(red == 0) | (nir == 0)] = self.no_data
msavi[np.isinf(msavi) | np.isnan(msavi)] = self.no_data
if self.out_type > 1:
msavi = self.rescale_range(msavi)
return msavi
def GNDVI(self):
"""
Green Normalised Difference Vegetation Index (GNDVI)
Equation:
GNDVI = (NIR - green) / (NIR + green)
"""
try:
green = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
gndvi = self.main_index(green, nir)
gndvi[(gndvi < -1.)] = -1.
gndvi[(gndvi > 1.)] = 1.
gndvi[(green == 0) | (nir == 0)] = self.no_data
gndvi[np.isinf(gndvi) | np.isnan(gndvi)] = self.no_data
if self.out_type > 1:
gndvi = self.rescale_range(gndvi, in_range=(-1., 1.))
return gndvi
def MNDWI(self):
"""
Modified Normalised Difference Water Index (MNDWI)
Equation:
MNDWI = (green - MidIR) / (green + MidIR)
Reference:
<NAME> (2006) Modification of normalised difference water index (NDWI) to enhance
open water features in remotely sensed imagery. IJRS 27:14.
"""
try:
midir = self.image_array[0]
green = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
mndwi = self.main_index(midir, green)
mndwi[(mndwi < -1.)] = -1.
mndwi[(mndwi > 1.)] = 1.
mndwi[(green == 0) | (midir == 0)] = self.no_data
mndwi[np.isinf(mndwi) | np.isnan(mndwi)] = self.no_data
if self.out_type > 1:
mndwi = self.rescale_range(mndwi, in_range=(-1., 1.))
return mndwi
def NDSI(self):
"""
Normalised Difference Soil Index (NDSI) (Rogers) or
Normalised Difference Water Index (NDWI) (Gao)
Equation:
NDSI = (MidIR - NIR) / (MidIR + NIR)
References:
<NAME>. & <NAME>. (2004) 'Reducing signature
variability in unmixing coastal marsh Thematic
Mapper scenes using spectral indices' International
Journal of Remote Sensing, 25(12), 2317-2335.
<NAME> (1996) 'NDWI A Normalized Difference Water
Index for Remote Sensing of Vegetation Liquid Water
From Space' Remote Sensing of Environment.
"""
try:
nir = self.image_array[0]
midir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
ndsi = self.main_index(nir, midir)
ndsi[(ndsi < -1.)] = -1.
ndsi[(ndsi > 1.)] = 1.
ndsi[(nir == 0) | (midir == 0)] = self.no_data
ndsi[np.isinf(ndsi) | np.isnan(ndsi)] = self.no_data
if self.out_type > 1:
ndsi = self.rescale_range(ndsi, in_range=(-1., 1.))
return ndsi
def NDBAI(self):
"""
Normalised Difference Bareness Index (NDBaI)
Equation:
NDBaI = (FarIR - MidIR) / (FarIR + MidIR)
Reference:
<NAME>, Chen, Xiaoling (2005) 'Use of Normalized
Difference Bareness Index in Quickly Mapping Bare
Areas from TM/ETM+' IEEE.
"""
try:
midir = self.image_array[0]
farir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
ndbai = self.main_index(midir, farir)
ndbai[(ndbai < -1.)] = -1.
ndbai[(ndbai > 1.)] = 1.
ndbai[(midir == 0) | (farir == 0)] = self.no_data
ndbai[np.isinf(ndbai) | np.isnan(ndbai)] = self.no_data
if self.out_type > 1:
ndbai = self.rescale_range(ndbai, in_range=(-1., 1.))
return ndbai
def NBR(self):
"""
Normalised Burn Ratio (NBR)
Equation:
NBR = (NIR - FarIR) / (NIR + FarIR)
"""
try:
farir = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
nbr = self.main_index(farir, nir)
nbr[(nbr < -1.)] = -1.
nbr[(nbr > 1.)] = 1.
        nbr[(farir == 0) | (nir == 0)] = self.no_data
nbr[np.isinf(nbr) | np.isnan(nbr)] = self.no_data
if self.out_type > 1:
nbr = self.rescale_range(nbr, in_range=(-1.0, 1.0))
return nbr
def NDVI(self):
"""
Normalised Difference Vegetation Index (NDVI)
Equation:
NDVI = (NIR - red) / (NIR + red)
"""
try:
red = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
ndvi = self.main_index(red, nir)
ndvi[(ndvi < -1.)] = -1.
ndvi[(ndvi > 1.)] = 1.
ndvi[(red == 0) | (nir == 0)] = self.no_data
ndvi[np.isinf(ndvi) | np.isnan(ndvi)] = self.no_data
if self.out_type > 1:
ndvi = self.rescale_range(ndvi, in_range=(-1., 1.))
return ndvi
def RENDVI(self):
"""
Rededge Normalised Difference Vegetation Index (RENDVI)
Equation:
RENDVI = (NIR - rededge) / (NIR + rededge)
"""
try:
rededge = self.image_array[0]
nir = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
rendvi = self.main_index(rededge, nir)
rendvi[(rendvi < -1.)] = -1.
rendvi[(rendvi > 1.)] = 1.
rendvi[(rededge == 0) | (nir == 0)] = self.no_data
rendvi[np.isinf(rendvi) | np.isnan(rendvi)] = self.no_data
if self.out_type > 1:
rendvi = self.rescale_range(rendvi, in_range=(-1., 1.))
return rendvi
def NDWI(self):
"""
Normalised Difference Water Index (NDWI)
Equation:
NDWI = (green - NIR) / (green + NIR)
Reference:
<NAME>. (1996) 'The use of the Normalized Difference
Water Index (NDWI) in the delineation of open water
features, International Journal of Remote Sensing, 17(7),
1425-1432.
"""
try:
nir = self.image_array[0]
green = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
ndwi = self.main_index(nir, green)
ndwi[(ndwi < -1.)] = -1.
ndwi[(ndwi > 1.)] = 1.
ndwi[(green == 0) | (nir == 0)] = self.no_data
ndwi[np.isinf(ndwi) | np.isnan(ndwi)] = self.no_data
if self.out_type > 1:
ndwi = self.rescale_range(ndwi, in_range=(-1., 1.))
return ndwi
def PNDVI(self):
"""
Pseudo Normalised Difference Vegetation Index (PNDVI)
Equation:
PNDVI = (red - green) / (red + green)
"""
try:
green = self.image_array[0]
red = self.image_array[1]
except:
raise ValueError('\nThe input array should have {:d} dimensions.\n'.format(self.n_bands))
pndvi = self.main_index(green, red)
pndvi[(pndvi < -1.)] = -1.
pndvi[(pndvi > 1.)] = 1.
pndvi[(green == 0) | (red == 0)] = self.no_data
        pndvi[np.isinf(pndvi) | np.isnan(pndvi)] = self.no_data
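# --- illustrative note (separate from the class above) ---
# Most indices above share the same normalised-difference core,
# (b2 - b1) / (b2 + b1) clipped to [-1, 1], which the repeated main_index()
# calls appear to implement. A small self-contained numpy illustration with
# hypothetical reflectance values (names and numbers are made up):
import numpy as np

red_toy = np.array([[0.10, 0.20], [0.30, 0.05]], dtype='float32')
nir_toy = np.array([[0.50, 0.60], [0.35, 0.40]], dtype='float32')
ndvi_toy = np.clip((nir_toy - red_toy) / (nir_toy + red_toy), -1.0, 1.0)
print(ndvi_toy)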
import numpy as np
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
_torch_supported_dataset = ('mnist', 'fashion', 'cifar10', 'svhn')
_torch_dataset_key_mapping = {
'mnist': 'MNIST',
'fashion': 'FashionMNIST',
'cifar10': 'CIFAR10',
'svhn': 'SVHN',
}
_dataset_ratio_mapping = {
'mnist': [4000, 2000, 1000, 750, 500, 350, 200, 100, 60, 40],
'fashion': [4000, 2000, 1000, 750, 500, 350, 200, 100, 60, 40],
'cifar10': [4500, 2000, 1000, 800, 600, 500, 400, 250, 150, 80],
'svhn': [4500, 2000, 1000, 800, 600, 500, 400, 250, 150, 80],
}
def dataset_to_numpy(dataset):
loader = DataLoader(dataset, len(dataset))
x, y = next(iter(loader))
return x.numpy(), y.numpy()
def load_data(name, seed, imbalance=None, data_dir=None):
name = name.lower()
if data_dir is None:
data_dir = './data/%s/' % name
func_name = _torch_dataset_key_mapping[name] # if name in _torch_dataset_key_mapping else None
dataset_func = getattr(torchvision.datasets, func_name)
transform = transforms.Compose([transforms.ToTensor(), ])
if name in ('mnist', 'fashion', 'cifar10'):
train_dataset = dataset_func(data_dir, train=True, transform=transform, download=True)
test_dataset = dataset_func(data_dir, train=False, transform=transform, download=True)
elif name == 'svhn':
train_dataset = dataset_func(data_dir, split='train', transform=transform, download=True)
test_dataset = dataset_func(data_dir, split='test', transform=transform, download=True)
else:
raise NotImplementedError
X_train, y_train = dataset_to_numpy(train_dataset)
X_test, y_test = dataset_to_numpy(test_dataset)
X_train, y_train = _shuffle(X_train, y_train, seed)
X_train = np.transpose(X_train, axes=[0, 2, 3, 1])
X_test = np.transpose(X_test, axes=[0, 2, 3, 1])
n_classes = len(np.unique(y_test))
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
    print([np.count_nonzero(y_train == i) for i in range(n_classes)])
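# The print above reports per-class training-sample counts. The same counts can
# be obtained in one call with np.bincount; a short equivalent sketch
# (assuming integer labels 0..n_classes-1):
def class_counts(labels, n_classes):
    counts = np.bincount(labels, minlength=n_classes)
    assert counts.sum() == len(labels)
    return counts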
import logging
from collections import OrderedDict
from typing import Dict, Union, List
import numpy
from panda3d.bullet import BulletBoxShape, BulletRigidBodyNode, BulletGhostNode
from panda3d.core import Vec3, LQuaternionf, BitMask32, Vec4, CardMaker, TextureStage, RigidBodyCombiner, \
TransparencyAttrib, SamplerState, NodePath
from pgdrive.constants import Decoration, BodyName, CamMask
from pgdrive.scene_creator.blocks.constants import BlockDefault
from pgdrive.scene_creator.lane.abs_lane import AbstractLane, LineType, LaneNode, LineColor
from pgdrive.scene_creator.lane.circular_lane import CircularLane
from pgdrive.scene_creator.lane.straight_lane import StraightLane
from pgdrive.scene_creator.road.road import Road
from pgdrive.scene_creator.road.road_network import RoadNetwork
from pgdrive.utils.asset_loader import AssetLoader
from pgdrive.utils.coordinates_shift import panda_position
from pgdrive.utils.element import Element
from pgdrive.utils.math_utils import norm
from pgdrive.world.pg_physics_world import PGPhysicsWorld
class BlockSocket:
"""
    A pair of roads running in opposite directions.
    positive_road is the right-hand road; negative_road is the left-hand road, on which cars drive in the reverse direction.
    A BlockSocket is the part of a block used to connect it to other blocks.
"""
def __init__(self, positive_road: Road, negative_road: Road = None):
self.positive_road = positive_road
self.negative_road = negative_road if negative_road else None
self.index = None
def set_index(self, block_name: str, index: int):
self.index = self.get_real_index(block_name, index)
@classmethod
def get_real_index(cls, block_name: str, index: int):
return "{}-socket{}".format(block_name, index)
class Block(Element, BlockDefault):
"""
Abstract class of Block,
BlockSocket: a part of previous block connecting this block
<----------------------------------------------
road_2_end <---------------------- road_2_start
<~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>
road_1_start ----------------------> road_1_end
---------------------------------------------->
BlockSocket = tuple(road_1, road_2)
    When a single-direction block is created, road_2 in the block socket is unused,
    but it becomes useful when a town is created.
"""
def __init__(self, block_index: int, pre_block_socket: BlockSocket, global_network: RoadNetwork, random_seed):
super(Block, self).__init__(random_seed)
# block information
assert self.ID is not None, "Each Block must has its unique ID When define Block"
assert self.SOCKET_NUM is not None, "The number of Socket should be specified when define a new block"
if block_index == 0:
from pgdrive.scene_creator.blocks import FirstBlock
assert isinstance(self, FirstBlock), "only first block can use block index 0"
elif block_index < 0:
logging.debug("It is recommended that block index should > 1")
self._block_name = str(block_index) + self.ID
self.block_index = block_index
self.number_of_sample_trial = 0
# each block contains its own road network and a global network
self._global_network = global_network
self.block_network = RoadNetwork()
# used to spawn npc
self._reborn_roads = []
# own sockets, one block derives from a socket, but will have more sockets to connect other blocks
self._sockets = OrderedDict()
# used to connect previous blocks, save its info here
self.pre_block_socket = pre_block_socket
self.pre_block_socket_index = pre_block_socket.index
# a bounding box used to improve efficiency x_min, x_max, y_min, y_max
self.bounding_box = None
# used to create this block, but for first block it is nonsense
if block_index != 0:
self.positive_lanes = self.pre_block_socket.positive_road.get_lanes(self._global_network)
self.negative_lanes = self.pre_block_socket.negative_road.get_lanes(self._global_network)
self.positive_lane_num = len(self.positive_lanes)
self.negative_lane_num = len(self.negative_lanes)
self.positive_basic_lane = self.positive_lanes[-1] # most right or outside lane is the basic lane
self.negative_basic_lane = self.negative_lanes[-1] # most right or outside lane is the basic lane
self.lane_width = self.positive_basic_lane.width_at(0)
if self.render:
# render pre-load
self.road_texture = self.loader.loadTexture(AssetLoader.file_path("textures", "sci", "color.jpg"))
self.road_texture.setMinfilter(SamplerState.FT_linear_mipmap_linear)
self.road_texture.setAnisotropicDegree(8)
self.road_normal = self.loader.loadTexture(AssetLoader.file_path("textures", "sci", "normal.jpg"))
self.ts_color = TextureStage("color")
self.ts_normal = TextureStage("normal")
self.side_texture = self.loader.loadTexture(AssetLoader.file_path("textures", "sidewalk", "color.png"))
self.side_texture.setMinfilter(SamplerState.FT_linear_mipmap_linear)
self.side_texture.setAnisotropicDegree(8)
self.side_normal = self.loader.loadTexture(AssetLoader.file_path("textures", "sidewalk", "normal.png"))
self.sidewalk = self.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
def construct_block(self, root_render_np: NodePath, pg_physics_world: PGPhysicsWorld) -> bool:
"""
        Randomly construct a block; return False if the sampled roads cross (overlap).
"""
self.set_config(self.PARAMETER_SPACE.sample())
success = self._sample_topology()
self._create_in_world()
self.attach_to_pg_world(root_render_np, pg_physics_world)
return success
def destruct_block(self, pg_physics_world: PGPhysicsWorld):
self._clear_topology()
self.detach_from_pg_world(pg_physics_world)
self.node_path.removeNode()
self.dynamic_nodes.clear()
self.static_nodes.clear()
def _sample_topology(self) -> bool:
"""
Sample a new topology, clear the previous settings at first
"""
self.number_of_sample_trial += 1
self._clear_topology()
no_cross = self._try_plug_into_previous_block()
self._global_network.add(self.block_network)
return no_cross
def construct_from_config(self, config: Dict, root_render_np: NodePath, pg_physics_world: PGPhysicsWorld):
assert set(config.keys()) == self.PARAMETER_SPACE.parameters, \
"Make sure the parameters' name are as same as what defined in pg_space.py"
self.set_config(config)
success = self._sample_topology()
self._create_in_world()
self.attach_to_pg_world(root_render_np, pg_physics_world)
return success
def get_socket(self, index: Union[str, int]) -> BlockSocket:
if isinstance(index, int):
if index < 0 or index >= len(self._sockets):
raise ValueError("Socket of {}: index out of range".format(self.class_name))
socket_index = list(self._sockets)[index]
else:
assert index.startswith(self._block_name)
socket_index = index
assert socket_index in self._sockets, (socket_index, self._sockets.keys())
return self._sockets[socket_index]
def add_reborn_roads(self, reborn_roads: Union[List[Road], Road]):
"""
Use this to add spawn roads instead of modifying the list directly
"""
if isinstance(reborn_roads, List):
for road in reborn_roads:
self._add_one_reborn_road(road)
elif isinstance(reborn_roads, Road):
self._add_one_reborn_road(reborn_roads)
else:
raise ValueError("Only accept List[Road] or Road in this func")
def get_reborn_roads(self):
return self._reborn_roads
def get_reborn_lanes(self):
"""
return a 2-dim array [[]] to keep the lane index
"""
ret = []
for road in self._reborn_roads:
lanes = road.get_lanes(self.block_network)
ret.append(lanes)
return ret
def add_sockets(self, sockets: Union[List[BlockSocket], BlockSocket]):
"""
Use this to add sockets instead of modifying the list directly
"""
if isinstance(sockets, BlockSocket):
self._add_one_socket(sockets)
elif isinstance(sockets, List):
for socket in sockets:
self._add_one_socket(socket)
def set_part_idx(self, x):
"""
        Complex blocks must be divided into several parts, and each part is given a unique id according to the part idx
"""
self.PART_IDX = x
self.ROAD_IDX = 0 # clear the road idx when create new part
def add_road_node(self):
"""
Call me to get a new node name of this block.
It is more accurate and recommended to use road_node() to get a node name
"""
self.ROAD_IDX += 1
return self.road_node(self.PART_IDX, self.ROAD_IDX - 1)
def road_node(self, part_idx: int, road_idx: int) -> str:
"""
return standard road node name
"""
return self._block_name + str(part_idx) + self.DASH + str(road_idx) + self.DASH
def _add_one_socket(self, socket: BlockSocket):
assert isinstance(socket, BlockSocket), "Socket list only accept BlockSocket Type"
if socket.index is not None and not socket.index.startswith(self._block_name):
logging.warning(
"The adding socket has index {}, which is not started with this block name {}. This is dangerous! "
"Current block has sockets: {}.".format(socket.index, self._block_name, self.get_socket_indices())
)
if socket.index is None:
# if this socket is self block socket
socket.set_index(self._block_name, len(self._sockets))
self._sockets[socket.index] = socket
def _add_one_reborn_road(self, reborn_road: Road):
assert isinstance(reborn_road, Road), "Spawn roads list only accept Road Type"
self._reborn_roads.append(reborn_road)
def _clear_topology(self):
self._global_network -= self.block_network
self.block_network.graph.clear()
self.PART_IDX = 0
self.ROAD_IDX = 0
self._reborn_roads.clear()
self._sockets.clear()
def _try_plug_into_previous_block(self) -> bool:
"""
        Try to plug this block into the previous block's socket; return True on success, False if roads cross.
"""
raise NotImplementedError
"""------------------------------------- For Render and Physics Calculation ---------------------------------- """
def _create_in_world(self):
"""
Create NodePath and Geom node to perform both collision detection and render
"""
self.lane_line_node_path = NodePath(RigidBodyCombiner(self._block_name + "_lane_line"))
self.sidewalk_node_path = NodePath(RigidBodyCombiner(self._block_name + "_sidewalk"))
self.lane_node_path = NodePath(RigidBodyCombiner(self._block_name + "_lane"))
self.lane_vis_node_path = NodePath(RigidBodyCombiner(self._block_name + "_lane_vis"))
graph = self.block_network.graph
for _from, to_dict in graph.items():
for _to, lanes in to_dict.items():
self._add_lane_surface(_from, _to, lanes)
for _id, l in enumerate(lanes):
line_color = l.line_color
self._add_lane(l, _id, line_color)
self.lane_line_node_path.flattenStrong()
self.lane_line_node_path.node().collect()
self.sidewalk_node_path.flattenStrong()
self.sidewalk_node_path.node().collect()
self.sidewalk_node_path.hide(CamMask.ScreenshotCam)
# only bodies reparent to this node
self.lane_node_path.flattenStrong()
self.lane_node_path.node().collect()
self.lane_vis_node_path.flattenStrong()
self.lane_vis_node_path.node().collect()
self.lane_vis_node_path.hide(CamMask.DepthCam | CamMask.ScreenshotCam)
self.node_path = NodePath(self._block_name)
self.node_path.hide(CamMask.Shadow)
self.sidewalk_node_path.reparentTo(self.node_path)
self.lane_line_node_path.reparentTo(self.node_path)
self.lane_node_path.reparentTo(self.node_path)
self.lane_vis_node_path.reparentTo(self.node_path)
self.bounding_box = self.block_network.get_bounding_box()
def _add_lane(self, lane: AbstractLane, lane_id: int, colors: List[Vec4]):
parent_np = self.lane_line_node_path
lane_width = lane.width_at(0)
for k, i in enumerate([-1, 1]):
line_color = colors[k]
if lane.line_types[k] == LineType.NONE or (lane_id != 0 and k == 0):
if isinstance(lane, StraightLane):
continue
elif isinstance(lane, CircularLane) and lane.radius != lane_width / 2:
# for ramp render
continue
if lane.line_types[k] == LineType.CONTINUOUS or lane.line_types[k] == LineType.SIDE:
if isinstance(lane, StraightLane):
lane_start = lane.position(0, i * lane_width / 2)
lane_end = lane.position(lane.length, i * lane_width / 2)
middle = lane.position(lane.length / 2, i * lane_width / 2)
self._add_lane_line2bullet(lane_start, lane_end, middle, parent_np, line_color, lane.line_types[k])
elif isinstance(lane, CircularLane):
segment_num = int(lane.length / Block.CIRCULAR_SEGMENT_LENGTH)
for segment in range(segment_num):
lane_start = lane.position(segment * Block.CIRCULAR_SEGMENT_LENGTH, i * lane_width / 2)
lane_end = lane.position((segment + 1) * Block.CIRCULAR_SEGMENT_LENGTH, i * lane_width / 2)
middle = (lane_start + lane_end) / 2
self._add_lane_line2bullet(
lane_start, lane_end, middle, parent_np, line_color, lane.line_types[k]
)
# for last part
lane_start = lane.position(segment_num * Block.CIRCULAR_SEGMENT_LENGTH, i * lane_width / 2)
lane_end = lane.position(lane.length, i * lane_width / 2)
middle = (lane_start + lane_end) / 2
self._add_lane_line2bullet(lane_start, lane_end, middle, parent_np, line_color, lane.line_types[k])
if lane.line_types[k] == LineType.SIDE:
radius = lane.radius if isinstance(lane, CircularLane) else 0.0
segment_num = int(lane.length / Block.SIDEWALK_LENGTH)
for segment in range(segment_num):
lane_start = lane.position(segment * Block.SIDEWALK_LENGTH, i * lane_width / 2)
lane_end = lane.position((segment + 1) * Block.SIDEWALK_LENGTH, i * lane_width / 2)
middle = (lane_start + lane_end) / 2
self._add_sidewalk2bullet(lane_start, lane_end, middle, radius, lane.direction)
# for last part
lane_start = lane.position(segment_num * Block.SIDEWALK_LENGTH, i * lane_width / 2)
lane_end = lane.position(lane.length, i * lane_width / 2)
middle = (lane_start + lane_end) / 2
if norm(lane_start[0] - lane_end[0], lane_start[1] - lane_end[1]) > 1e-1:
self._add_sidewalk2bullet(lane_start, lane_end, middle, radius, lane.direction)
elif lane.line_types[k] == LineType.BROKEN:
straight = True if isinstance(lane, StraightLane) else False
segment_num = int(lane.length / (2 * Block.STRIPE_LENGTH))
for segment in range(segment_num):
lane_start = lane.position(segment * Block.STRIPE_LENGTH * 2, i * lane_width / 2)
lane_end = lane.position(
segment * Block.STRIPE_LENGTH * 2 + Block.STRIPE_LENGTH, i * lane_width / 2
)
middle = lane.position(
segment * Block.STRIPE_LENGTH * 2 + Block.STRIPE_LENGTH / 2, i * lane_width / 2
)
self._add_lane_line2bullet(
lane_start, lane_end, middle, parent_np, line_color, lane.line_types[k], straight
)
lane_start = lane.position(segment_num * Block.STRIPE_LENGTH * 2, i * lane_width / 2)
lane_end = lane.position(lane.length + Block.STRIPE_LENGTH, i * lane_width / 2)
middle = (lane_end[0] + lane_start[0]) / 2, (lane_end[1] + lane_start[1]) / 2
self._add_lane_line2bullet(
lane_start, lane_end, middle, parent_np, line_color, lane.line_types[k], straight
)
if straight:
lane_start = lane.position(0, i * lane_width / 2)
lane_end = lane.position(lane.length, i * lane_width / 2)
middle = lane.position(lane.length / 2, i * lane_width / 2)
self._add_box_body(lane_start, lane_end, middle, parent_np, lane.line_types[k], line_color)
def _add_box_body(self, lane_start, lane_end, middle, parent_np: NodePath, line_type, line_color):
length = norm(lane_end[0] - lane_start[0], lane_end[1] - lane_start[1])
if LineType.prohibit(line_type):
node_name = BodyName.White_continuous_line if line_color == LineColor.GREY else BodyName.Yellow_continuous_line
else:
node_name = BodyName.Broken_line
body_node = BulletGhostNode(node_name)
body_node.setActive(False)
body_node.setKinematic(False)
body_node.setStatic(True)
body_np = parent_np.attachNewNode(body_node)
shape = BulletBoxShape(Vec3(length / 2, Block.LANE_LINE_WIDTH / 2, Block.LANE_LINE_GHOST_HEIGHT))
body_np.node().addShape(shape)
mask = Block.CONTINUOUS_COLLISION_MASK if line_type != LineType.BROKEN else Block.BROKEN_COLLISION_MASK
body_np.node().setIntoCollideMask(BitMask32.bit(mask))
self.dynamic_nodes.append(body_np.node())
body_np.setPos(panda_position(middle, Block.LANE_LINE_GHOST_HEIGHT / 2))
direction_v = lane_end - lane_start
theta = -numpy.arctan2(direction_v[1], direction_v[0])
body_np.setQuat(LQuaternionf(numpy.cos(theta / 2), 0, 0, numpy.sin(theta / 2)))
def _add_lane_line2bullet(
self,
lane_start,
lane_end,
middle,
parent_np: NodePath,
color: Vec4,
line_type: LineType,
straight_stripe=False
):
length = norm(lane_end[0] - lane_start[0], lane_end[1] - lane_start[1])
if length <= 0:
return
if LineType.prohibit(line_type):
node_name = BodyName.White_continuous_line if color == LineColor.GREY else BodyName.Yellow_continuous_line
else:
node_name = BodyName.Broken_line
# add bullet body for it
if straight_stripe:
body_np = parent_np.attachNewNode(node_name)
else:
body_node = BulletGhostNode(node_name)
body_node.setActive(False)
body_node.setKinematic(False)
body_node.setStatic(True)
body_np = parent_np.attachNewNode(body_node)
# its scale will change by setScale
body_height = Block.LANE_LINE_GHOST_HEIGHT
shape = BulletBoxShape(
Vec3(length / 2 if line_type != LineType.BROKEN else length, Block.LANE_LINE_WIDTH / 2, body_height)
)
body_np.node().addShape(shape)
mask = Block.CONTINUOUS_COLLISION_MASK if line_type != LineType.BROKEN else Block.BROKEN_COLLISION_MASK
body_np.node().setIntoCollideMask(BitMask32.bit(mask))
self.dynamic_nodes.append(body_np.node())
# position and heading
body_np.setPos(panda_position(middle, Block.LANE_LINE_GHOST_HEIGHT / 2))
direction_v = lane_end - lane_start
theta = -numpy.arctan2(direction_v[1], direction_v[0])
body_np.setQuat(LQuaternionf(numpy.cos(theta / 2), 0, 0, numpy.sin(theta / 2)))
if self.render:
# For visualization
lane_line = self.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
lane_line.setScale(length, Block.LANE_LINE_WIDTH, Block.LANE_LINE_THICKNESS)
lane_line.setPos(Vec3(0, 0 - Block.LANE_LINE_GHOST_HEIGHT / 2))
lane_line.reparentTo(body_np)
body_np.set_color(color)
def _add_sidewalk2bullet(self, lane_start, lane_end, middle, radius=0.0, direction=0):
length = norm(lane_end[0] - lane_start[0], lane_end[1] - lane_start[1])
body_node = BulletRigidBodyNode(BodyName.Sidewalk)
body_node.setActive(False)
body_node.setKinematic(False)
body_node.setStatic(True)
side_np = self.sidewalk_node_path.attachNewNode(body_node)
shape = BulletBoxShape(Vec3(1 / 2, 1 / 2, 1 / 2))
body_node.addShape(shape)
body_node.setIntoCollideMask(BitMask32.bit(self.CONTINUOUS_COLLISION_MASK))
self.dynamic_nodes.append(body_node)
if radius == 0:
factor = 1
else:
if direction == 1:
factor = (1 - self.SIDEWALK_LINE_DIST / radius)
else:
factor = (1 + self.SIDEWALK_WIDTH / radius) * (1 + self.SIDEWALK_LINE_DIST / radius)
direction_v = lane_end - lane_start
vertical_v = (-direction_v[1], direction_v[0]) / numpy.linalg.norm(direction_v)
middle += vertical_v * (self.SIDEWALK_WIDTH / 2 + self.SIDEWALK_LINE_DIST)
side_np.setPos(panda_position(middle, 0))
theta = -numpy.arctan2(direction_v[1], direction_v[0])
        side_np.setQuat(LQuaternionf(numpy.cos(theta / 2), 0, 0, numpy.sin(theta / 2)))
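# --- illustrative note (independent of Panda3D) ---
# The block code above repeatedly builds LQuaternionf(cos(theta/2), 0, 0,
# sin(theta/2)) from a 2D heading angle: a pure yaw rotation about the z axis
# in (w, x, y, z) component order. A small numpy check of that identity:
import numpy as np

def yaw_quaternion(theta):
    return np.array([np.cos(theta / 2.0), 0.0, 0.0, np.sin(theta / 2.0)])  # (w, x, y, z)

q = yaw_quaternion(np.pi / 2)            # 90-degree heading
assert np.isclose(np.linalg.norm(q), 1)  # unit quaternion by construction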
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
# plt.style.use('seaborn-colorblind')
# plt.style.use('grayscale')
from tqdm import tqdm
def gauss(x, mu=0, sigma=1):
return (
1 / np.sqrt(2 * np.pi * sigma ** 2) * np.exp(-0.5 * (x - mu) ** 2 / sigma ** 2)
)
def f(x):
return (1 + x ** 2) ** (-0.5) * gauss(x)
if __name__ == "__main__":
np.random.seed(2019)
lower, upper = -5, 5
exact = 0.78964
V = upper - lower
    Ns = np.logspace(3, 6, 50, dtype=int)
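    # The setup above (volume V, exact value, sample sizes Ns) implies a plain
    # Monte Carlo estimate of the integral of f over [lower, upper]:
    # I_N = V * mean(f(x_i)) with x_i drawn uniformly. The loop below is a
    # hedged sketch of that estimator, not necessarily the author's exact code.
    errors = []
    for N in Ns:
        x = np.random.uniform(lower, upper, size=N)
        errors.append(abs(V * np.mean(f(x)) - exact))
    plt.loglog(Ns, errors, 'o', label='|MC estimate - exact|')
    plt.loglog(Ns, 1.0 / np.sqrt(Ns), '--', label='1/sqrt(N) scaling')
    plt.legend()
    plt.show()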
# -*- coding: utf-8 -*-
"""
pytests for resource handlers
"""
from datetime import datetime
import h5py
import numpy as np
import os
import pandas as pd
import pytest
from rex import TESTDATADIR
from rex.multi_file_resource import (MultiH5, MultiH5Path, MultiFileNSRDB,
MultiFileWTK)
from rex.renewable_resource import (NSRDB, WindResource)
from rex.utilities.exceptions import ResourceKeyError, ResourceRuntimeError
def NSRDB_res():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')
return NSRDB(path)
def NSRDB_2018():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb', 'nsrdb*2018.h5')
return MultiFileNSRDB(path)
def NSRDB_2018_list():
"""
Init NSRDB resource handler
"""
path = os.path.join(TESTDATADIR, 'nsrdb/nsrdb*2018.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileNSRDB(h5_files)
def WindResource_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
return WindResource(path)
def FiveMinWind_res():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk', 'wtk*m.h5')
return MultiFileWTK(path)
def FiveMinWind_list():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/wtk*m.h5')
path, h5_files = MultiH5Path._get_h5_files(path)
return MultiFileWTK(h5_files)
def wind_group():
"""
Init WindResource resource handler
"""
path = os.path.join(TESTDATADIR, 'wtk/ri_wtk_2012_group.h5')
return WindResource(path, group='group')
def check_res(res_cls):
"""
Run test on len and shape methods
"""
time_index = res_cls.time_index
meta = res_cls.meta
res_shape = (len(time_index), len(meta))
assert len(res_cls) == len(time_index)
assert res_cls.shape == res_shape
assert np.all(np.isin(['meta', 'time_index'],
res_cls.datasets))
assert np.all(~np.isin(['meta', 'time_index', 'coordinates'],
res_cls.resource_datasets))
def check_attrs(res_cls, dset):
"""
Check dataset attributes extraction
"""
truth = res_cls.get_attrs(dset=dset)
test = res_cls.attrs[dset]
msg = "{} attributes do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_scale_factor(dset)
test = res_cls.scale_factors[dset]
msg = "{} scale factors do not match!".format(dset)
assert truth == test, msg
truth = res_cls.get_units(dset)
test = res_cls.units[dset]
msg = "{} units do not match!".format(dset)
assert truth == test, msg
def check_properties(res_cls, dset):
"""
Check dataset properties extraction
"""
shape, dtype, chunks = res_cls.get_dset_properties(dset)
test = res_cls.shapes[dset]
msg = "{} shape does not match!".format(dset)
assert shape == test, msg
test = res_cls.dtypes[dset]
msg = "{} dtype does not match!".format(dset)
assert dtype == test, msg
test = res_cls.chunks[dset]
msg = "{} chunks do not match!".format(dset)
assert chunks == test, msg
def check_meta(res_cls):
"""
Run tests on meta data
"""
with h5py.File(res_cls.h5_file, 'r') as f:
ds_name = 'meta'
if res_cls._group:
ds_name = '{}/{}'.format(res_cls._group, ds_name)
baseline = pd.DataFrame(f[ds_name][...])
sites = slice(0, len(baseline))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = len(baseline)
sites = slice(int(sites / 3), int(sites / 2))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = 5
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
sites = sorted(np.random.choice(len(baseline), 5, replace=False))
meta = res_cls['meta', sites]
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values[sites], meta[cols].values)
meta = res_cls['meta']
cols = ['latitude', 'longitude', 'elevation', 'timezone']
assert np.allclose(baseline[cols].values, meta[cols].values)
assert isinstance(meta, pd.DataFrame)
meta_shape = meta.shape
max_sites = int(meta_shape[0] * 0.8)
# single site
meta = res_cls['meta', max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (1, meta_shape[1])
# site slice
meta = res_cls['meta', :max_sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (max_sites, meta_shape[1])
# site list
sites = sorted(np.random.choice(meta_shape[0], max_sites, replace=False))
meta = res_cls['meta', sites]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (len(sites), meta_shape[1])
# select columns
meta = res_cls['meta', :, ['latitude', 'longitude']]
assert isinstance(meta, pd.DataFrame)
assert meta.shape == (meta_shape[0], 2)
lat_lon = res_cls.lat_lon
assert np.allclose(baseline[['latitude', 'longitude']].values, lat_lon)
def check_time_index(res_cls):
"""
Run tests on time_index
"""
time_index = res_cls['time_index']
time_shape = time_index.shape
assert isinstance(time_index, pd.DatetimeIndex)
assert str(time_index.tz) == 'UTC'
# single timestep
time_index = res_cls['time_index', 50]
assert isinstance(time_index, datetime)
# time slice
time_index = res_cls['time_index', 100:200]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (100,)
# list of timesteps
steps = sorted(np.random.choice(time_shape[0], 50, replace=False))
time_index = res_cls['time_index', steps]
assert isinstance(time_index, pd.DatetimeIndex)
assert time_index.shape == (50,)
def check_dset(res_cls, ds_name):
"""
Run tests on dataset ds_name
"""
ds_shape = res_cls.shape
max_sites = int(ds_shape[1] * 0.8)
arr = res_cls[ds_name]
ds = res_cls[ds_name]
assert isinstance(ds, np.ndarray)
assert ds.shape == ds_shape
assert np.allclose(arr, ds)
# single site all time
ds = res_cls[ds_name, :, 1]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0],)
# single time all sites
ds = res_cls[ds_name, 10]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[1],)
assert np.allclose(arr[10], ds)
# single value
ds = res_cls[ds_name, 10, max_sites]
assert isinstance(ds, (np.integer, np.floating))
assert np.allclose(arr[10, max_sites], ds)
# site slice
sites = slice(int(max_sites / 2), max_sites)
ds = res_cls[ds_name, :, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0], sites.stop - sites.start)
assert np.allclose(arr[:, sites], ds)
# time slice
ds = res_cls[ds_name, 10:20]
assert isinstance(ds, np.ndarray)
assert ds.shape == (10, ds_shape[1])
assert np.allclose(arr[10:20], ds)
# slice in time and space
ds = res_cls[ds_name, 100:200, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (100, sites.stop - sites.start)
assert np.allclose(arr[100:200, sites], ds)
# site list
sites = sorted(np.random.choice(ds_shape[1], max_sites, replace=False))
ds = res_cls[ds_name, :, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0], len(sites))
assert np.allclose(arr[:, sites], ds)
# site list single time
sites = sorted(np.random.choice(ds_shape[1], max_sites, replace=False))
ds = res_cls[ds_name, 0, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (len(sites),)
assert np.allclose(arr[0, sites], ds)
# time list
times = sorted(np.random.choice(ds_shape[0], 100, replace=False))
ds = res_cls[ds_name, times]
assert isinstance(ds, np.ndarray)
assert ds.shape == (100, ds_shape[1])
assert np.allclose(arr[times], ds)
# time list single site
ds = res_cls[ds_name, times, 0]
assert isinstance(ds, np.ndarray)
assert ds.shape == (100,)
assert np.allclose(arr[times, 0], ds)
# boolean mask
mask = res_cls.time_index.month == 7
ds = res_cls[ds_name, mask]
assert isinstance(ds, np.ndarray)
assert ds.shape == (mask.sum(), ds_shape[1])
assert np.allclose(arr[mask], ds)
# time and site lists
with pytest.raises(IndexError):
assert res_cls[ds_name, times, sites]
def check_dset_handler(res_cls, ds_name):
"""
Run tests on dataset ds_name
"""
ds_shape = res_cls.shape
max_sites = int(ds_shape[1] * 0.8)
dset = res_cls.open_dataset(ds_name)
arr = dset[...]
ds = res_cls[ds_name]
assert isinstance(ds, np.ndarray)
assert ds.shape == ds_shape
assert np.allclose(arr, ds)
# single site all time
ds = dset[:, 1]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0],)
# single time all sites
ds = dset[10]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[1],)
assert np.allclose(arr[10], ds)
# single value
ds = dset[10, max_sites]
assert isinstance(ds, (np.integer, np.floating))
assert np.allclose(arr[10, max_sites], ds)
# site slice
sites = slice(int(max_sites / 2), max_sites)
ds = dset[:, sites]
assert isinstance(ds, np.ndarray)
assert ds.shape == (ds_shape[0], sites.stop - sites.start)
assert np.allclose(arr[:, sites], ds)
# time slice
ds = dset[10:20]
assert isinstance(ds, np.ndarray)
assert ds.shape == (10, ds_shape[1])
    assert np.allclose(arr[10:20], ds)
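# Note on the "time and site lists" IndexError asserted in check_dset above:
# numpy interprets arr[times, sites] with two integer lists as paired element
# indexing, so lists of different lengths raise an error instead of selecting a
# sub-grid. Selecting the full time x site block needs np.ix_:
def select_subgrid(arr, times, sites):
    return arr[np.ix_(times, sites)]  # shape (len(times), len(sites))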
import numpy as np
from scipy.special import erf
from scipy.interpolate import interp1d
from scipy.integrate import quad,cumtrapz,trapz,solve_ivp
from scipy.optimize import fmin,brentq,fixed_point
import scipy.linalg as linalg
import time
# helper function for the peak number density (BBKS 1986)
def f(x):
return np.where(x>.03,
.5*(x**3-3*x)*(erf(2.5**.5*x)+erf(2.5**.5*.5*x))+(2./(5*np.pi))**.5*((7.75*x**2+1.6)*np.exp(-.625*x**2)+(.5*x**2-1.6)*np.exp(-2.5*x**2)),
3**5*5**1.5/(7*2**11*(2*np.pi)**.5)*x**8*(1-.625*x**2)
)
# moments of the power spectrum
def sigmaj2(j,k,Pk):
integrand = Pk*k**(2*j)
return trapz(integrand,x=np.log(k),axis=0)
def sigmaj(j,k,Pk):
return np.sqrt(sigmaj2(j,k,Pk))
# ellipsoidal collapse threshold
def ec_func(f,e,p):
return 1+0.47*(5*(e**2-p*np.abs(p))*f**2)**0.615
def ec_scale(e,p):
func = lambda f: ec_func(f,e,p)
try:
return fixed_point(func,1.1,maxiter=25)
except:
return 0
dc = 3./5*(3*np.pi/2)**(2./3) # linear delta at collapse
dv = 3./5*(3./4 + 9*np.pi/8)**(2./3) # linear delta at virialization
dt = 3./5*(3*np.pi/4)**(2./3) # linear delta at turnaround
# window functions
def sinc(x):
return np.where(x > 0.1, np.divide(np.sin(x),x,where=x>0.1), 1. - x**2/6. + x**4/120. - x**6/5040. + x**8/362880.)
def W(x):
return 3*np.where(x > 0.1, np.divide(np.sin(x)-x*np.cos(x),x**3,where=x>0.1), 1./3. - x**2/30. + x**4/840. - x**6/45360. + x**8/3991680.)
# peak ellipticity/prolateness distributions
def fep(e,p,nu):
return 1125./np.sqrt(10*np.pi)*e*(e**2-p**2)*nu**5*np.exp(-5./2*nu**2*(3*e**2+p**2))
def fe(e,nu):
    return 45*e*np.exp(-10*e**2*nu**2)*nu**2*(e*np.sqrt(10./np.pi)*nu + np.exp(2.5*e**2*nu**2)*(5*e**2*nu**2 - 1)*erf(np.sqrt(2.5)*e*nu))
from __future__ import print_function, division
import warnings
warnings.filterwarnings("ignore")
import os.path
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import dlib
import os, re
import argparse
def rect_to_bb(rect):
# take a bounding predicted by dlib and convert it
# to the format (x, y, w, h) as we would normally do
# with OpenCV
x = rect.left()
y = rect.top()
w = rect.right() - x
h = rect.bottom() - y
# return a tuple of (x, y, w, h)
return (x, y, w, h)
def detect_face(image_paths, SAVE_DETECTED_AT, default_max_size=800,size = 300, padding = 0.25):
cnn_face_detector = dlib.cnn_face_detection_model_v1('dlib_models/mmod_human_face_detector.dat')
sp = dlib.shape_predictor('dlib_models/shape_predictor_5_face_landmarks.dat')
base = 2000 # largest width and height
for index, image_path in enumerate(image_paths):
if index % 1000 == 0:
print('---%d/%d---' %(index, len(image_paths)))
img = dlib.load_rgb_image(image_path)
old_height, old_width, _ = img.shape
if old_width > old_height:
new_width, new_height = default_max_size, int(default_max_size * old_height / old_width)
else:
new_width, new_height = int(default_max_size * old_width / old_height), default_max_size
img = dlib.resize_image(img, rows=new_height, cols=new_width)
dets = cnn_face_detector(img, 1)
num_faces = len(dets)
if num_faces == 0:
print("Sorry, there were no faces found in '{}'".format(image_path))
continue
# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
rect = detection.rect
faces.append(sp(img, rect))
images = dlib.get_face_chips(img, faces, size=size, padding = padding)
for idx, image in enumerate(images):
img_name = image_path.split("/")[-1]
path_sp = img_name.split(".")
face_name = os.path.join(SAVE_DETECTED_AT, path_sp[0] + "_" + "face" + str(idx) + "." + path_sp[-1])
dlib.save_image(image, face_name)
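# Illustrative call (paths are hypothetical; the dlib model files above must exist):
# >>> detect_face(['imgs/a.jpg', 'imgs/b.jpg'], SAVE_DETECTED_AT='cropped_faces/')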
def predidct_age_gender_race(save_prediction_at, imgs_path = 'cropped_faces/'):
img_names = [os.path.join(imgs_path, x) for x in os.listdir(imgs_path)] # ! not sorted with python default "sorted"
img_names = sorted(img_names) # ! python sort is not true natural order sorting
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_fair_7 = torchvision.models.resnet34(pretrained=True)
model_fair_7.fc = nn.Linear(model_fair_7.fc.in_features, 18)
model_fair_7.load_state_dict(torch.load('fair_face_models/res34_fair_align_multi_7_20190809.pt')) # fairface_alldata_20191111
model_fair_7 = model_fair_7.to(device)
model_fair_7.eval()
model_fair_4 = torchvision.models.resnet34(pretrained=True)
model_fair_4.fc = nn.Linear(model_fair_4.fc.in_features, 18)
model_fair_4.load_state_dict(torch.load('fair_face_models/fairface_alldata_4race_20191111.pt'))
model_fair_4 = model_fair_4.to(device)
model_fair_4.eval()
trans = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# img pth of face images
face_names = []
# list within a list. Each sublist contains scores for all races. Take max for predicted race
race_scores_fair = []
gender_scores_fair = []
age_scores_fair = []
race_preds_fair = []
gender_preds_fair = []
age_preds_fair = []
race_scores_fair_4 = []
race_preds_fair_4 = []
for index, img_name in enumerate(img_names):
if index % 1000 == 0:
print("Predicting... {}/{}".format(index, len(img_names)))
face_names.append(img_name)
image = dlib.load_rgb_image(img_name)
image = trans(image)
image = image.view(1, 3, 224, 224) # reshape image to match model dimensions (1 batch size)
image = image.to(device)
# fair
outputs = model_fair_7(image)
outputs = outputs.cpu().detach().numpy()
outputs = np.squeeze(outputs)
race_outputs = outputs[:7]
gender_outputs = outputs[7:9]
age_outputs = outputs[9:18]
race_score = np.exp(race_outputs) / np.sum(np.exp(race_outputs))
gender_score = np.exp(gender_outputs) / np.sum(np.exp(gender_outputs))
age_score = np.exp(age_outputs) / np.sum(np.exp(age_outputs))
race_pred = np.argmax(race_score)
gender_pred = np.argmax(gender_score)
age_pred = np.argmax(age_score)
race_scores_fair.append(race_score)
gender_scores_fair.append(gender_score)
age_scores_fair.append(age_score)
race_preds_fair.append(race_pred)
gender_preds_fair.append(gender_pred)
age_preds_fair.append(age_pred)
# fair 4 class
outputs = model_fair_4(image)
outputs = outputs.cpu().detach().numpy()
outputs = np.squeeze(outputs)
race_outputs = outputs[:4]
race_score = np.exp(race_outputs) / np.sum(np.exp(race_outputs))
race_pred = np.argmax(race_score)
race_scores_fair_4.append(race_score)
race_preds_fair_4.append(race_pred)
# !!! end prediction
result = pd.DataFrame([face_names,
race_preds_fair,
race_preds_fair_4,
gender_preds_fair,
age_preds_fair,
race_scores_fair, race_scores_fair_4,
gender_scores_fair,
age_scores_fair, ]).T
result.columns = ['face_name_align',
'race_preds_fair',
'race_preds_fair_4',
'gender_preds_fair',
'age_preds_fair',
'race_scores_fair',
'race_scores_fair_4',
'gender_scores_fair',
'age_scores_fair']
# ! race with 7 labels
result.loc[result['race_preds_fair'] == 0, 'race'] = 'White'
result.loc[result['race_preds_fair'] == 1, 'race'] = 'Black'
result.loc[result['race_preds_fair'] == 2, 'race'] = 'Latino_Hispanic'
result.loc[result['race_preds_fair'] == 3, 'race'] = 'East Asian'
result.loc[result['race_preds_fair'] == 4, 'race'] = 'Southeast Asian'
result.loc[result['race_preds_fair'] == 5, 'race'] = 'Indian'
result.loc[result['race_preds_fair'] == 6, 'race'] = 'Middle Eastern'
# race fair 4
result.loc[result['race_preds_fair_4'] == 0, 'race4'] = 'White'
result.loc[result['race_preds_fair_4'] == 1, 'race4'] = 'Black'
result.loc[result['race_preds_fair_4'] == 2, 'race4'] = 'Asian'
result.loc[result['race_preds_fair_4'] == 3, 'race4'] = 'Indian'
# gender
result.loc[result['gender_preds_fair'] == 0, 'gender'] = 'Male'
result.loc[result['gender_preds_fair'] == 1, 'gender'] = 'Female'
# age
result.loc[result['age_preds_fair'] == 0, 'age'] = '0-2'
result.loc[result['age_preds_fair'] == 1, 'age'] = '3-9'
result.loc[result['age_preds_fair'] == 2, 'age'] = '10-19'
result.loc[result['age_preds_fair'] == 3, 'age'] = '20-29'
result.loc[result['age_preds_fair'] == 4, 'age'] = '30-39'
result.loc[result['age_preds_fair'] == 5, 'age'] = '40-49'
result.loc[result['age_preds_fair'] == 6, 'age'] = '50-59'
result.loc[result['age_preds_fair'] == 7, 'age'] = '60-69'
result.loc[result['age_preds_fair'] == 8, 'age'] = '70+'
result[['face_name_align',
'race', 'race4',
'gender', 'age',
'race_scores_fair', 'race_scores_fair_4',
'gender_scores_fair', 'age_scores_fair']].to_csv(save_prediction_at, index=False)
print("saved results at ", save_prediction_at)
# ! convert scores into csv style
    race_scores_fair_np = np.array(race_scores_fair)
"""
Classes for GP models with Stan
"""
from argparse import Namespace
import time
import numpy as np
import copy
from bo.pp.pp_core import DiscPP
import bo.pp.stan.gp_hier2 as gpstan2
import bo.pp.stan.gp_hier3 as gpstan3
import bo.pp.stan.gp_hier2_matern as gpstan2_matern
from bo.pp.gp.gp_utils import kern_exp_quad, kern_matern32, \
get_cholesky_decomp, solve_upper_triangular, solve_lower_triangular, \
sample_mvn
from bo.util.print_utils import suppress_stdout_stderr
class StanGpPP(DiscPP):
""" Hierarchical GPs implemented with Stan """
def __init__(self, data=None, modelp=None, printFlag=True):
""" Constructor """
self.set_model_params(modelp)
self.set_data(data)
self.ndimx = self.modelp.ndimx
self.set_model()
super(StanGpPP,self).__init__()
if printFlag:
self.print_str()
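  # Illustrative construction (a sketch; `data` is assumed to be a Namespace with
  # fields X of shape (n, ndimx) and y of shape (n, 1)):
  # >>> data = Namespace(X=np.random.rand(20, 1), y=np.random.rand(20, 1))
  # >>> modelp = Namespace(ndimx=1, model_str='optfixedsig', gp_mean_transf_str='constant')
  # >>> gp = StanGpPP(data=data, modelp=modelp)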
def set_model_params(self,modelp):
if modelp is None:
modelp = Namespace(ndimx=1, model_str='optfixedsig',
gp_mean_transf_str='constant')
if modelp.model_str=='optfixedsig':
modelp.kernp = Namespace(u1=.1, u2=5., n1=10., n2=10., sigma=1e-5)
modelp.infp = Namespace(niter=1000)
elif modelp.model_str=='opt' or modelp.model_str=='optmatern32':
modelp.kernp = Namespace(ig1=1., ig2=5., n1=10., n2=20., n3=.01,
n4=.01)
modelp.infp = Namespace(niter=1000)
elif modelp.model_str=='samp' or modelp.model_str=='sampmatern32':
modelp.kernp = Namespace(ig1=1., ig2=5., n1=10., n2=20., n3=.01,
n4=.01)
modelp.infp = Namespace(niter=1500, nwarmup=500)
self.modelp = modelp
def set_data(self, data):
""" Set self.data """
if data is None:
pass #TODO: handle case where there's no data
self.data_init = copy.deepcopy(data)
self.data = self.get_transformed_data(self.data_init,
self.modelp.gp_mean_transf_str)
def get_transformed_data(self, data, transf_str='linear'):
""" Transform data, for non-zero-mean GP """
newdata = Namespace(X=data.X)
if transf_str=='linear':
mmat,_,_,_ = np.linalg.lstsq(np.concatenate([data.X,
np.ones((data.X.shape[0],1))],1), data.y.flatten(), rcond=None)
self.gp_mean_vec = lambda x: np.matmul(np.concatenate([x,
np.ones((x.shape[0],1))],1), mmat)
newdata.y = data.y - self.gp_mean_vec(data.X).reshape(-1,1)
if transf_str=='constant':
yconstant = data.y.mean()
#yconstant = 0.
self.gp_mean_vec = lambda x: np.array([yconstant for xcomp in x])
newdata.y = data.y - self.gp_mean_vec(data.X).reshape(-1,1)
return newdata
def set_model(self):
""" Set GP regression model """
self.model = self.get_model()
def get_model(self):
""" Returns GPRegression model """
if self.modelp.model_str=='optfixedsig':
return gpstan3.get_model(print_status=False)
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return gpstan2.get_model(print_status=False)
elif self.modelp.model_str=='optmatern32' or \
self.modelp.model_str=='sampmatern32':
return gpstan2_matern.get_model(print_status=False)
def infer_post_and_update_samples(self, seed=5000012, print_result=False):
""" Update self.sample_list """
data_dict = self.get_stan_data_dict()
with suppress_stdout_stderr():
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' \
or self.modelp.model_str=='optmatern32':
stanout = self.model.optimizing(data_dict, iter=self.modelp.infp.niter,
#seed=seed, as_vector=True, algorithm='Newton')
seed=seed, as_vector=True, algorithm='LBFGS')
elif self.modelp.model_str=='samp' or self.modelp.model_str=='sampmatern32':
stanout = self.model.sampling(data_dict, iter=self.modelp.infp.niter +
self.modelp.infp.nwarmup, warmup=self.modelp.infp.nwarmup, chains=1,
seed=seed, refresh=1000)
print('-----')
self.sample_list = self.get_sample_list_from_stan_out(stanout)
if print_result: self.print_inference_result()
def get_stan_data_dict(self):
""" Return data dict for stan sampling method """
if self.modelp.model_str=='optfixedsig':
return {'u1':self.modelp.kernp.u1, 'u2':self.modelp.kernp.u2,
'n1':self.modelp.kernp.n1, 'n2':self.modelp.kernp.n2,
'sigma':self.modelp.kernp.sigma, 'D':self.ndimx,
'N':len(self.data.X), 'x':self.data.X, 'y':self.data.y.flatten()}
elif self.modelp.model_str=='opt' or self.modelp.model_str=='samp':
return {'ig1':self.modelp.kernp.ig1, 'ig2':self.modelp.kernp.ig2,
'n1':self.modelp.kernp.n1, 'n2':self.modelp.kernp.n2,
'n3':self.modelp.kernp.n3, 'n4':self.modelp.kernp.n4,
'D':self.ndimx, 'N':len(self.data.X), 'x':self.data.X,
'y':self.data.y.flatten()}
elif self.modelp.model_str=='optmatern32' or \
self.modelp.model_str=='sampmatern32':
return {'ig1':self.modelp.kernp.ig1, 'ig2':self.modelp.kernp.ig2,
'n1':self.modelp.kernp.n1, 'n2':self.modelp.kernp.n2,
'n3':self.modelp.kernp.n3, 'n4':self.modelp.kernp.n4,
'D':self.ndimx, 'N':len(self.data.X), 'x':self.data.X,
'y':self.data.y.flatten(), 'covid':2}
def get_sample_list_from_stan_out(self, stanout):
""" Convert stan output to sample_list """
if self.modelp.model_str=='optfixedsig':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=self.modelp.kernp.sigma)]
elif self.modelp.model_str=='opt' or self.modelp.model_str=='optmatern32':
return [Namespace(ls=stanout['rho'], alpha=stanout['alpha'],
sigma=stanout['sigma'])]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
sdict = stanout.extract(['rho','alpha','sigma'])
return [Namespace(ls=sdict['rho'][i], alpha=sdict['alpha'][i],
sigma=sdict['sigma'][i]) for i in range(sdict['rho'].shape[0])]
def print_inference_result(self):
""" Print results of stan inference """
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='optmatern32':
print('*ls pt est = '+str(self.sample_list[0].ls)+'.')
print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')
print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
ls_arr = np.array([ns.ls for ns in self.sample_list])
alpha_arr = np.array([ns.alpha for ns in self.sample_list])
sigma_arr = np.array([ns.sigma for ns in self.sample_list])
print('*ls mean = '+str(ls_arr.mean())+'.')
print('*ls std = '+str(ls_arr.std())+'.')
print('*alpha mean = '+str(alpha_arr.mean())+'.')
print('*alpha std = '+str(alpha_arr.std())+'.')
print('*sigma mean = '+str(sigma_arr.mean())+'.')
print('*sigma std = '+str(sigma_arr.std())+'.')
print('-----')
def sample_pp_post_pred(self, nsamp, input_list, full_cov=False, nloop=None):
""" Sample from posterior predictive of PP.
Inputs:
input_list - list of np arrays size=(-1,)
Returns:
list (len input_list) of np arrays (size=(nsamp,1))."""
if self.modelp.model_str=='optfixedsig' or self.modelp.model_str=='opt' or \
self.modelp.model_str=='optmatern32':
nloop = 1
sampids = [0]
elif self.modelp.model_str=='samp' or \
self.modelp.model_str=='sampmatern32':
if nloop is None: nloop=nsamp
nsamp = int(nsamp/nloop)
sampids = np.random.randint(len(self.sample_list), size=(nloop,))
ppred_list = []
for i in range(nloop):
samp = self.sample_list[sampids[i]]
postmu, postcov = self.gp_post(self.data.X, self.data.y,
np.stack(input_list), samp.ls, samp.alpha, samp.sigma, full_cov)
if full_cov:
ppred_list.extend(list(sample_mvn(postmu, postcov, nsamp)))
else:
ppred_list.extend(list(np.random.normal(postmu.reshape(-1,),
postcov.reshape(-1,), size=(nsamp, len(input_list)))))
    return self.get_reverse_transform(list(np.stack(ppred_list).T), input_list)
'''
Python script to calculate statistical functions
like the autocorrelation function (ACF), the second-order structure function (SF)
and so on.
Developed by <NAME>.
7/23/2021
8/19/2021
'''
# modules
import numpy as np
from scipy.fft import fft, ifft, fftn, ifftn, fftfreq, fftshift, ifftshift
from scipy.fft import rfftfreq
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
#import seaborn as sns
#sns.set_palette('gist_earth')
# Class StatsVfield
class StatsVfield():
def __init__(self, data, axes, derr=[]) -> None:
self.data = data
self.datashape = data.shape
self.ndim = len(data.shape)
self.derr = derr
if type(axes) == list:
if len(axes) != self.ndim:
print ('ERROR: Dimension of given data and axes do not match.')
return
elif type(axes).__name__ == 'ndarray':
if len(axes.shape) != self.ndim:
print ('ERROR: Dimension of given data and axes do not match.')
return
else:
print ('ERROR: axes must be list or ndarray containing xi, or ndarray of x.')
return
if self.ndim == 1:
self.nx = self.datashape[0]
if type(axes) == list:
self.x = axes[0]
elif type(axes).__name__ == 'ndarray':
self.x = axes
self.dx = self.x[1] - self.x[0]
elif self.ndim == 2:
self.nx, self.ny = self.datashape
self.x, self.y = axes
self.dx = self.x[1] - self.x[0]
self.dy = self.y[1] - self.y[0]
elif self.ndim == 3:
self.nx, self.ny, self.nz = self.datashape
self.x, self.y, self.z = axes
self.dx = self.x[1] - self.x[0]
self.dy = self.y[1] - self.y[0]
self.dz = self.z[1] - self.z[0]
elif self.ndim > 3:
print ('ERROR: Dimension must be <= 3.')
return
self.acf = []
self.sf = []
self.tau_x = []
def calc_sf(self, p_order=2):
'''
Calculate the second-order structure function (SF).
Other orders will be supported in future.
Usage
-----
vf = StatsVfield(data, axes)
vf.calc_sf()
vf.sf # call the calculated SF
Parameters
----------
        - p_order: Order of the structure function. Currently not used.
'''
if self.ndim == 1:
if len(self.derr) == 0:
self.sf = sf_1d(self.data)
else:
self.sf, self.sf_err = sf_1d(self.data, derr=self.derr)
elif self.ndim == 2:
if len(self.derr) == 0:
self.sf = sf_2d(self.data)
else:
self.sf, self.sf_err = sf_2d(self.data, derr=self.derr)
elif self.ndim == 3:
print ('3D is being developed.')
return
self.get_tau(realfreq=True)
def calc_ac(self, method='FFT', realfreq=False):
'''
Calculate autocorrelation (AC).
Usage
-----
vf = StatsVfield(data, axes)
vf.calc_ac()
vf.acf # call the calculated ACF
Parameters
----------
        - method: Calculation method; 'FFT' or 'iterative'. FFT mode uses the Fast Fourier Transform, while
          iterative mode calculates the ACF by sliding the input data set iteratively.
        - realfreq: If True, only the ACF at positive tau is returned. Only applies to one-dimensional data sets.
'''
if self.ndim == 1:
if method == 'FFT':
self.acf = ac_fft1(self.data, realfreq=realfreq)
elif method == 'iterative':
if len(self.derr) == 0:
self.acf = ac_1d(self.data, realfreq=realfreq)
else:
self.acf, self.acf_err = ac_1d(self.data, derr=self.derr, realfreq=realfreq)
elif self.ndim == 2:
if method == 'FFT':
self.acf = ac_fft2(self.data)
elif method == 'iterative':
if len(self.derr) == 0:
self.acf = ac_2d(self.data)
else:
self.acf, self.acf_err = ac_2d(self.data, derr=self.derr)
#if len(self.tau_x) == 0:
self.get_tau(realfreq=realfreq)
def calc_ps(self, method='FFT', realfreq=False):
'''
Calculate power-spectrum (PS). Still under development.
Usage
-----
        vf = StatsVfield(data, axes)
        vf.calc_ps()
        vf.ps     # call the calculated power spectrum
        vf.freq_x # corresponding frequencies
'''
if self.ndim == 1:
self.ps = pspec_1d(self.data, realfreq=realfreq)
elif self.ndim == 2:
print ('Still being developed, sorry.')
#self.ps = pspec_2d(self.data, realfreq=realfreq)
if realfreq:
self.freq_x = rfftfreq(self.nx + self.nx - 1, self.dx) # nx -1 is for zero-padding
else:
self.freq_x = fftshift(fftfreq(self.nx + self.nx - 1, self.dx))
#print(len(self.ps), len(self.freq_x))
def get_tau(self, realfreq=False):
'''
Get tau for ACF and SF.
Parameters
----------
        - realfreq: For a one-dimensional data set, if True, only positive tau is returned.
'''
if self.ndim == 1:
if realfreq:
self.tau_x = np.arange(0, self.nx, 1)*self.dx
else:
self.tau_x = np.concatenate([np.arange(-(self.nx - 1), 0, 1)*self.dx, np.arange(0, self.nx, 1)*self.dx])
elif self.ndim == 2:
self.tau_x = np.concatenate([np.arange(-(self.nx - 1), 0, 1)*self.dx, np.arange(0, self.nx, 1)*self.dx])
self.tau_y = np.concatenate([np.arange(-(self.ny - 1), 0, 1)*self.dy, np.arange(0, self.ny, 1)*self.dy])
elif self.ndim == 3:
print ('3D is being developed.')
return
def collapse(self):
if self.ndim == 1:
print ('Data is one dimensional. No more collapse.')
return
elif self.ndim == 2:
tau_xx, tau_yy = np.meshgrid(self.tau_x, self.tau_y)
tau_rr = np.sqrt(tau_xx*tau_xx + tau_yy*tau_yy)
tau_sort = np.unique(tau_rr)
self.tau_col = tau_sort
if len(self.acf) != 0:
self.acf_col = np.array([
np.nanmean(self.acf[tau_rr == tau_i]) for tau_i in tau_sort])
self.acf_err_col = np.array([
np.sqrt(np.nansum(self.acf_err[tau_rr == tau_i]**2))/np.count_nonzero(~np.isnan(self.acf_err[tau_rr == tau_i]))
for tau_i in tau_sort])
if len(self.sf) !=0:
self.sf_col = np.array([
np.nanmean(self.sf[tau_rr == tau_i]) for tau_i in tau_sort])
self.sf_err_col = np.array([
np.sqrt(np.nansum(self.sf_err[tau_rr == tau_i]**2))/np.count_nonzero(~np.isnan(self.sf_err[tau_rr == tau_i]))
for tau_i in tau_sort])
def get_tauzero(self):
if self.ndim == 2:
print ('Currently get_tauzero only supports one-dimensional data.')
return
if 'acf' in self.__dict__.keys():
indx = [i for i in range(len(self.acf)-1) if self.acf[i]*self.acf[i+1] <=0]
if len(indx) > 0:
indx_tau0 = indx[0]
self.tau0 = self.tau_x[indx_tau0]
else:
self.tau0 = np.nan
else:
print ('ACF is not found. Calculate ACF first by vf.calc_ac().')
return
def sf_plawfit(self, pini, taurange=[], cutzero=True):
'''
'''
from scipy.optimize import leastsq
# fit function
# power law
plaw = lambda x, param: param[0]*(x**(param[1]))
errfunc = lambda param, x, y: plaw(x, param) - y
#res = leastsq(errfunc, [1e-3, -3], args=(freq_fft[1:], np.abs(res_spec[1:])**2.))
# linear
fln = lambda x, param: param[0] + param[1]*x
errfunc2 = lambda param, x, y: fln(x, param) - y
# fit param
if cutzero:
tau_fit = self.tau_x[1:]
sf_fit = self.sf[1:]
else:
tau_fit = self.tau_x
sf_fit = self.sf
# fitting range
if len(taurange) == 2:
where_fit = (tau_fit > taurange[0]) & (tau_fit <= taurange[-1])
sf_fit = sf_fit[where_fit]
tau_fit = tau_fit[where_fit]
#res = leastsq(errfunc2, [-3, -3], args=(np.log10(tau_sf[where_fit]), np.log10(sf_slice[where_fit])))
#p_out = res[0]
res = leastsq(errfunc2, pini, args=(np.log10(tau_fit), np.log10(sf_fit)))
pout = res[0]
self.fit_results = dict({'pini': pini, 'pout': pout})
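    # Illustrative fit (assumes calc_sf has already been run): fit log10(SF) against
    # log10(tau) with an initial guess of intercept 0 and slope 2/3 over 0.1 < tau <= 1.
    # >>> vf.sf_plawfit([0., 2./3.], taurange=[0.1, 1.])
    # >>> vf.fit_results['pout']   # [log10 amplitude, power-law index]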
# functions for debug
def gaussian2D(x, y, A, mx, my, sigx, sigy, pa=0, peak=True):
'''
    Generate a 2D Gaussian
Parameters
----------
x: x value (coordinate)
y: y value
    A: Amplitude; the peak value if peak is True, otherwise the integrated value.
mx, my: mean values
sigx, sigy: standard deviations
pa: position angle [deg]. Counterclockwise is positive.
'''
x, y = rotate2d(x,y,pa)
mx, my = rotate2d(mx, my, pa)
if peak:
coeff = A
else:
coeff = A/(2.0*np.pi*sigx*sigy)
expx = np.exp(-(x-mx)*(x-mx)/(2.0*sigx*sigx))
expy = np.exp(-(y-my)*(y-my)/(2.0*sigy*sigy))
gauss=coeff*expx*expy
return gauss
# main functions
# autocorrelation function
def ac_1d(data, derr=[], realfreq=True):
'''
Calculate auto-correlation.
Parameters
----------
Return
------
'''
#from itertools import product
nx = len(data)
d_in = data.copy() - np.nanmean(data)
if realfreq:
# auto-correlation
d_ac = np.array([
np.nanmean(d_in[0:nx-j]*d_in[j:nx]) for j in range(nx)])/np.nanvar(data)
else:
# zero-padding
d_in = np.concatenate([d_in, np.zeros(nx-1)])
d_shift = data.copy() - np.nanmean(data)
d_shift = np.concatenate([np.zeros(nx-1), d_shift])
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
nx_out = 2*nx - 1
d_ac = np.array([
np.nanmean(d_in[0:nx_out-i]*d_shift[i:nx_out]) for i in range(nx_out)
])/np.nanvar(data)
if len(derr) == 0:
return d_ac
else:
# error propagation
if realfreq:
d_in_err = derr.copy() # assuming error of mean can be ignored
d_ac_err = np.array([
np.sqrt(np.nansum((d_in[0:nx-j]*d_in_err[j:nx])**2\
+ (d_in[j:nx]*d_in_err[0:nx-j])**2 ))\
/np.count_nonzero(~np.isnan(d_in[0:nx-j]*d_in[j:nx])) for j in range(nx)])/np.nanvar(data)
else:
# zero-padding
d_in_err = np.concatenate([derr, np.zeros(nx-1)])
d_shift_err = np.concatenate([np.zeros(nx-1), derr])
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
# error of each element:
# (m1 +/- sig1)*(m2 +/- sig2) = m1*m2 +/- sqrt((m1*sig2)^2 + (m2*sig1)^2)
# error of mean
# sqrt(Sum(sig_i^2))/N
d_ac_err = np.array([
np.sqrt(np.nansum((d_in[0:nx_out-i]*d_shift_err[i:nx_out])**2 \
+ (d_in_err[0:nx_out-i]*d_shift[i:nx_out])**2))\
/np.count_nonzero(~np.isnan(d_in[0:nx_out-i]*d_shift[i:nx_out])) for i in range(nx_out)
])/np.nanvar(data)
return d_ac, d_ac_err
def ac_fft1(data, realfreq=False):
'''
Calculate auto-correlation using FFT.
'''
nx = len(data)
d_in = np.r_[data - np.nanmean(data), np.zeros(nx-1)] # zero-padding
d_ft = fft(d_in) # Fourier transform
d_ft_cnj = np.conjugate(fft(d_in)) # complex conjugate
d_ac = ifft(d_ft*d_ft_cnj).real
d_ac /= np.r_[np.arange(1,nx+1,1)[::-1], np.arange(1,nx,1)] # weighting
d_ac /= np.nanvar(data)
if realfreq:
d_ac = d_ac[:len(d_ac)//2+1]
else:
d_ac = fftshift(d_ac)
return d_ac
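# Consistency check (illustrative): for a series without NaNs the FFT-based and the
# iterative estimators agree to floating-point precision, e.g.
# >>> x = np.random.randn(64)
# >>> np.allclose(ac_fft1(x, realfreq=True), ac_1d(x, realfreq=True))
# True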
def ac_2d(data, derr=[]):
'''
Calculate auto-correlation.
Parameters
----------
Return
------
'''
nx, ny = data.shape
# zero-padding for convolution
d_in = data.copy() - np.nanmean(data)
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))]
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))]
d_shift = data.copy() - np.nanmean(data)
d_shift = np.r_[np.zeros((d_shift.shape[0]-1,d_shift.shape[1])), d_shift]
d_shift = np.c_[np.zeros((d_shift.shape[0],d_shift.shape[1]-1)), d_shift]
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
# autocorrelation
nx_out = 2*nx - 1
ny_out = 2*ny - 1
d_ac = np.array([
[np.nanmean(
d_in[:nx_out - k, :ny_out - l] * d_shift[k:nx_out, l:ny_out])
for l in range(ny_out)] for k in range(nx_out)])
d_ac /= np.nanvar(data)
if len(derr) == 0:
return d_ac
else:
# error propagation
# zero-padding
d_in_err = derr.copy()
d_in_err = np.r_[d_in_err, np.zeros((d_in_err.shape[0]-1, d_in_err.shape[1]))]
d_in_err = np.c_[d_in_err, np.zeros((d_in_err.shape[0], d_in_err.shape[1]-1))]
d_shift_err = derr.copy()
d_shift_err = np.r_[np.zeros((d_shift_err.shape[0]-1, d_shift_err.shape[1])), d_shift_err]
d_shift_err = np.c_[np.zeros((d_shift_err.shape[0], d_shift_err.shape[1]-1)), d_shift_err]
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
# error of each element:
# (m1 +/- sig1)*(m2 +/- sig2) = m1*m2 +/- sqrt((m1*sig2)^2 + (m2*sig1)^2)
# error of mean
# sqrt(Sum(sig_i^2))/N
d_ac_err = np.array([[
np.sqrt(np.nansum((d_in[:nx_out - k, :ny_out - l]*d_shift_err[k:nx_out, l:ny_out])**2 \
+ (d_in_err[:nx_out - k, :ny_out - l]*d_shift[k:nx_out, l:ny_out])**2))\
/np.count_nonzero(~np.isnan(d_in[:nx_out - k, :ny_out - l]*d_shift[k:nx_out, l:ny_out]))
for l in range(ny_out)] for k in range(nx_out)]
)/np.nanvar(data)
return d_ac, d_ac_err
def ac_fft2(data):
nx, ny = data.shape
d_in = data.copy()
d_in[np.isnan(d_in)] = 0. # fill nan with zero
d_in -= np.nanmean(data)
# zero-padding
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))] # zero-padding for convolution
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))] # zero-padding for convolution
d_ft = fftn(d_in) # Fourier transform
d_ft_cnj = np.conjugate(d_ft) # complex conjugate
d_ac = ifftn(d_ft*d_ft_cnj).real
# weighting with sample number
#print(d_ac.shape[0], nx)
wx = np.concatenate([np.arange(1, nx+1, 1), np.arange(nx-1, 0, -1)])
wx = ifftshift(wx)
wy = np.concatenate([np.arange(1, ny+1, 1), np.arange(ny-1, 0, -1)])
wy = ifftshift(wy)
#wx = np.r_[np.arange(1, d_ac.shape[0]//2+2, 1)[::-1], np.arange(1,d_ac.shape[0]//2+1,1)]
#wy = np.r_[np.arange(1, d_ac.shape[1]//2+2, 1)[::-1], np.arange(1,d_ac.shape[1]//2+1,1)]
wxx, wyy = np.meshgrid(wx, wy)
d_ac /= (wxx*wyy)*np.nanvar(data)
#if realfreq:
# print("Resultant ACF has only the positive axis.")
# print("The output axis length is nx/2.")
# d_ac = d_ac[0:d_ac.shape[1]//2+1,0:d_ac.shape[0]//2+1]
#else:
d_ac = ifftshift(d_ac)
return d_ac
# structure function
def sf_1d(data, derr=[]):
'''
Calculate the structure function.
Parameters
----------
Return
------
'''
nx = len(data)
d_sf = np.array([
np.nanmean((data[:nx-i] - data[i:nx])**2.) for i in range(nx)
])
if len(derr) == 0:
return d_sf
else:
# error propagation
d_sf_err = np.array([
np.sqrt(np.nansum((4.* (data[:nx-i] - data[i:nx])**2. * (derr[:nx-i]**2 + derr[i:nx]**2.))))\
/np.count_nonzero(~np.isnan((data[:nx-i] - data[i:nx]))) for i in range(nx)
])
return d_sf, d_sf_err
def sf_2d(data, derr=[], normalize=False):
'''
    Calculate the two-dimensional structure function.
Parameters
----------
Return
------
'''
nx, ny = data.shape
# zero-padding for convolution
d_in = data.copy() - np.nanmean(data)
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))]
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))]
d_shift = data.copy() - np.nanmean(data)
d_shift = np.r_[np.zeros((d_shift.shape[0]-1,d_shift.shape[1])), d_shift]
d_shift = np.c_[np.zeros((d_shift.shape[0],d_shift.shape[1]-1)), d_shift]
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
# structure function
nx_out = 2*nx - 1
ny_out = 2*ny - 1
d_sf = np.array([[
np.nanmean(
(d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out])**2. )
for l in range(ny_out)] for k in range(nx_out)])
if normalize:
d_sf /= d_sf[0,0]
if len(derr) == 0:
return d_sf
else:
# error propagation
# zero-padding
d_in_err = derr.copy()
d_in_err = np.r_[d_in_err, np.zeros((d_in_err.shape[0]-1, d_in_err.shape[1]))]
d_in_err = np.c_[d_in_err, np.zeros((d_in_err.shape[0], d_in_err.shape[1]-1))]
d_shift_err = derr.copy()
d_shift_err = np.r_[np.zeros((d_shift_err.shape[0]-1, d_shift_err.shape[1])), d_shift_err]
d_shift_err = np.c_[np.zeros((d_shift_err.shape[0], d_shift_err.shape[1]-1)), d_shift_err]
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
d_sf_err = np.array([[
np.sqrt(np.nansum((4.* (d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out])**2.\
* (d_in_err[:nx_out - k, :ny_out - l]**2. + d_shift_err[k:nx_out, l:ny_out]**2.))))\
/np.count_nonzero(~np.isnan(d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out]))
for l in range(ny_out)] for k in range(nx_out)])
return d_sf, d_sf_err
def pspec_1d(data, realfreq=False):
'''
Calculate Power-spectrum using FFT.
'''
nx = len(data)
d_in = np.r_[data - np.nanmean(data), np.zeros(nx-1)] # zero-padding
d_ft = fft(d_in) # Fourier transform
d_ft_cnj = np.conjugate(fft(d_in)) # complex conjugate
d_ps = (d_ft*d_ft_cnj).real # Power spectrum
if realfreq:
d_ps = d_ps[:len(d_ps)//2+1]
else:
d_ps = fftshift(d_ps)
return d_ps
def binning(bin_e, coordinates, data):
'''
Binning data according to given bins and a set of coordinates and data.
'''
#bin_c = 0.5 .*(bin_e[2:length(bin_e)] .+ bin_e[1:length(bin_e)-1])
d_bin = np.zeros(len(bin_e)-1)
for i in range(len(bin_e)-1):
indx = np.where( (coordinates >= bin_e[i]) & (coordinates < bin_e[i+1]))
if len(indx[0]) == 0:
d_bin[i] = np.nan
else:
d_bin[i] = np.nanmean(data[indx])
return d_bin
# for debug
def main():
# --------- input --------
# test with sin curve
nx, ny = [32, 32]
x = np.linspace(-np.pi,np.pi,nx)
y = np.linspace(-np.pi,np.pi,nx)
dx = x[1] - x[0]
dy = y[1] - y[0]
phi = 0.*np.pi # phase shift
# ------------------------
# ---------- start ---------
# grid
xx, yy = np.meshgrid(x, y, indexing='ij')
    z = np.sin(xx+phi)
"""Classes to extend **legacypipe**."""
import os
import re
import logging
import numpy as np
from legacypipe.decam import DecamImage
from legacypipe.bok import BokImage
from legacypipe.mosaic import MosaicImage
from legacypipe.ptf import PtfImage
from legacypipe.cfht import MegaPrimeImage
from legacypipe.survey import LegacySurveyData
from legacypipe.runs import DecamSurvey, NinetyPrimeMosaic
from legacypipe.runcosmos import DecamImagePlusNoise, CosmosSurvey
from astrometry.util.ttime import Time
import tractor
import galsim
logger = logging.getLogger('obiwan.kenobi')
def get_git_version(dirnm=None):
"""
Run 'git describe' in the current directory (or given dir) and return the result as a string.
Parameters
----------
dirnm : string, default=None
If not ``None``, 'cd' to the given directory before running 'git describe'.
Returns
-------
version : string
Git version.
Notes
-----
Taken from https://github.com/legacysurvey/legacypipe/blob/master/py/legacypipe/survey.py
"""
from legacypipe.survey import get_git_version as get_legacypipe_git_version
if dirnm is None:
import obiwan
dirnm = os.path.dirname(obiwan.__file__)
return get_legacypipe_git_version(dirnm=dirnm)
def get_version():
"""Return :func:`get_git_version` if not empty, else :attr:`obiwan.__version__`."""
toret = get_git_version()
if not toret:
from .version import __version__
toret = __version__
return toret
class get_randoms_id(object):
"""Handle identifier related to input random catalog: file id, row start, skip id."""
_keys = ['fileid','rowstart','skipid']
_default = [0]*len(_keys)
_template = 'file%s_rs%s_skip%s'
_kwargs_match_template = {key:'(?P<%s>.*?)' % key for key in _keys}
@classmethod
def keys(cls):
"""Return keys."""
return cls._keys
@classmethod
def default(cls):
"""Return default values."""
return cls._default
@classmethod
def template(cls):
"""Return string template."""
return cls._template
@classmethod
def kwargs_match_template(cls):
"""Return kwargs to reconstruct match template."""
return cls._kwargs_match_template
@classmethod
def match_template(cls):
"""Return match template."""
return cls._template % tuple(cls._kwargs_match_template[key] for key in cls.keys())
@classmethod
def as_dict(cls, **kwargs):
"""Return randoms id kwargs corresponding to kwargs."""
return {key_: kwargs.get(key_,def_) for key_,def_ in zip(cls.keys(),cls.default())}
@classmethod
def as_list(cls, **kwargs):
"""Return list corresponding to randoms id kwargs."""
toret = cls.as_dict(**kwargs)
return [toret[key_] for key_ in cls.keys()]
def __new__(cls, **kwargs):
"""Return string corresponding to randoms id kwargs."""
return cls._template % tuple(cls.as_list(**kwargs))
@classmethod
def match(cls,string):
"""Match randoms id in ``string`` and return randoms id kwargs."""
match = re.match(cls.match_template() + '$',string)
return {key: int(match.group(key)) for key in cls.keys()}
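# Example round trip (illustrative): get_randoms_id(fileid=2, rowstart=500, skipid=0)
# returns 'file2_rs500_skip0', and get_randoms_id.match('file2_rs500_skip0') recovers
# {'fileid': 2, 'rowstart': 500, 'skipid': 0}.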
def find_file(base_dir=None, filetype=None, brickname=None, source='obiwan', **kwargs):
"""
Return file name.
Shortcut to :meth:`LegacySurveySim.find_file`.
base_dir : string, default=None
**Obiwan** (if ``source == 'obiwan'``) or legacypipe (if ``source == 'legacypipe'``) root file directory.
filetype : string, default=None
Type of file to find.
brickname : string, default=None
Brick name.
source : string, default='obiwan'
If 'obiwan', return an **Obiwan** output file name, else a **legacypipe** file name.
kwargs : dict
Other arguments to file paths (e.g. :meth:`get_randoms_id.keys`).
"""
if source == 'obiwan':
survey = LegacySurveySim(survey_dir=base_dir,output_dir=base_dir,kwargs_file=get_randoms_id.as_dict(**kwargs))
else:
survey = LegacySurveyData(survey_dir=base_dir,output_dir=base_dir)
kwargs = {key:val for key,val in kwargs.items() if key not in get_randoms_id.keys()}
return survey.find_file(filetype,brick=brickname,output=False,**kwargs)
def find_legacypipe_file(survey_dir, filetype, brickname=None, **kwargs):
"""
Return **legacypipe** file name.
survey_dir : string
Survey directory.
filetype : string
Type of file to find.
brickname : string
Brick name.
kwargs : dict
Other arguments to file paths (e.g. :meth:`get_randoms_id.keys`).
"""
return find_file(base_dir=survey_dir,filetype=filetype,brickname=brickname,source='legacypipe',**kwargs)
def find_obiwan_file(output_dir, filetype, brickname=None, **kwargs):
"""
Return **Obiwan** output file name.
output_dir : string
**Obiwan** output directory.
filetype : string
Type of file to find.
brickname : string
Brick name.
kwargs : dict
Other arguments to file paths (e.g. :meth:`get_randoms_id.keys`).
"""
return find_file(base_dir=output_dir,filetype=filetype,brickname=brickname,source='obiwan',**kwargs)
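# Illustrative call (the output path is hypothetical): locate the Obiwan Tractor
# catalog of brick '1351p192' produced with the default randoms identifier:
# >>> find_obiwan_file('/path/to/output', 'tractor', brickname='1351p192',
# ...                  fileid=0, rowstart=0, skipid=0)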
class BaseSimSurvey(object):
"""
Dumb class with **Obiwan** attributes for future multiple inheritance.
Attributes
----------
simcat : SimCatalog
See below.
sim_stamp : string
See below.
add_sim_noise : string
See below.
image_eq_model : bool
See below.
kwargs_file : dict
See below.
rng : numpy.random.RandomState
Random state, from :attr:`seed``.
image_typemap : dict
Mapping (camera,class) used by :class:`legacypipe.survey.LegacySurveyData`.
survey_dir : string
Directory containing input imaging data.
output_dir : string
Directory containing output catalogs.
"""
def __init__(self, *args, simcat=None, sim_stamp='tractor', add_sim_noise=False,
image_eq_model=False, seed=0, kwargs_file=None, **kwargs):
"""
kwargs are to be passed on to :class:`legacypipe.survey.LegacySurveyData`-inherited classes, other arguments are specific to :class:`BaseSimSurvey`.
Only ``survey_dir`` must be specified to obtain bricks through :meth:`get_brick_by_name`.
Parameters
----------
simcat : SimCatalog, default=None
Simulated source catalog for a given brick (not CCD).
sim_stamp : string, default='tractor'
Method to simulate objects, either 'tractor' (:class:`TractorSimStamp`) or 'galsim' (:class:`GalSimStamp`).
add_sim_noise : string, default=False
Add noise from the simulated source to the image. Choices: ['gaussian','poisson'].
image_eq_model : bool, default=False
        Wherever a simulated source is added, replace both the image and its inverse variance
        with those of the simulated source only.
seed : int, default=0
For random number generators.
kwargs_file : dict, default=None
Extra arguments to file paths (e.g. :meth:`get_randoms_id.keys`).
kwargs : dict
Arguments for :class:`legacypipe.survey.LegacySurveyData`.
"""
super(BaseSimSurvey, self).__init__(*args,**kwargs)
self.image_typemap = {
'decam': DecamSimImage,
'decam+noise': DecamSimImagePlusNoise,
'mosaic': MosaicSimImage,
'mosaic3': MosaicSimImage,
'90prime': BokSimImage,
'ptf': PtfSimImage,
'megaprime': MegaPrimeSimImage,
}
kwargs_file = kwargs_file or {}
for key in ['simcat','sim_stamp','add_sim_noise','image_eq_model','kwargs_file']:
setattr(self,key,locals()[key])
self.rng = np.random.RandomState(seed)
def find_file(self, filetype, brick=None, output=False, stage=None, **kwargs):
"""
Return the file name of a Legacy Survey file.
Parameters
----------
filetype : string
Type of file to find, including:
- 'randoms': input random catalogues
- 'pickle': pickle files
- 'checkpoint': checkpoint files
- 'log' : log files
- 'ps' : ps (resources time series) catalogs
- 'tractor': **Tractor** catalogs
- 'depth': PSF depth maps
- 'galdepth': canonical galaxy depth maps
- 'nexp': number-of-exposure maps.
        brick : string, default=None
Brick name.
output : bool, default=False
Whether we are about to write this file; will use :attr:`output_dir` as
the base directory rather than :attr:`survey_dir`.
stage : string, default=None
Stage, only used if ``filetype == 'pickle'``.
kwargs : dict
Arguments for :meth:`legacypipe.survey.LegacySurveyData.find_file`.
Returns
-------
fn : string
Path to the specified file (whether or not it exists).
"""
if brick is None:
brickname = '%(brick)s'
brickpre = '%(brick).3s'
else:
brickname = brick
brickpre = brick[:3]
if stage is None:
stage = '%(stage)s'
if filetype == 'randoms':
base_dir = os.path.join(self.output_dir,'obiwan',brickpre,brickname,get_randoms_id(**self.kwargs_file))
return os.path.join(base_dir,'randoms-%s.fits' % brickname)
if filetype == 'pickle':
base_dir = os.path.join(self.output_dir,'pickle',brickpre,brickname,get_randoms_id(**self.kwargs_file))
return os.path.join(base_dir,'pickle-%s-%s.pickle' % (brickname,stage))
if filetype == 'checkpoint':
base_dir = os.path.join(self.output_dir,'checkpoint',brickpre,brickname,get_randoms_id(**self.kwargs_file))
return os.path.join(base_dir,'checkpoint-%s.pickle' % brickname)
if filetype == 'log':
base_dir = os.path.join(self.output_dir,'log',brickpre,brickname,get_randoms_id(**self.kwargs_file))
return os.path.join(base_dir,'log-%s.log' % brickname)
if filetype == 'ps':
sources_fn = super(BaseSimSurvey,self).find_file('ref-sources',brick=brick,output=output,**kwargs)
dirname = os.path.dirname(sources_fn)
basename = os.path.basename(sources_fn).replace('reference','ps')
fn = os.path.join(dirname,basename)
if fn == sources_fn: # make sure not to overwrite ref sources catalogs
raise ValueError('ps path is the same as reference sources = %s' % sources_fn)
else:
fn = super(BaseSimSurvey,self).find_file(filetype,brick=brick,output=output,**kwargs)
def wrap(fn):
basename = os.path.basename(fn)
dirname = os.path.dirname(fn)
ddirname = os.path.dirname(dirname)
if os.path.dirname(ddirname).endswith('/coadd'):
return os.path.join(ddirname,brickname,get_randoms_id(**self.kwargs_file),basename)
if ddirname.endswith('/metrics') or ddirname.endswith('/tractor') or ddirname.endswith('/tractor-i'):
return os.path.join(dirname,brickname,get_randoms_id(**self.kwargs_file),basename)
return fn
if isinstance(fn,list):
fn = list(map(wrap,fn))
elif fn is not None:
fn = wrap(fn)
return fn
class LegacySurveySim(BaseSimSurvey,LegacySurveyData):
"""Extend :class:`BaseSimSurvey` with :class:`legacypipe.survey.LegacySurveyData`."""
class CosmosSim(BaseSimSurvey,CosmosSurvey):
"""
Extend :class:`BaseSimSurvey` with a filter for cosmos CCDs.
Call with BaseSimSurvey arguments plus additional CosmosSurvey argument ``subset``.
"""
class DecamSim(BaseSimSurvey,DecamSurvey):
"""Extend :class:`BaseSimSurvey` with a filter for DECam CCDs."""
class NinetyPrimeMosaicSim(BaseSimSurvey,NinetyPrimeMosaic):
"""Extend :class:`BaseSimSurvey` with a filter for mosaic or 90prime CCDs."""
runs = {
'decam': DecamSim,
'90prime-mosaic': NinetyPrimeMosaicSim,
'south': DecamSim,
'north': NinetyPrimeMosaicSim,
'cosmos': CosmosSim,
None: LegacySurveySim,
}
def get_survey(name, **kwargs):
"""
Return an instance of the :class:`BaseSimSurvey`-inherited class given by name.
See :attr:`obiwan.kenobi.runs` dictionary.
"""
survey_class = runs[name]
if name != 'cosmos':
kwargs.pop('subset',None)
survey = survey_class(**kwargs)
return survey
class GSImage(galsim.Image):
"""Extend :class:`galsim.Image`, with other ``__setitem__`` options."""
def __setitem__(self, *args):
"""
Extend ``galsim.Image.__setitem__`` to allow:
- numpy-style ``self[ndarray1] = ndarray2``
- hybdrid-style ``self[bounds] = ndarray``
"""
if len(args) == 2:
# allows numpy-style ``self[ndarray1] = ndarray2``
if isinstance(args[0], np.ndarray):
self._array[args[0]] = args[1]
return
# allows settings ``self[bounds] = ndarray``
if isinstance(args[0], galsim.BoundsI) and isinstance(args[1], np.ndarray):
args = (args[0],self.__class__(args[1], bounds=args[0]))
super(GSImage,self).__setitem__(*args)
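    # Illustrative use (assumes galsim and numpy are importable): both extended forms
    # of item assignment work on a GSImage.
    # >>> im = GSImage(np.zeros((4, 4), dtype='f4'), xmin=1, ymin=1)
    # >>> im[im.array < 1.] = 1.                                    # numpy-style mask
    # >>> im[galsim.BoundsI(1, 2, 1, 2)] = np.zeros((2, 2), 'f4')   # bounds = ndarray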
def _Image(array, bounds, wcs):
"""
Function of :mod:`galsim.image` redefined to have all methods of :class:`galsim.Image` consistent within :class:`GSImage` (e.g. :meth:`galsim.Image.copy`).
Equivalent to ``GSImage(array, bounds, wcs)``, but without the overhead of sanity checks,
and the other options for how to provide the arguments.
"""
ret = GSImage.__new__(GSImage)
ret.wcs = wcs
ret._dtype = array.dtype.type
if ret._dtype in GSImage._alias_dtypes:
ret._dtype = GSImage._alias_dtypes[ret._dtype]
array = array.astype(ret._dtype)
ret._array = array
ret._bounds = bounds
return ret
galsim.image._Image = _Image
class BaseSimImage(object):
"""Dumb class that extends :meth:`legacypipe.image.get_tractor_image` for future multiple inheritance."""
def get_tractor_image(self, **kwargs):
get_dq = kwargs.get('dq', True)
kwargs['dq'] = True # dq required in the following
if not kwargs.get('nanomaggies', True):
raise NotImplementedError('In Obiwan, images are assumed to be in nanomaggies.')
#print('slice',kwargs['slc'])
tim = super(BaseSimImage,self).get_tractor_image(**kwargs)
if tim is None: # this can be None when the edge of a CCD overlaps
return tim
tim_dq = GSImage(tim.dq,xmin=1,ymin=1)
if not get_dq: del tim.dq
if self.survey.simcat is None or not len(self.survey.simcat): # empty catalog
return tim
# Grab the data and inverse variance images [nanomaggies!]
tim_image = GSImage(tim.getImage(),xmin=1,ymin=1)
with np.errstate(divide='ignore', invalid='ignore'):
tim_var = GSImage(1./tim.getInvvar(),xmin=1,ymin=1)
# Also store galaxy sims and sims invvar
sims_image = tim_image.copy()
sims_image.fill(0.)
sims_var = sims_image.copy()
# Store simulated galaxy images in tim object
# Loop on each object.
if self.survey.sim_stamp == 'tractor':
objstamp = TractorSimStamp(tim)
else:
objstamp = GalSimStamp(tim)
any_overlap = False
for obj in self.survey.simcat:
t0 = Time()
logger.info('%s drawing object id=%d, band=%s: flux=%.2g, sersic=%.2f, shape_r=%.2f, shape_e1=%.2f, shape_e2=%.2f',
objstamp.__class__.__name__,obj.id,objstamp.band,obj.get('flux_%s' % objstamp.band),obj.sersic,obj.shape_r,obj.shape_e1,obj.shape_e2)
stamp = objstamp.draw(obj)
if stamp is None:
logger.debug('Stamp does not overlap tim for object id=%d',obj.id)
continue
            logger.debug('Finished drawing object id=%d: band=%s flux=%.2f addedflux=%.2f in %s',
                         obj.id,objstamp.band,obj.get('flux_%s' % objstamp.band),stamp.array.sum(),Time()-t0)
overlap = stamp.bounds & tim_image.bounds
# Add source if at least 1 pix falls on the CCD
if overlap.area() > 0:
any_overlap = True
logger.debug('Stamp overlaps tim: id=%d band=%s',obj.id,objstamp.band)
stamp = stamp[overlap].array
nano2e = self.get_nano2e(tim=tim,x=np.arange(overlap.xmin,overlap.xmax+1),y=np.arange(overlap.ymin,overlap.ymax+1))
if self.survey.add_sim_noise:
stamp_pos = stamp.clip(0)
if self.survey.add_sim_noise == 'gaussian':
logger.debug('Adding Gaussian noise.')
stamp += np.sqrt(stamp_pos)/np.sqrt(nano2e)*self.survey.rng.randn(*stamp.shape)
else: # poisson
logger.debug('Adding Poisson noise.')
stamp += self.survey.rng.poisson(stamp_pos*nano2e,size=stamp.shape)/nano2e - stamp_pos
# Add stamp to image
tim_image[overlap] += stamp
# Compute stamp variance
stamp_var = np.abs(stamp)/nano2e
stamp_var[tim_dq[overlap].array > 0] = 0.
tim_var[overlap] += stamp_var
# Extra
sims_image[overlap] += stamp
sims_var[overlap] += stamp_var
tim.sims_image = sims_image.array
        tim.sims_inverr = np.zeros_like(tim.sims_image)
# Copyright 2019 <NAME>.
#
# This file is part of Mi3-GPU.
#
# Mi3-GPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Mi3-GPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mi3-GPU. If not, see <http://www.gnu.org/licenses/>.
#
#Contact: allan.haldane _AT_ gmail.com
import scipy
import numpy as np
from numpy.random import RandomState
import pyopencl as cl
import pyopencl.array as cl_array
import os, time, warnings
import textwrap
from utils import printsome
import collections
cf = cl.mem_flags
rng_buf_mul = 1024
################################################################################
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '0'
os.environ['PYOPENCL_NO_CACHE'] = '1'
os.environ["CUDA_CACHE_DISABLE"] = '1'
def printPlatform(log, p, n=0):
log("Platform {} '{}':".format(n, p.name))
log(" Vendor: {}".format(p.vendor))
log(" Version: {}".format(p.version))
exts = ("\n" + " "*16).join(textwrap.wrap(p.extensions, 80-16))
log(" Extensions: {}".format(exts))
def printDevice(log, d):
log(" Device '{}':".format(d.name))
log(" Vendor: {}".format(d.vendor))
log(" Version: {}".format(d.version))
log(" Driver Version: {}".format(d.driver_version))
log(" Max Clock Frequency: {}".format(d.max_clock_frequency))
log(" Max Compute Units: {}".format(d.max_compute_units))
log(" Max Work Group Size: {}".format(d.max_work_group_size))
log(" Global Mem Size: {}".format(d.global_mem_size))
log(" Global Mem Cache Size: {}".format(d.global_mem_cache_size))
log(" Local Mem Size: {}".format(d.local_mem_size))
log(" Max Constant Buffer Size: {}".format(d.max_constant_buffer_size))
def printGPUs(log):
for n,p in enumerate(cl.get_platforms()):
printPlatform(log, p, n)
for d in p.get_devices():
printDevice(log, d)
log("")
################################################################################
# The GPU performs two main types of computation: MCMC runs, and perturbed
# coupling updates. MCMCGPU methods are asynchronous on the host.
# Functions that return data do not return the data directly, but return a
# FutureBuf object. The data may be obtained by FutureBuf.read(), which is
# blocking.
# The gpu has two sequence buffers: A "small" buffer for MCMC gpu generation,
# and an optional "large buffer" for combined sequence sets.
# The opencl queue is created as an out-of-order queue, and so kernel order is
# managed by the MCMCGPU class itself. By default, it makes all opencl
# commands wait until the last command is finished, but all methods also
# have a wait_for argument to override this. `None` means wait until the last
# command is done, or it can be a list of opencl events to wait for. Set it
# to the empty list [] to run immediately.
# Note that in openCL implementations there is generally a limit on the number
# of queued commands allowed in a context. If you reach the limit, all queues
# will block until a kernel finishes. So we must be careful not to fill up a
# single queue before others, so do `for i in range(100): for g in gpus:
# g.command()` instead of `for g in gpus: for i in range(100): g.command()` as
# the latter may fill the first gpu's queue, blocking the rest.
# See CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE and CL_DEVICE_MAX_ON_DEVICE_EVENTS
# Note that on some systems there is a watchdog timer that kills any kernel
# that runs for too long. You will get a CL_OUT_OF_RESOURCES error if this
# happens, which occurs when the *following* kernel is run.
class FutureBuf:
def __init__(self, buffer, event, postprocess=None):
self.buffer = buffer
self.event = event
self.postfunc = postprocess
self.shape = buffer.shape
self.dtype = buffer.dtype
def read(self):
self.event.wait()
if self.postfunc != None:
return self.postfunc(self.buffer)
return self.buffer
class MCMCGPU:
def __init__(self, gpuinfo, L, q, nseq, wgsize, outdir,
vsize, seed, profile=False):
if nseq%512 != 0:
raise ValueError("nwalkers/ngpus must be a multiple of 512")
# this guarantees that all kernel access to seqmem is coalesced and
# simplifies the histogram kernels
self.L = L
self.q = q
self.nPairs = L*(L-1)//2
self.events = collections.deque()
self.SWORDS = ((L-1)//4+1) #num words needed to store a sequence
self.SBYTES = (4*self.SWORDS) #num bytes needed to store a sequence
self.nseq = {'main': nseq}
self.nwalkers = nseq
device, gpunum, ctx, prg = gpuinfo
self.gpunum = gpunum
self.ctx = ctx
self.prg = prg
self.device = device
self.wgsize = wgsize
self.nhist, self.histws = histogram_heuristic(q)
# sanity checks (should be checked elsewhere before this)
if nseq%wgsize != 0:
raise Exception("nseq per GPU must be a multiple of wgsize")
if wgsize < q*q:
raise Exception("wgsize cannot be less than q*q")
self.logfn = os.path.join(outdir, 'gpu-{}.log'.format(gpunum))
with open(self.logfn, "wt") as f:
printDevice(f.write, device)
self.mcmcprg = prg.metropolis
self.rngstate = RandomState(seed)
#setup opencl for this device
self.log("Getting CL Queue")
qprop = cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE
self.profile = profile
if profile:
qprop |= cl.command_queue_properties.PROFILING_ENABLE
self.queue = cl.CommandQueue(ctx, device=device, properties=qprop)
self.log("\nOpenCL Device Compilation Log:")
self.log(self.prg.get_build_info(device, cl.program_build_info.LOG))
maxwgs = self.mcmcprg.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE, device)
self.log("Max MCMC WGSIZE: {}".format(maxwgs))
self.initted = []
self.bufs = {}
self.buf_spec = {}
self.seqbufs = {}
self.Ebufs = {}
self.largebufs = []
# setup essential buffers
nPairs, SWORDS = self.nPairs, self.SWORDS
j_pad = 3*self.wgsize
self._setupBuffer( 'J', '<f4', (nPairs, q*q), pad=j_pad)
self._setupBuffer('Junpacked', '<f4', (L*L, q*q), pad=j_pad)
self._setupBuffer( 'bi', '<f4', (nPairs, q*q)),
self._setupBuffer( 'bicount', '<u4', (nPairs, q*q)),
self._setupBuffer( 'seq main', '<u4', (SWORDS, self.nseq['main'])),
self._setupBuffer('seqL main', '<u4', (L, self.nseq['main']//4)),
self._setupBuffer( 'E main', '<f4', (self.nseq['main'],)),
self.unpackedJ = False #use to keep track of whether J is unpacked
self.repackedSeqT = {'main': False}
self.lastevt = None
def log(self, msg):
#logs are rare, so just open the file every time
with open(self.logfn, "at") as f:
print("{: 10.3f}".format(time.process_time()), msg, file=f)
def logevt(self, name, evt, nbytes=None):
self.lastevt = evt
# don't save events if not profiling.
# note that saved events use up memory - free it using logprofile
if self.profile:
if len(self.events)%1000 == 0 and len(self.events) != 0:
self.log("Warning: Over {} profiling events are not flushed "
"(using up memory)".format(len(self.events)))
if nbytes:
self.events.append((evt, name, nbytes))
else:
self.events.append((evt, name))
return evt
def _evtlist(self, evts):
if evts is None:
return []
elif isinstance(evts, cl.Event):
return [evts]
else:
return evts
def _waitevt(self, evts=None):
if evts is None and self.lastevt is not None:
return [self.lastevt]
return self._evtlist(evts)
def logProfile(self):
if not self.profile:
return
def isComplete(e):
return (e.command_execution_status ==
cl.command_execution_status.COMPLETE)
with open(self.logfn, "at") as f:
while len(self.events) != 0 and isComplete(self.events[0][0]):
dat = self.events.popleft()
evt, name, size = dat[0],dat[1],(dat[2] if len(dat)==3 else '')
print("EVT", name, evt.profile.start, evt.profile.end,
size, file=f)
def _setupBuffer(self, bufname, buftype, bufshape, pad=None,
flags=cf.READ_WRITE):
flags = flags | cf.ALLOC_HOST_PTR
nelem = int(np.product(bufshape))
if pad:
nelem = nelem + pad
size = np.dtype(buftype).itemsize * nelem
buf = cl.Buffer(self.ctx, flags, size=size)
self.bufs[bufname] = buf
self.buf_spec[bufname] = (buftype, bufshape, flags)
# add it to convenience dicts if applicable
names = bufname.split()
if len(names) > 1:
bufs = {'seq': self.seqbufs, 'E': self.Ebufs}
if names[0] in bufs:
bufs[names[0]][names[1]] = buf
def require(self, *reqs):
for r in reqs:
if r not in self.initted:
raise Exception("{} not initialized".format(r))
def _initcomponent(self, cmp):
if cmp in self.initted:
raise Exception("Already initialized {}".format(cmp))
self.initted.append(cmp)
def initMCMC(self, nsteps, rng_offset, rng_span):
self._initcomponent('MCMC')
# rngstates should be size of mwc64xvec2_state_t
self.nsteps = nsteps
self._setupBuffer('rngstates', '<2u8', (self.nseq['main'],)),
self._setupBuffer( 'Bs', '<f4', (self.nseq['main'],)),
self._setupBuffer( 'randpos', '<u4', (self.nsteps*rng_buf_mul,))
self.randpos_offset = rng_buf_mul*self.nsteps
self.setBuf('Bs', np.ones(self.nseq['main'], dtype='<f4'))
self._initMCMC_RNG(rng_offset, rng_span)
self.nsteps = int(nsteps)
def initLargeBufs(self, nseq_large):
self._initcomponent('Large')
self.nseq['large'] = nseq_large
self._setupBuffer( 'seq large', '<u4', (self.SWORDS, nseq_large))
self._setupBuffer( 'seqL large', '<u4', (self.L, nseq_large//4)),
self._setupBuffer( 'E large', '<f4', (nseq_large,))
self._setupBuffer('weights large', '<f4', (nseq_large,))
self.largebufs.extend(['seq large', 'seqL large', 'E large',
'weights large'])
self.nstoredseqs = 0
# it is important to zero out the large seq buffer, because
# if it is partially full we may need to compute energy
# over the padding sequences at the end to get a full wg.
buf = self.bufs['seq large']
self.fillBuf('seq large', 0)
self.repackedSeqT['large'] = False
def initSubseq(self):
self.require('Large')
self._initcomponent('Subseq')
self._setupBuffer('markpos', '<u1', (self.SBYTES,), flags=cf.READ_ONLY)
self.markPos(np.zeros(self.SBYTES, '<u1'))
## we may want to select replicas at a particular temperature
#def initMarkSeq(self):
# self._initcomponent('Markseq')
# self._setupBuffer( 'markseqs', '<i4', (self.nseq['main'],))
# self.setBuf('markseqs', np.arange(self.nseq['main'], dtype='<i4'))
# self.nmarks = self.nseq['main']
def initJstep(self):
self._initcomponent('Jstep')
nPairs, q = self.nPairs, self.q
self._setupBuffer( 'dJ', '<f4', (nPairs, q*q))
self._setupBuffer('bi target', '<f4', (nPairs, q*q))
self._setupBuffer( 'Creg', '<f4', (nPairs, q*q))
self._setupBuffer( 'Xlambdas', '<f4', (nPairs,))
self._setupBuffer( 'neff', '<f4', (1,))
self._setupBuffer( 'weights', '<f4', (self.nseq['main'],))
def packSeqs_4(self, seqs):
"""
Converts seqs to 4-byte uint format on CPU, padded to 32bits, assumes
little endian. Each row's bytes are
a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 ...
        for sequences a, b, c, so each uint32 corresponds to 4 seq bytes.
"""
if seqs.dtype != np.dtype('<u1'):
raise Exception("seqs must have u1 dtype")
bseqs = np.zeros((seqs.shape[0], self.SBYTES), dtype='<u1', order='C')
bseqs[:,:self.L] = seqs
mem = np.zeros((self.SWORDS, seqs.shape[0]), dtype='<u4', order='C')
for i in range(self.SWORDS):
mem[i,:] = bseqs.view(np.uint32)[:,i]
return mem
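    # Illustrative shapes (not executed): for L=6 and 4 sequences, seqs is a (4, 6)
    # array of dtype '<u1'; packSeqs_4 pads each row to SBYTES=8 bytes and returns a
    # (SWORDS, 4) = (2, 4) array of dtype '<u4', one 32-bit word per 4 residues.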
def unpackSeqs_4(self, mem):
""" reverses packSeqs_4 (on CPU)"""
bseqs = np.zeros((mem.shape[1], self.SBYTES), dtype='<u1', order='C')
for i in range(self.SWORDS): #undo memory rearrangement
bseqs.view(np.uint32)[:,i] = mem[i,:]
return bseqs[:,:self.L]
def repackseqs_T(self, bufname, wait_for=None):
"""
On GPU, copies the seq buffer (in 4-byte format) to a seqL buffer
in "transpose" format, which is just the usual CPU sequence buffer
but transposed.
"""
self.log("repackseqs_T")
nseq = self.nseq[bufname]
inseq_dev = self.bufs['seq ' + bufname]
outseq_dev = self.bufs['seqL ' + bufname]
self.repackedSeqT[bufname] = True
return self.logevt('repackseqs_T',
self.prg.unpackseqs1(self.queue, (self.SWORDS*256,), (256,),
inseq_dev, np.uint32(nseq),
outseq_dev, np.uint32(nseq//4),
wait_for=self._waitevt(wait_for)))
def unpackJ(self, wait_for=None):
"""convert J from format where every row is a unique ij pair (L choose 2
rows) to format with every pair, all orders (L^2 rows)."""
# quit if J already loaded/unpacked
if self.unpackedJ:
return wait_for
self.log("unpackJ")
q, nPairs = self.q, self.nPairs
self.unpackedJ = True
return self.logevt('unpackJ',
self.prg.unpackfV(self.queue, (nPairs*q*q,), (q*q,),
self.bufs['J'], self.bufs['Junpacked'],
wait_for=self._waitevt(wait_for)))
def _initMCMC_RNG(self, rng_offset, rng_span, wait_for=None):
self.require('MCMC')
self.log("initMCMC_RNG")
# Idea is we want to divide the rng stream into non-overlapping chunks
# for each walker. This GPU was given a span of rng_span, so divide it
# by number of walkers (times 2 since each rng call advances by 2 for
# vec2).
# Read mwc64 docs for more info. mwc64 doc says each walker's stream
# offset is (rng_offset + walker_span*(get_global_id(0)*vectorSize +
# vecind)) where vectorSize is 2. Note that rng_offset is not
# multiplied by 2!
rng_offset = np.uint64(rng_offset)
nwalkers = np.uint64(self.nseq['main'])
v2 = np.uint64(2)
# walker span is the # of rng calls assigned per walker
walker_span = np.uint64(rng_span)//(v2*nwalkers) # factor of 2 for vec2
self.log("RNG offset: {} walker-span: {} nwalkers {}".format(
rng_offset, walker_span, nwalkers))
# Warning: It is very important that the walker rng stream offsets
# across gpus are all distinct, or else some walkers will be highly
# correlated. Touch this code with care.
assert(walker_span*v2*nwalkers <= rng_span)
wgsize = self.wgsize
while wgsize > nwalkers:
wgsize = wgsize//2
return self.logevt('initMCMC_RNG',
self.prg.initRNG2(self.queue, (nwalkers,), (wgsize,),
self.bufs['rngstates'],
np.uint64(rng_offset), walker_span,
wait_for=self._waitevt(wait_for)))
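    # Hedged sketch (editor's note): with the mwc64x convention quoted in the
    # comments above, walker w and vector lane v (v in {0, 1}) effectively start
    # their stream at
    #   rng_offset + walker_span * (2*w + v)
    # so consecutive walkers own disjoint chunks of length walker_span, and the
    # assertion walker_span * 2 * nwalkers <= rng_span guarantees that all of
    # them fit inside the span assigned to this GPU.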
def updateRngPos(self, wait_evt=None):
self.randpos_offset = self.randpos_offset + self.nsteps
rng_evt = None
bufsize = rng_buf_mul*self.nsteps
if self.randpos_offset >= bufsize:
# all gpus use same position-rng series. This way there is no
# difference between running on one gpu vs splitting on multiple
rng = self.rngstate.randint(0, self.L, size=bufsize).astype('u4')
rng_evt = self.setBuf('randpos', rng, wait_for=wait_evt)
self.randpos_offset = 0
return np.uint32(self.randpos_offset), rng_evt
def runMCMC(self, wait_for=None):
"""Performs a single round of mcmc sampling (nsteps MC steps)"""
t1 = time.time()
self.require('MCMC')
self.log("runMCMC")
wait_evt = self._waitevt(wait_for)
nseq = self.nseq['main']
nsteps = self.nsteps
wait_unpack = self.unpackJ(wait_for=wait_evt)
rngoffset, wait_rng = self.updateRngPos(wait_evt)
wait = self._evtlist(wait_unpack) + self._evtlist(wait_rng)
self.repackedSeqT['main'] = False
return self.logevt('mcmc',
self.mcmcprg(self.queue, (nseq,), (self.wgsize,),
self.bufs['Junpacked'], self.bufs['rngstates'],
rngoffset, self.bufs['randpos'], np.uint32(nsteps),
self.Ebufs['main'], self.bufs['Bs'],
self.seqbufs['main'],
wait_for=wait))
def measureFPerror(self, log, nloops=3):
log("Measuring FP Error")
for n in range(nloops):
self.runMCMC()
e1 = self.getBuf('E main').read()
self.calcEnergies('main')
e2 = self.getBuf('E main').read()
log("Run", n, "Error:", np.mean((e1-e2)**2))
log(' Final E MC', printsome(e1), '...')
log(" Final E rc", printsome(e2), '...')
seqs = self.getBuf('seq main').read()
J = self.getBuf('J').read()
e3 = getEnergies(seqs, J)
log(" Exact E", e3[:5])
log(" Error:", np.mean([float((a-b)**2) for a,b in zip(e1, e3)]))
def calcBicounts(self, seqbufname, wait_for=None):
self.log("calcBicounts " + seqbufname)
L, q, nPairs, nhist = self.L, self.q, self.nPairs, self.nhist
if seqbufname == 'main':
nseq = self.nseq[seqbufname]
buflen = nseq
else:
nseq = self.nstoredseqs
buflen = self.nseq[seqbufname]
seq_dev = self.seqbufs[seqbufname]
localhist = cl.LocalMemory(nhist*q*q*np.dtype(np.uint32).itemsize)
return self.logevt('calcBicounts',
self.prg.countBivariate(self.queue, (nPairs*nhist,), (nhist,),
self.bufs['bicount'],
np.uint32(nseq), seq_dev, np.uint32(buflen), localhist,
wait_for=self._waitevt(wait_for)))
def bicounts_to_bimarg(self, seqbufname='main', wait_for=None):
self.log("bicounts_to_bimarg ")
q, nPairs = self.q, self.nPairs
if seqbufname == 'main':
nseq = self.nseq['main']
else:
nseq = self.nstoredseqs
nworkunits = self.wgsize*((nPairs*q*q-1)//self.wgsize+1)
return self.logevt('bicounts_to_bimarg',
self.prg.bicounts_to_bimarg(self.queue,
(nworkunits,), (self.wgsize,),
self.bufs['bicount'], self.bufs['bi'], np.uint32(nseq),
wait_for=self._waitevt(wait_for)))
def calcEnergies(self, seqbufname, Jbufname='J', wait_for=None):
self.log("calcEnergies " + seqbufname)
energies_dev = self.Ebufs[seqbufname]
seq_dev = self.seqbufs[seqbufname]
buflen = self.nseq[seqbufname]
if seqbufname == 'main':
nseq = self.nseq[seqbufname]
else:
nseq = self.nstoredseqs
# pad to be a multiple of wgsize (uses dummy seqs at end)
nseq = nseq + ((self.wgsize - nseq) % self.wgsize)
return self.logevt('getEnergies',
self.prg.getEnergies(self.queue, (nseq,), (self.wgsize,),
self.bufs[Jbufname], seq_dev, np.uint32(buflen),
energies_dev, wait_for=self._waitevt(wait_for)))
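    # Worked example of the padding above (editor's illustration only): with
    # wgsize = 256 and 1000 stored sequences, the kernel is launched over
    # 1000 + ((256 - 1000) % 256) = 1024 work items, i.e. the next multiple of
    # the workgroup size; the 24 extra work items read the zeroed dummy
    # sequences at the end of the buffer.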
def weightedMarg(self, seqbufname='main', wait_for=None):
self.require('Jstep')
self.log("weightedMarg")
q, L, nPairs = self.q, self.L, self.nPairs
nhist, histws = self.nhist, self.histws
if seqbufname == 'main':
nseq = self.nseq[seqbufname]
buflen = nseq//4
weights_dev = self.bufs['weights']
else:
nseq = self.nstoredseqs
buflen = self.nseq[seqbufname]//4
weights_dev = self.bufs['weights large']
# pad to be a multiple of 512 (uses dummy seqs at end)
nseq = nseq + ((512 - nseq) % 512)
# XXX find correct padding here
if not self.repackedSeqT[seqbufname]:
wait_for = self.repackseqs_T(seqbufname,
wait_for=self._waitevt(wait_for))
seq_dev = self.bufs['seqL ' + seqbufname]
return self.logevt('weightedMarg',
self.prg.weightedMarg(self.queue, (nPairs*histws,), (histws,),
self.bufs['bi'], weights_dev,
np.uint32(nseq), seq_dev, np.uint32(buflen),
wait_for=self._waitevt(wait_for)))
def renormalize_bimarg(self, wait_for=None):
self.log("renormalize_bimarg")
q, nPairs = self.q, self.nPairs
return self.logevt('renormalize_bimarg',
self.prg.renormalize_bimarg(self.queue, (nPairs*q*q,), (q*q,),
self.bufs['bi'], wait_for=self._waitevt(wait_for)))
def addBiBuffer(self, bufname, otherbuf, wait_for=None):
# used for combining results from different gpus, where otherbuf is a
# buffer "belonging" to another gpu
self.log("addbuf")
selfbuf = self.bufs[bufname]
if selfbuf.size != otherbuf.size:
raise Exception('Tried to add bufs of different sizes')
q, nPairs = self.q, self.nPairs
nworkunits = self.wgsize*((nPairs*q*q-1)//self.wgsize+1)
return self.logevt('addbuf',
self.prg.addBiBufs(self.queue, (nworkunits,), (self.wgsize,),
selfbuf, otherbuf, wait_for=self._waitevt(wait_for)))
def updateJ(self, gamma, pc, Jbuf='dJ', wait_for=None):
self.require('Jstep')
self.log("updateJ")
q, nPairs = self.q, self.nPairs
#find next highest multiple of wgsize, for num work units
nworkunits = self.wgsize*((nPairs*q*q-1)//self.wgsize+1)
bibuf = self.bufs['bi']
Jin = Jout = self.bufs[Jbuf]
self.unpackedJ = False
return self.logevt('updateJ',
self.prg.updatedJ(self.queue, (nworkunits,), (self.wgsize,),
self.bufs['bi target'], bibuf,
np.float32(gamma), np.float32(pc), Jin, Jout,
wait_for=self._waitevt(wait_for)))
def reg_l1z(self, gamma, pc, lJ, wait_for=None):
self.require('Jstep')
self.log("reg_l1z")
q, nPairs = self.q, self.nPairs
bibuf = self.bufs['bi']
self.unpackedJ = None
return self.logevt('reg_l1z',
self.prg.reg_l1z(self.queue, (nPairs*q*q,), (q*q,),
bibuf, np.float32(gamma), | np.float32(pc) | numpy.float32 |
"""
-----------------------------------------------------------------------
Harmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://doi.org/10.1101/2021.10.06.463319
-----------------------------------------------------------------------
script for:
** proof of concept example **
-----------------------------------------------------------------------
(c) <NAME> (<EMAIL>) @ Neurology Dept, MPI CBS, 2021
https://github.com/minajamshidi
(c) please cite the above paper in case of using this code for your research
License: MIT License
-----------------------------------------------------------------------
last modified: 20210930 by \Mina
-----------------------------------------------------------------------
-----------------------------------------------------------------------
"""
import os.path as op
from matplotlib import pyplot as plt
import numpy as np
from numpy import pi
import mne
from mne.minimum_norm import read_inverse_operator
from tools_connectivity_plot import *
from tools_connectivity import *
from tools_meeg import *
from tools_source_space import *
from tools_general import *
from harmoni.harmonitools import harmonic_removal
# -----------------------------------------
# paths
# -----------------------------------------
# subjects_dir = '/NOBACKUP/mne_data/'
subjects_dir = '/data/pt_02076/mne_data/MNE-fsaverage-data/'
subject = 'fsaverage'
_oct = '6'
fwd_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-64ch-fwd.fif')
inv_op_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-64ch-inv.fif')
simulated_data_dir = '../harmoni-supplementary-data/simulated_data/proof_of_concept_data'
raw_dir = '../harmoni-supplementary-data/simulated_data/proofconcept_simulated_sesorspace-raw.fif'
# -----------------------------------------
# set parameters
# -----------------------------------------
iir_params = dict(order=2, ftype='butter')
# Head ----------------------
parcellation = dict(name='aparc', abb='DK')
labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir, parc=parcellation['name'])
labels_med = [] # labels[-2:]
labels = labels[:-1]
labels_sorted, idx_sorted = rearrange_labels(labels, order='anterior_posterior') # rearrange labels
n_parc = len(labels)
fwd = mne.read_forward_solution(fwd_dir)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)
leadfield = fwd_fixed['sol']['data']
src = fwd_fixed['src']
# inv operator-----------------------------------------
inv_method = 'eLORETA'
inv_op = read_inverse_operator(inv_op_dir)
# ----------------------------------------
# load simulated data
# -----------------------------------------
simulated_raw = mne.io.read_raw_fif(raw_dir)
fs = simulated_raw.info['sfreq']
dict_simulated_data = load_pickle(simulated_data_dir)
conn_alpha_orig1 = dict_simulated_data['conn_alpha_orig1']
conn_beta_orig1 = dict_simulated_data['conn_beta_orig1']
conn_cfc_orig1 = dict_simulated_data['conn_cfc_orig1']
# --------------------------------------------------------------------
# Compute the source-space data from simulated raw
# --------------------------------------------------------------------
# alpha sources --------
raw_alpha = simulated_raw.copy()
raw_alpha.load_data()
raw_alpha.filter(l_freq=8, h_freq=12, method='iir', iir_params=iir_params)
raw_alpha.set_eeg_reference(projection=True)
pow_alpha_sensor = np.mean(raw_alpha.get_data()**2, axis=1)
plot_topomap_(pow_alpha_sensor, simulated_raw.info, title='power of alpha band')
stc_alpha_raw = mne.minimum_norm.apply_inverse_raw(raw_alpha, inverse_operator=inv_op,
lambda2=0.05, method=inv_method, pick_ori='normal')
parcel_series_alpha = extract_parcel_time_series(stc_alpha_raw.data, labels, src, mode='svd', n_select=1)
# beta sources --------
raw_beta = simulated_raw.copy()
raw_beta.load_data()
raw_beta.filter(l_freq=16, h_freq=24, method='iir', iir_params=iir_params)
raw_beta.set_eeg_reference(projection=True)
pow_beta_sensor = np.mean(raw_beta.get_data()**2, axis=1)
plot_topomap_(pow_beta_sensor, simulated_raw.info, title='power of beta band')
stc_beta_raw = mne.minimum_norm.apply_inverse_raw(raw_beta, inverse_operator=inv_op,
lambda2=0.1, method=inv_method, pick_ori='normal')
parcel_series_beta = extract_parcel_time_series(stc_beta_raw.data, labels, src, mode='svd', n_select=1)
# --------------------------------------------------------------------
# Harmoni --> minimization stage: regress out alpha from beta in each ROI
# --------------------------------------------------------------------
parcel_series_beta_corr = harmonic_removal(parcel_series_alpha, parcel_series_beta, int(fs), n=2, mp=True)
# --------------------------------------------------------------------
# regress out alpha from beta pair-wise
# --------------------------------------------------------------------
# Compute Connectivity ------------------------
# cross-frequency connectivity ..................
conn_mat_cfc_orig = compute_conn_2D_parallel(parcel_series_alpha, parcel_series_beta, 1, 2, fs, 'abs')
conn_mat_beta_orig = compute_conn_2D_parallel(parcel_series_beta, parcel_series_beta, 1, 1, fs, 'imag')
conn_mat_alpha_orig = compute_conn_2D_parallel(parcel_series_alpha, parcel_series_alpha, 1, 1, fs, 'imag')
# within-frequency connectivity ..................
conn_mat_beta_corr = \
compute_conn_2D_parallel(parcel_series_beta_corr, parcel_series_beta_corr, 1, 1, fs, 'imag')
conn_mat_cfc_corr = compute_conn_2D_parallel(parcel_series_alpha, parcel_series_beta_corr, 1, 2, fs, 'abs')
# --------------------------------------------------------------------
# rearrange label
# --------------------------------------------------------------------
beta_orig1 = np.abs(conn_beta_orig1[idx_sorted, :][:, idx_sorted])
alpha_orig1 = np.abs(conn_alpha_orig1[idx_sorted, :][:, idx_sorted])
cfc_orig1 = np.abs(conn_cfc_orig1[idx_sorted, :][:, idx_sorted])
beta_orig = np.abs(conn_mat_beta_orig[idx_sorted, :][:, idx_sorted])
alpha_orig = np.abs(conn_mat_alpha_orig[idx_sorted, :][:, idx_sorted])
cfc_orig = conn_mat_cfc_orig[idx_sorted, :][:, idx_sorted]
cfc_corr = conn_mat_cfc_corr[idx_sorted, :][:, idx_sorted]
beta_corr = | np.abs(conn_mat_beta_corr[idx_sorted, :][:, idx_sorted]) | numpy.abs |
from typing import Any
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from tqdm import tqdm
from changedet.algos.base import MetaAlgo
from changedet.algos.catalog import AlgoCatalog
from changedet.utils import InitialChangeMask, contrast_stretch, np_weight_stats
@AlgoCatalog.register("irmad")
class IRMAD(MetaAlgo):
"""Iteratively Reweighted Multivariate Alteration Detection
    The Multivariate Alteration Detection (MAD) algorithm aims to identify a linear transformation
    that minimises the correlation between the canonical components of the two images, thereby
    maximising change information. Iteratively Reweighted (IR)-MAD is an improvement on the MAD
    approach in which observations are iteratively reweighted in order to establish a better
    no-change background, which allows better separability between change and no-change.
Accepted flags
--------------
- niter = Number of iterations IRMAD should be run
- sig = Change map significance level
- icm = Initial change mask
References
----------
- <NAME>. (2007). The regularized iteratively reweighted MAD method for change detection
in multi- and hyperspectral data. IEEE Transactions on Image Processing, 16(2):463–478. Internet
http://www2.compute.dtu.dk/pubdb/pubs/4695-full.html.
"""
@classmethod
def run(cls, im1: np.ndarray, im2: np.ndarray, **flags: Any) -> np.ndarray:
"""Run IRMAD algorithm.
Args:
im1 (np.ndarray): Image 1 array
im2 (np.ndarray): Image 2 array
flags (dict): Flags for the algorithm
Run `changedet --algo irmad algo_obj --help` for information on flags.
"""
niter = flags.get("niter", 10)
sig = flags.get("sig", 0.0001)
apply_icm = flags.get("icm", False)
if apply_icm:
raise NotImplementedError(
"Initial Change Mask is under construction and not ready for use"
)
logger = flags["logger"]
logger.info(
"Running IRMAD algorithm for %d iteration(s) with significance level %f",
niter,
sig,
)
ch1, r1, c1 = im1.shape
m = r1 * c1
N = ch1
im1r = im1.reshape(N, m).T
im2r = im2.reshape(N, m).T
# Calculate ICM
if apply_icm:
icm = InitialChangeMask()
change_mask = icm.prepare(im1, im2, plot=True)
if change_mask is None:
logger.warn("Invalid threshold. Skipping ICM")
change_mask = | np.ones((m, 1)) | numpy.ones |
# -*- coding: utf-8 -*-
'''
@Time : 2020/05/06 21:09
@Author : Tianxiaomo
@File : dataset.py
@Noice :
@Modificattion :
@Author :
@Time :
@Detail :
'''
import os
import random
import sys
import cv2
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
def rand_uniform_strong(min, max):
if min > max:
swap = min
min = max
max = swap
return random.random() * (max - min) + min
def rand_scale(s):
scale = rand_uniform_strong(1, s)
if random.randint(0, 1) % 2:
return scale
return 1. / scale
def rand_precalc_random(min, max, random_part):
if max < min:
swap = min
min = max
max = swap
return (random_part * (max - min)) + min
def fill_truth_detection(bboxes, num_boxes, classes, flip, dx, dy, sx, sy, net_w, net_h):
if bboxes.shape[0] == 0:
return bboxes, 10000
np.random.shuffle(bboxes)
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(np.where(((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy)) |
((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx)) |
((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0)) |
((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0)))[0])
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
if bboxes.shape[0] == 0:
return bboxes, 10000
bboxes = bboxes[np.where((bboxes[:, 4] < classes) & (bboxes[:, 4] >= 0))[0]]
if bboxes.shape[0] > num_boxes:
bboxes = bboxes[:num_boxes]
min_w_h = np.array([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]]).min()
bboxes[:, 0] *= (net_w / sx)
bboxes[:, 2] *= (net_w / sx)
bboxes[:, 1] *= (net_h / sy)
bboxes[:, 3] *= (net_h / sy)
if flip:
temp = net_w - bboxes[:, 0]
bboxes[:, 0] = net_w - bboxes[:, 2]
bboxes[:, 2] = temp
return bboxes, min_w_h
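# Editor's worked example (illustration only): with net_w = 608 and a box that
# ends up at x1 = 100, x2 = 200 after rescaling, a horizontal flip maps it to
# x1 = 608 - 200 = 408 and x2 = 608 - 100 = 508, which is exactly what the
# temp-swap at the end of fill_truth_detection computes.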
def rect_intersection(a, b):
minx = max(a[0], b[0])
miny = max(a[1], b[1])
maxx = min(a[2], b[2])
maxy = min(a[3], b[3])
return [minx, miny, maxx, maxy]
def image_data_augmentation(mat, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur,
truth):
try:
img = mat
oh, ow, _ = img.shape
pleft, ptop, swidth, sheight = int(pleft), int(ptop), int(swidth), int(sheight)
# crop
src_rect = [pleft, ptop, swidth + pleft, sheight + ptop] # x1,y1,x2,y2
img_rect = [0, 0, ow, oh]
        new_src_rect = rect_intersection(src_rect, img_rect)  # intersection of crop rect and image rect
dst_rect = [max(0, -pleft), max(0, -ptop), max(0, -pleft) + new_src_rect[2] - new_src_rect[0],
max(0, -ptop) + new_src_rect[3] - new_src_rect[1]]
# cv2.Mat sized
if (src_rect[0] == 0 and src_rect[1] == 0 and src_rect[2] == img.shape[0] and src_rect[3] == img.shape[1]):
sized = cv2.resize(img, (w, h), cv2.INTER_LINEAR)
else:
cropped = np.zeros([sheight, swidth, 3])
cropped[:, :, ] = np.mean(img, axis=(0, 1))
cropped[dst_rect[1]:dst_rect[3], dst_rect[0]:dst_rect[2]] = \
img[new_src_rect[1]:new_src_rect[3], new_src_rect[0]:new_src_rect[2]]
# resize
sized = cv2.resize(cropped, (w, h), cv2.INTER_LINEAR)
# flip
if flip:
# cv2.Mat cropped
sized = cv2.flip(sized, 1) # 0 - x-axis, 1 - y-axis, -1 - both axes (x & y)
# HSV augmentation
# cv2.COLOR_BGR2HSV, cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2BGR, cv2.COLOR_HSV2RGB
if dsat != 1 or dexp != 1 or dhue != 0:
if img.shape[2] >= 3:
hsv_src = cv2.cvtColor(sized.astype(np.float32), cv2.COLOR_RGB2HSV) # RGB to HSV
hsv = cv2.split(hsv_src)
hsv[1] *= dsat
hsv[2] *= dexp
hsv[0] += 179 * dhue
hsv_src = cv2.merge(hsv)
sized = np.clip(cv2.cvtColor(hsv_src, cv2.COLOR_HSV2RGB), 0, 255) # HSV to RGB (the same as previous)
else:
sized *= dexp
if blur:
if blur == 1:
dst = cv2.GaussianBlur(sized, (17, 17), 0)
# cv2.bilateralFilter(sized, dst, 17, 75, 75)
                else:
                    ksize = int(blur // 2) * 2 + 1  # force an odd integer kernel size
                    dst = cv2.GaussianBlur(sized, (ksize, ksize), 0)
                if blur == 1:
                    # blur == 1 means "blur only the background": copy the sharp
                    # pixels back over every ground-truth box. At this point the
                    # truth boxes are absolute [x1, y1, x2, y2, cls] in network
                    # coordinates (see fill_truth_detection above).
                    h_img, w_img = sized.shape[:2]
                    for b in truth:
                        x1, y1 = int(max(0, b[0])), int(max(0, b[1]))
                        x2, y2 = int(min(w_img, b[2])), int(min(h_img, b[3]))
                        dst[y1:y2, x1:x2] = sized[y1:y2, x1:x2]
                sized = dst
            if gaussian_noise:
                # cv2.randn needs a destination array of the image's shape
                noise = np.zeros(sized.shape, dtype=np.float32)
                gaussian_noise = min(gaussian_noise, 127)
                gaussian_noise = max(gaussian_noise, 0)
                cv2.randn(noise, 0, gaussian_noise)  # mean and standard deviation
                sized = sized + noise
except:
print("OpenCV can't augment image: " + str(w) + " x " + str(h))
sized = mat
return sized
def filter_truth(bboxes, dx, dy, sx, sy, xd, yd):
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(np.where(((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy)) |
((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx)) |
((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0)) |
((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0)))[0])
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
bboxes[:, 0] += xd
bboxes[:, 2] += xd
bboxes[:, 1] += yd
bboxes[:, 3] += yd
return bboxes
def blend_truth_mosaic(out_img, img, bboxes, w, h, cut_x, cut_y, i_mixup,
left_shift, right_shift, top_shift, bot_shift):
left_shift = min(left_shift, w - cut_x)
top_shift = min(top_shift, h - cut_y)
right_shift = min(right_shift, cut_x)
bot_shift = min(bot_shift, cut_y)
if i_mixup == 0:
bboxes = filter_truth(bboxes, left_shift, top_shift, cut_x, cut_y, 0, 0)
out_img[:cut_y, :cut_x] = img[top_shift:top_shift + cut_y, left_shift:left_shift + cut_x]
if i_mixup == 1:
bboxes = filter_truth(bboxes, cut_x - right_shift, top_shift, w - cut_x, cut_y, cut_x, 0)
out_img[:cut_y, cut_x:] = img[top_shift:top_shift + cut_y, cut_x - right_shift:w - right_shift]
if i_mixup == 2:
bboxes = filter_truth(bboxes, left_shift, cut_y - bot_shift, cut_x, h - cut_y, 0, cut_y)
out_img[cut_y:, :cut_x] = img[cut_y - bot_shift:h - bot_shift, left_shift:left_shift + cut_x]
if i_mixup == 3:
bboxes = filter_truth(bboxes, cut_x - right_shift, cut_y - bot_shift, w - cut_x, h - cut_y, cut_x, cut_y)
out_img[cut_y:, cut_x:] = img[cut_y - bot_shift:h - bot_shift, cut_x - right_shift:w - right_shift]
return out_img, bboxes
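# Editor's note (illustration only): blend_truth_mosaic pastes crop number
# i_mixup into one quadrant of out_img, split at (cut_x, cut_y):
#   0 -> top-left, 1 -> top-right, 2 -> bottom-left, 3 -> bottom-right,
# while filter_truth shifts and clips the boxes into that quadrant's coordinates.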
def draw_box(img, bboxes):
for b in bboxes:
img = cv2.rectangle(img, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
return img
class Yolo_dataset(Dataset):
def __init__(self, lable_path, cfg, train=True):
super(Yolo_dataset, self).__init__()
        if cfg.mixup == 2:
            raise ValueError("cutmix=1 - isn't supported for Detector")
        elif cfg.mixup == 3 and cfg.letter_box:
            # cfg.mixup == 3 selects mosaic augmentation
            raise ValueError("Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters")
self.cfg = cfg
self.train = train
truth = {}
f = open(lable_path, 'r', encoding='utf-8')
for line in f.readlines():
data = line.split(" ")
truth[data[0]] = []
for i in data[1:]:
truth[data[0]].append([int(float(j)) for j in i.split(',')])
self.truth = truth
self.imgs = list(self.truth.keys())
def __len__(self):
return len(self.truth.keys())
def __getitem__(self, index):
if not self.train:
return self._get_val_item(index)
img_path = self.imgs[index]
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
use_mixup = self.cfg.mixup
if random.randint(0, 1):
use_mixup = 0
if use_mixup == 3:
min_offset = 0.2
cut_x = random.randint(int(self.cfg.w * min_offset), int(self.cfg.w * (1 - min_offset)))
cut_y = random.randint(int(self.cfg.h * min_offset), int(self.cfg.h * (1 - min_offset)))
r1, r2, r3, r4, r_scale = 0, 0, 0, 0, 0
dhue, dsat, dexp, flip, blur = 0, 0, 0, 0, 0
gaussian_noise = 0
out_img = np.zeros([self.cfg.h, self.cfg.w, 3])
out_bboxes = []
for i in range(use_mixup + 1):
if i != 0:
img_path = random.choice(list(self.truth.keys()))
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img is None:
continue
oh, ow, oc = img.shape
dh, dw, dc = np.array(np.array([oh, ow, oc]) * self.cfg.jitter, dtype=np.int)
dhue = rand_uniform_strong(-self.cfg.hue, self.cfg.hue)
dsat = rand_scale(self.cfg.saturation)
dexp = rand_scale(self.cfg.exposure)
pleft = random.randint(-dw, dw)
pright = random.randint(-dw, dw)
ptop = random.randint(-dh, dh)
pbot = random.randint(-dh, dh)
flip = random.randint(0, 1) if self.cfg.flip else 0
if (self.cfg.blur):
tmp_blur = random.randint(0, 2) # 0 - disable, 1 - blur background, 2 - blur the whole image
if tmp_blur == 0:
blur = 0
elif tmp_blur == 1:
blur = 1
else:
blur = self.cfg.blur
if self.cfg.gaussian and random.randint(0, 1):
gaussian_noise = self.cfg.gaussian
else:
gaussian_noise = 0
if self.cfg.letter_box:
img_ar = ow / oh
net_ar = self.cfg.w / self.cfg.h
result_ar = img_ar / net_ar
# print(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if result_ar > 1: # sheight - should be increased
oh_tmp = ow / net_ar
delta_h = (oh_tmp - oh) / 2
ptop = ptop - delta_h
pbot = pbot - delta_h
# print(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
else: # swidth - should be increased
ow_tmp = oh * net_ar
delta_w = (ow_tmp - ow) / 2
pleft = pleft - delta_w
pright = pright - delta_w
# printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
swidth = ow - pleft - pright
sheight = oh - ptop - pbot
truth, min_w_h = fill_truth_detection(bboxes, self.cfg.boxes, self.cfg.classes, flip, pleft, ptop, swidth,
sheight, self.cfg.w, self.cfg.h)
if (min_w_h / 8) < blur and blur > 1: # disable blur if one of the objects is too small
blur = min_w_h / 8
ai = image_data_augmentation(img, self.cfg.w, self.cfg.h, pleft, ptop, swidth, sheight, flip,
dhue, dsat, dexp, gaussian_noise, blur, truth)
if use_mixup == 0:
out_img = ai
out_bboxes = truth
if use_mixup == 1:
if i == 0:
old_img = ai.copy()
old_truth = truth.copy()
elif i == 1:
                out_img = cv2.addWeighted(ai, 0.5, old_img, 0.5, 0.0)
out_bboxes = np.concatenate([old_truth, truth], axis=0)
elif use_mixup == 3:
if flip:
tmp = pleft
pleft = pright
pright = tmp
left_shift = int(min(cut_x, max(0, (-int(pleft) * self.cfg.w / swidth))))
top_shift = int(min(cut_y, max(0, (-int(ptop) * self.cfg.h / sheight))))
right_shift = int(min((self.cfg.w - cut_x), max(0, (-int(pright) * self.cfg.w / swidth))))
bot_shift = int(min(self.cfg.h - cut_y, max(0, (-int(pbot) * self.cfg.h / sheight))))
out_img, out_bbox = blend_truth_mosaic(out_img, ai, truth.copy(), self.cfg.w, self.cfg.h, cut_x,
cut_y, i, left_shift, right_shift, top_shift, bot_shift)
out_bboxes.append(out_bbox)
# print(img_path)
if use_mixup == 3:
out_bboxes = np.concatenate(out_bboxes, axis=0)
out_bboxes1 = np.zeros([self.cfg.boxes, 5])
out_bboxes1[:min(out_bboxes.shape[0], self.cfg.boxes)] = out_bboxes[:min(out_bboxes.shape[0], self.cfg.boxes)]
return out_img, out_bboxes1
def _get_val_item(self, index):
"""
"""
img_path = self.imgs[index]
bboxes_with_cls_id = np.array(self.truth.get(img_path), dtype=np.float)
img = cv2.imread(os.path.join(self.cfg.dataset_dir, img_path))
# img_height, img_width = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (self.cfg.w, self.cfg.h))
# img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
num_objs = len(bboxes_with_cls_id)
target = {}
# boxes to coco format
boxes = bboxes_with_cls_id[...,:4]
boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2] # box width, box height
target['boxes'] = torch.as_tensor(boxes, dtype=torch.float32)
target['labels'] = torch.as_tensor(bboxes_with_cls_id[...,-1].flatten(), dtype=torch.int64)
target['image_id'] = torch.tensor([get_image_id(img_path)])
target['area'] = (target['boxes'][:,3])*(target['boxes'][:,2])
target['iscrowd'] = torch.zeros((num_objs,), dtype=torch.int64)
return img, target
## MOD
def get_image_id(filename:str) -> int:
"""
    Convert a string to an integer.
    Make sure that the images and the `image_id`s are in one-to-one correspondence.
There are already `image_id`s in annotations of the COCO dataset,
in which case this function is unnecessary.
For creating one's own `get_image_id` function, one can refer to
https://github.com/google/automl/blob/master/efficientdet/dataset/create_pascal_tfrecord.py#L86
or refer to the following code (where the filenames are like 'level1_123.jpg')
>>> lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
>>> lv = lv.replace("level", "")
>>> no = f"{int(no):04d}"
>>> return int(lv+no)
"""
# raise NotImplementedError("Create your own 'get_image_id' function")
# lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
# lv = lv.replace("level", "")
# no = f"{int(no):04d}"
id = os.path.splitext(os.path.basename(filename))[0]
return int(id)
if __name__ == "__main__":
from cfg import Cfg
import matplotlib.pyplot as plt
random.seed(2020)
| np.random.seed(2020) | numpy.random.seed |
import numpy as np
def conv2d(img, kernel, padding='valid'):
assert img.ndim == 2, 'Image needs to be in 2d array'
assert kernel.ndim == 2, 'Kernel needs to be in 2d array'
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, 'Please make odd kernel size'
if img.dtype == 'uint8':
img = img/255
s1 = np.array(img.shape) + np.array(kernel.shape) - 1
fsize = 2**np.ceil(np.log2(s1)).astype('int32')
fslice = tuple([slice(0, int(sz)) for sz in s1])
new_x = np.fft.fft2(img, fsize)
new_y = np.fft.fft2(kernel, fsize)
ret = np.fft.ifft2(new_x*new_y)[fslice]
ret = ret.real
if padding == 'full':
return ret
elif padding == 'same':
p = (kernel.shape[0] - 1)//2
else: # 'valid'
p = kernel.shape[0] - 1
return ret[p:-p, p:-p]
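# Hedged usage sketch (editor's note, assumes a square odd-sized kernel):
#   img = np.random.rand(8, 8)
#   k = np.ones((3, 3)) / 9.0                # simple box blur
#   out = conv2d(img, k, padding='same')     # out.shape == (8, 8)
# 'full' keeps the whole linear convolution; 'valid' trims kernel_size - 1
# pixels from each side of the FFT-based result.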
def rgb2hsv(img):
assert img.ndim == 3, 'Image needs to be in 3d'
if img.dtype == 'uint8':
img = img/255.0
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
mx = np.max(img, axis=2)
mn = np.min(img, axis=2)
df = mx - mn + 1e-7
hsv = np.zeros_like(img)
# H
idx = np.where(mx == mn)
hsv[idx[0], idx[1], 0] = 0
idx = np.where(mx == r)
hsv[idx[0], idx[1], 0] = (60*((g[idx[0], idx[1]] - b[idx[0], idx[1]])/df[idx[0], idx[1]]) + 360).astype('int32') % 360
idx = np.where(mx == g)
hsv[idx[0], idx[1], 0] = (60*((b[idx[0], idx[1]] - r[idx[0], idx[1]])/df[idx[0], idx[1]]) + 480).astype('int32') % 360
idx = np.where(mx == b)
hsv[idx[0], idx[1], 0] = (60*((r[idx[0], idx[1]] - g[idx[0], idx[1]])/df[idx[0], idx[1]]) + 600).astype('int32') % 360
# S
idx = np.where(mx == 0)
hsv[idx[0], idx[1], 1] = 0
idx = np.where(mx != 0)
hsv[idx[0], idx[1], 1] = df[idx[0], idx[1]]/mx[idx[0], idx[1]]
# V
hsv[:, :, 2] = mx
return hsv
def rgb2gray(img, method='avg', format='rgb'):
# format exists because cv2 load image in bgr order
assert img.ndim == 3, 'Image needs to be in 3d'
if img.dtype == 'uint8':
img = img/255.0
if method == 'avg':
return np.mean(img, axis=2)
else:
R = 0.299
G = 0.587
B = 0.114
return np.dot(img[..., :3], [R, G, B]) if format == 'rgb' else np.dot(img[..., :3], [B, G, R])
def sobel(img, return_direction=False):
Kx = np.asarray([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
Ky = np.asarray([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
Gx = conv2d(img, Kx)
Gy = conv2d(img, Ky)
    Gm = np.sqrt(Gx**2 + Gy**2)
if return_direction:
return Gm, np.arctan2(Gy, Gx)
else:
return Gm
def make_gaussian_kernel(size, sigma):
ax = np.arange(-size//2+1, size//2+1)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2)/(2.*(sigma**2)))
return kernel/kernel.sum()
def canny(img, k=11, sigma=1, alpha=0.1, beta=0.2, return_direction=False):
if img.ndim == 3:
img = rgb2gray(img)
Kg = make_gaussian_kernel(k, sigma)
img = conv2d(img, Kg)
Gm, Gd = sobel(img, return_direction=True)
Z = non_max_suspression(Gm, Gd, alpha, beta)
T = alpha* | np.max(Gm) | numpy.max |
import torch
import numpy as np
from mushroom_rl.approximators import Regressor
from mushroom_rl.approximators.parametric import TorchApproximator
from mushroom_rl.utils.torch import to_float_tensor
from mushroom_rl.utils.dataset import parse_dataset
from mushroom_rl.utils.value_functions import compute_gae
from mushroom_rl.algorithms.actor_critic.deep_actor_critic.ppo import PPO
from mushroom_rl.utils.minibatches import minibatch_generator
class GAIL(PPO):
"""
    Generative Adversarial Imitation Learning (GAIL) implementation. Uses
PPO policy updates instead of TRPO.
"Generative Adversarial Imitation Learning"
<NAME>., & <NAME>. (2016).
"""
def __init__(self, mdp_info, policy_class, policy_params,
discriminator_params, critic_params, actor_optimizer,
n_epochs_policy, n_epochs_discriminator, batch_size_policy,
eps_ppo, lam, demonstrations=None, env_reward_frac=0.0,
state_mask=None, act_mask=None,
critic_fit_params=None, discriminator_fit_params=None):
# initialize PPO agent
policy = policy_class(**policy_params)
super(GAIL, self).__init__(mdp_info, policy, actor_optimizer, critic_params,
n_epochs_policy, batch_size_policy, eps_ppo, lam,
critic_fit_params=critic_fit_params)
# discriminator params
self._discriminator_fit_params = (dict() if discriminator_fit_params is None
else discriminator_fit_params)
discriminator_params.setdefault("loss", torch.nn.BCELoss())
discriminator_params.setdefault("batch_size", 128)
self._D = Regressor(TorchApproximator, **discriminator_params)
self._n_epochs_discriminator = n_epochs_discriminator
self._env_reward_frac = env_reward_frac
self._demonstrations = demonstrations # should be: dict(states=np.array, actions=(np.array/None))
assert 0.0 <= env_reward_frac <= 1.0, "Environment reward must be between [0,1]"
assert demonstrations is not None or env_reward_frac == 1.0, "No demonstrations have been loaded"
# select which observations / actions to discriminate
if not "actions" in demonstrations:
act_mask = []
self._state_mask = np.arange(demonstrations["states"].shape[1]) \
if state_mask is None else np.array(state_mask, dtype=np.int64)
self._act_mask = np.arange(demonstrations["actions"].shape[1]) \
if act_mask is None else | np.array(act_mask, dtype=np.int64) | numpy.array |
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
    shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = | np.random.randint(1, 100, [shape.rows, shape.cols]) | numpy.random.randint |
import numpy as np
def standardize(x_test, x_train):
"""
standardizes the train and test data matrices
input:
x_test: matrix which contains test data
x_train: matrix which contains train data
return:
standardized matrices x_test, x_train
"""
for i in range(x_test.shape[1]):
x_test[:, i], x_train[:, i] = standardize_col(x_test[:, i], x_train[:, i])
return x_test, x_train
def standardize_col(x1, x2):
"""
    standardizes a pair of test/train data columns;
    -999 entries are treated as missing: they are excluded from the
    mean/std computation and then imputed before scaling
    input:
        x1: column of the (test) data matrix
        x2: column of the (train) data matrix
    return:
        standardized columns x1, x2
"""
index_x1 = np.where(x1 == -999)
index_x2 = np.where(x2 == -999)
x1_clean = np.delete(x1, index_x1)
x2_clean = np.delete(x2, index_x2)
x_clean = np.append(x1_clean, x2_clean)
    x1 = x1 - np.mean(x_clean, axis=0)
    x2 = x2 - np.mean(x_clean, axis=0)
    x1[index_x1] = np.mean(x_clean, axis=0)
    x2[index_x2] = np.mean(x_clean, axis=0)  # where -999
#x1[index_x1] = 0
#x2[index_x2] = 0 # where -999
std = np.std(np.append(x1, x2), ddof=1)
x1 = x1/std
x2 = x2/std
return x1, x2
def remove_outliers(x_train, ys_train):
"""
    discards samples (rows) that contain outliers,
    i.e. at least one value whose magnitude exceeds a fixed threshold
input:
x_train: matrix which contains train data
ys_train: array which contains labels
return:
train and label data without outliers
"""
index = []
threshold = 8.5
for i in range(x_train.shape[0]):
if np.amax(np.abs(x_train[i, :])) > threshold:
index.append(i)
x_train = np.delete(x_train, index, 0)
ys_train = | np.delete(ys_train, index, 0) | numpy.delete |
"""
Implementation of K-nearest neighbor (KNN) from scratch
where you can either use 2-loops (inefficient), 1-loop (better)
or a heavily vectorized zero-loop implementation.
Programmed by <NAME> <aladdin.persson at hotmail dot com>
* 2020-04-24 Initial coding
"""
import numpy as np
class KNearestNeighbor:
def __init__(self, k):
self.k = k
self.eps = 1e-8
def train(self, X, y):
self.X_train = X
self.y_train = y
def predict(self, X_test, num_loops=0):
if num_loops == 0:
distances = self.compute_distance_vectorized(X_test)
elif num_loops == 1:
distances = self.compute_distance_one_loop(X_test)
else:
distances = self.compute_distance_two_loops(X_test)
return self.predict_labels(distances)
def compute_distance_two_loops(self, X_test):
"""
Inefficient naive implementation, use only
as a way of understanding what kNN is doing
"""
num_test = X_test.shape[0]
num_train = self.X_train.shape[0]
distances = | np.zeros((num_test, num_train)) | numpy.zeros |
from typing import Union, Dict, List, Optional
from pathlib import Path
from datetime import date
import numpy as np
from tqdm import tqdm
import pandas as pd
from .utils import get_basin_list
from .datautils import load_forcing, load_discharge
def split_basins(
camels_root: Union[str, Path],
basin_list: Union[str, Path],
split: List[float],
store_folder: Union[str, Path],
timeseries: List[str],
dataset: List[str],
seed: int,
normalize: bool = True,
):
if isinstance(basin_list, str):
basin_list = Path(basin_list)
elif not isinstance(basin_list, Path):
raise TypeError(f"basin_list must be Path or str, not {type(basin_list)}")
if isinstance(store_folder, str):
store_folder = Path(store_folder)
elif not isinstance(store_folder, Path):
        raise TypeError(f"store_folder must be Path or str, not {type(store_folder)}")
if sum(split) > 1:
raise ValueError(f"sum of splits must be 1, not {sum(split)}")
if len(split) not in (2, 3):
raise ValueError(f"length of split must be 2 or 3, not {len(split)}")
np.random.seed(seed)
store_folder = store_folder / f"split_seed_{seed}"
store_folder.mkdir(parents=True, exist_ok=True)
basins = np.loadtxt(basin_list, dtype="str")
np.random.shuffle(basins)
if len(split) == 2:
basins_test = basins[: int(len(basins) * split[1])]
basins_train = basins[int(len(basins) * split[1]) :]
else:
basins_test = basins[: int(len(basins) * split[2])]
basins_validation = basins[
int(len(basins) * split[2])
: int(len(basins) * split[1]) + int(len(basins) * split[2])
]
basins_train = basins[
int(len(basins) * split[1]) + int(len(basins) * split[2]) :
]
np.savetxt(store_folder / "basins_test.txt", basins_test, fmt="%s")
np.savetxt(store_folder / "basins_train.txt", basins_train, fmt="%s")
if len(split) == 3:
np.savetxt(store_folder / "basins_validation.txt", basins_validation, fmt="%s")
if normalize:
create_normalization_file(
camels_root,
store_folder / "basins_train.txt",
dataset=dataset,
timeseries=timeseries,
)
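# Example invocation (illustrative only -- the paths, forcing column names and
# dataset label below are hypothetical, not taken from this repository):
#
#   split_basins(
#       camels_root="/data/CAMELS",             # hypothetical dataset location
#       basin_list="data/basin_list.txt",       # one basin id per line
#       split=[0.7, 0.3],                       # ~70 % train / ~30 % test
#       store_folder="runs/splits",
#       timeseries=["prcp(mm/day)", "tmax(C)"],  # hypothetical forcing columns
#       dataset=["daymet"],                      # hypothetical dataset label
#       seed=42,
#   )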
def cross_validation_split(
camels_root: Union[str, Path],
basin_list: Union[str, Path],
k: int,
test_split: float,
store_folder: Union[str, Path],
seed: int,
dataset: List[str],
timeseries: List[str],
normalize: bool = True,
):
if isinstance(basin_list, str):
basin_list = Path(basin_list)
elif not isinstance(basin_list, Path):
raise TypeError(f"basin_list must be Path or str, not {type(basin_list)}")
if isinstance(store_folder, str):
store_folder = Path(store_folder)
elif not isinstance(store_folder, Path):
        raise TypeError(f"store_folder must be Path or str, not {type(store_folder)}")
store_folder = store_folder / f"cross_validation_seed_{seed}"
store_folder.mkdir(parents=True, exist_ok=True)
np.random.seed(seed)
basins = np.loadtxt(basin_list, dtype="str")
np.random.shuffle(basins)
basins_test = basins[: int(len(basins) * test_split)]
basins = basins[int(len(basins) * test_split) :]
basins_split = np.array_split(basins, k)
| np.savetxt(store_folder / "basins_test.txt", basins_test, fmt="%s") | numpy.savetxt |
# Copyright (c) 2009-2017 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Module to calculate motif scoring metrics.
Includes ROC AUC, MNCP, enrichment and others, which are calculated
on the basis of motif scanning results.
"""
# External imports
from scipy.stats import stats, scoreatpercentile, kstest, fisher_exact
from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve, average_precision_score
import numpy as np
__all__ = [
"recall_at_fdr",
"fraction_fpr",
"score_at_fpr",
"enr_at_fpr",
"max_enrichment",
"phyper_at_fpr",
"mncp",
"roc_auc",
"roc_auc_xlim",
"pr_auc",
"max_fmeasure",
"ks_pvalue",
"ks_significance",
]
def requires_scores(f):
f.input_type = "score"
return f
def requires_positions(f):
f.input_type = "pos"
return f
@requires_scores
def values_to_labels(fg_vals, bg_vals):
"""
Convert two arrays of values to an array of labels and an array of scores.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
y_true : array
Labels.
y_score : array
Values.
"""
y_true = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
y_score = np.hstack((fg_vals, bg_vals))
return y_true, y_score
@requires_scores
def recall_at_fdr(fg_vals, bg_vals, fdr_cutoff=0.1):
"""
Computes the recall at a specific FDR (default 10%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
    fdr_cutoff : float, optional
        The FDR cutoff (between 0.0 and 1.0).
Returns
-------
recall : float
The recall at the specified FDR.
"""
if len(fg_vals) == 0:
return 0.0
y_true, y_score = values_to_labels(fg_vals, bg_vals)
precision, recall, _ = precision_recall_curve(y_true, y_score)
fdr = 1 - precision
cutoff_index = next(i for i, x in enumerate(fdr) if x <= fdr_cutoff)
return recall[cutoff_index]
@requires_scores
def matches_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
    Computes the number of foreground and background matches at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
    matches : list of int
        The number of foreground and background matches at the specified FPR.
"""
fg_vals = np.array(fg_vals)
s = scoreatpercentile(bg_vals, 100 - fpr * 100)
return [sum(fg_vals >= s), sum(bg_vals >= s)]
@requires_scores
def phyper_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the hypergeometric p-value at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
    pvalue : float
        The hypergeometric p-value at the specified FPR.
"""
fg_vals = np.array(fg_vals)
s = scoreatpercentile(bg_vals, 100 - fpr * 100)
table = [
[sum(fg_vals >= s), sum(bg_vals >= s)],
[sum(fg_vals < s), sum(bg_vals < s)],
]
return fisher_exact(table, alternative="greater")[1]
@requires_scores
def fraction_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the fraction positives at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
fraction : float
The fraction positives at the specified FPR.
"""
fg_vals = np.array(fg_vals)
s = scoreatpercentile(bg_vals, 100 - 100 * fpr)
return len(fg_vals[fg_vals >= s]) / float(len(fg_vals))
@requires_scores
def score_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Returns the motif score at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
score : float
The motif score at the specified FPR.
"""
bg_vals = np.array(bg_vals)
return scoreatpercentile(bg_vals, 100 - 100 * fpr)
@requires_scores
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
"""
pos = | np.array(fg_vals) | numpy.array |
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
        It is kept here to verify that the new implementation produces the
        same result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
_stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
_stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = _stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
        # even prior to #3845 no problematic configuration could be found;
        # still, let's be sure one is not accidentally introduced
y_strided = _stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
        It is kept here to verify that the new implementation produces the
        same result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = _apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = _apply_window(x, window, axis=1, return_window=False)
yt = _apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = _apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = _apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
class TestDetrend:
def setup(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_0D_zeros_axis1(self):
input = 0.
targ = input
mlab.detrend_none(input, axis=1)
assert input == targ
def test_detrend_str_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key='none')
assert input == targ
def test_detrend_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key=mlab.detrend_none)
assert input == targ
def test_detrend_none_0D_off(self):
input = 5.5
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = | np.vstack(arri) | numpy.vstack |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import numpy as np
from graphdot.model.gaussian_process import LowRankApproximateGPR
np.random.seed(0)
@pytest.mark.parametrize('X', [
np.arange(25),
np.linspace(0, 1, 25),
np.linspace(-1, 1, 25),
])
@pytest.mark.parametrize('y', [
np.zeros(25),
-np.ones(25),
np.ones(25),
np.arange(25),
np.sin(np.linspace(0, 1, 25) * 2 * np.pi),
np.linspace(-1, 1, 25),
] + [
np.random.rand(25) for _ in range(5)
] + [
np.random.randn(25) for _ in range(5)
])
def test_nystrom_fit_self_consistency(X, y):
class Kernel:
def __call__(self, X, Y=None, s=0.01):
return np.exp(
-np.subtract.outer(X, Y if Y is not None else X)**2 / s**2
)
def diag(self, X):
return | np.ones_like(X) | numpy.ones_like |
import numpy as np
import pytest
from scipy.stats import (bootstrap, BootstrapDegenerateDistributionWarning,
monte_carlo_test, permutation_test)
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
message = "`data` must be a sequence of samples."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean)
message = "`data` must contain at least one sample."
with pytest.raises(ValueError, match=message):
bootstrap(tuple(), np.mean)
message = "each sample in `data` must contain two or more observations..."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1]), np.mean)
message = ("When `paired is True`, all samples must have the same length ")
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean, vectorized='ekki')
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, axis=1.5)
message = "could not convert string to float"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
message = "`method` must be in"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, method='ekki')
message = "`method = 'BCa' is only available for one-sample statistics"
def statistic(x, y, axis):
mean1 = np.mean(x, axis)
mean2 = np.mean(y, axis)
return mean1 - mean2
with pytest.raises(ValueError, match=message):
bootstrap(([.1, .2, .3], [.1, .2, .3]), statistic, method='BCa')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, random_state='herring')
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
# for one-sample statistics, batch size shouldn't affect the result
np.random.seed(0)
x = np.random.rand(10, 11, 12)
res1 = bootstrap((x,), np.mean, batch=None, method=method,
random_state=0, axis=axis, n_resamples=100)
res2 = bootstrap((x,), np.mean, batch=10, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
# test that `paired` works as expected
np.random.seed(0)
n = 100
x = np.random.rand(n)
y = np.random.rand(n)
def my_statistic(x, y, axis=-1):
return ((x-y)**2).mean(axis=axis)
def my_paired_statistic(i, axis=-1):
a = x[i]
b = y[i]
res = my_statistic(a, b)
return res
i = np.arange(len(x))
res1 = bootstrap((i,), my_paired_statistic, random_state=0)
res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
# test that paired is vectorized as expected: when samples are tiled,
# CI and standard_error of each axis-slice is the same as those of the
# original 1d sample
if not paired and method == 'BCa':
# should re-assess when BCa is extended
pytest.xfail(reason="BCa currently for 1-sample statistics only")
np.random.seed(0)
def my_statistic(x, y, z, axis=-1):
return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)
shape = 10, 11, 12
n_samples = shape[axis]
x = np.random.rand(n_samples)
y = np.random.rand(n_samples)
z = np.random.rand(n_samples)
res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=0, n_resamples=100)
reshape = [1, 1, 1]
reshape[axis] = n_samples
x = np.broadcast_to(x.reshape(reshape), shape)
y = np.broadcast_to(y.reshape(reshape), shape)
z = np.broadcast_to(z.reshape(reshape), shape)
res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_allclose(res2.confidence_interval.low,
res1.confidence_interval.low)
assert_allclose(res2.confidence_interval.high,
res1.confidence_interval.high)
assert_allclose(res2.standard_error, res1.standard_error)
result_shape = list(shape)
result_shape.pop(axis)
assert_equal(res2.confidence_interval.low.shape, result_shape)
assert_equal(res2.confidence_interval.high.shape, result_shape)
assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
# based on https://www.statology.org/confidence-intervals-python/
data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=0)
alpha = 0.95
dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
expected_interval = dist.interval(confidence=alpha)
expected_se = dist.std()
res = bootstrap((data,), np.mean, n_resamples=5000,
confidence_level=alpha, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
assert_allclose(res.standard_error, expected_se, atol=3e-4)
tests_R = {"basic": (23.77, 79.12),
"percentile": (28.86, 84.21),
"BCa": (32.31, 91.43)}
@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
# Compare against R's "boot" library
# library(boot)
# stat <- function (x, a) {
# mean(x[a])
# }
# x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
# 23, 34, 50, 81, 89, 121, 134, 213)
# # Use a large value so we get a few significant digits for the CI.
# n = 1000000
# bootresult = boot(x, stat, n)
# result <- boot.ci(bootresult)
# print(result)
x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
23, 34, 50, 81, 89, 121, 134, 213])
res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected, rtol=0.005)
tests_against_itself_1samp = {"basic": 1780,
"percentile": 1784,
"BCa": 1784}
@pytest.mark.parametrize("method, expected",
tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n = 100 # size of sample
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The true mean is 5
dist = stats.norm(loc=5, scale=1)
stat_true = dist.mean()
# Do the same thing 2000 times. (The code is fully vectorized.)
n_replications = 2000
data = dist.rvs(size=(n_replications, n))
res = bootstrap((data,),
statistic=np.mean,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
tests_against_itself_2samp = {"basic": 892,
"percentile": 890}
@pytest.mark.parametrize("method, expected",
tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n1 = 100 # size of sample 1
n2 = 120 # size of sample 2
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The statistic we're interested in is the difference in means
def my_stat(data1, data2, axis=-1):
mean1 = np.mean(data1, axis=axis)
mean2 = np.mean(data2, axis=axis)
return mean1 - mean2
# The true difference in the means is -0.1
dist1 = stats.norm(loc=0, scale=1)
dist2 = stats.norm(loc=0.1, scale=1)
stat_true = dist1.mean() - dist2.mean()
# Do the same thing 1000 times. (The code is fully vectorized.)
n_replications = 1000
data1 = dist1.rvs(size=(n_replications, n1))
data2 = dist2.rvs(size=(n_replications, n2))
res = bootstrap((data1, data2),
statistic=my_stat,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
def statistic(*data, axis=0):
# an arbitrary, vectorized statistic
return sum((sample.mean(axis) for sample in data))
def statistic_1d(*data):
# the same statistic, not vectorized
for sample in data:
assert sample.ndim == 1
return statistic(*data, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
y = np.random.rand(4, 5)
z = np.random.rand(4, 5)
res1 = bootstrap((x, y, z), statistic, vectorized=True,
axis=axis, n_resamples=100, method=method, random_state=0)
res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
axis=axis, n_resamples=100, method=method, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
def statistic(x, axis=0):
# an arbitrary, vectorized statistic
return x.mean(axis=axis)
def statistic_1d(x):
# the same statistic, not vectorized
assert x.ndim == 1
return statistic(x, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
n_resamples=100, batch=None, method=method,
random_state=0)
res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
n_resamples=100, batch=10, method=method,
random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
data = 35 * [10000.]
if method == "BCa":
with np.errstate(invalid='ignore'):
with pytest.warns(BootstrapDegenerateDistributionWarning):
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (np.nan, np.nan))
else:
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (10000., 10000.))
assert_equal(res.standard_error, 0)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_gh15678(method):
# Check that gh-15678 is fixed: when statistic function returned a Python
# float, method="BCa" failed when trying to add a dimension to the float
rng = np.random.default_rng(354645618886684)
dist = stats.norm(loc=2, scale=4)
data = dist.rvs(size=100, random_state=rng)
data = (data,)
res = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563))
# this always worked because np.apply_along_axis returns NumPy data type
ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563), vectorized=False)
assert_allclose(res.confidence_interval, ref.confidence_interval)
assert_allclose(res.standard_error, ref.standard_error)
assert isinstance(res.standard_error, np.float64)
def test_jackknife_resample():
shape = 3, 4, 5, 6
np.random.seed(0)
x = np.random.rand(*shape)
y = next(_resampling._jackknife_resample(x))
for i in range(shape[-1]):
# each resample is indexed along second to last axis
# (last axis is the one the statistic will be taken over / consumed)
slc = y[..., i, :]
expected = np.delete(x, i, axis=-1)
assert np.array_equal(slc, expected)
y2 = np.concatenate(list(_resampling._jackknife_resample(x, batch=2)),
axis=-2)
assert np.array_equal(y2, y)
@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"])
def test_bootstrap_resample(rng_name):
rng = getattr(np.random, rng_name, None)
if rng is None:
pytest.skip(f"{rng_name} not available.")
rng1 = rng(0)
rng2 = rng(0)
n_resamples = 10
shape = 3, 4, 5, 6
np.random.seed(0)
x = np.random.rand(*shape)
y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1)
for i in range(n_resamples):
# each resample is indexed along second to last axis
# (last axis is the one the statistic will be taken over / consumed)
slc = y[..., i, :]
js = rng_integers(rng2, 0, shape[-1], shape[-1])
expected = x[..., js]
assert np.array_equal(slc, expected)
@pytest.mark.parametrize("score", [0, 0.5, 1])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_percentile_of_score(score, axis):
shape = 10, 20, 30
np.random.seed(0)
x = np.random.rand(*shape)
p = _resampling._percentile_of_score(x, score, axis=-1)
def vectorized_pos(a, score, axis):
return np.apply_along_axis(stats.percentileofscore, axis, a, score)
p2 = vectorized_pos(x, score, axis=-1)/100
assert_allclose(p, p2, 1e-15)
def test_percentile_along_axis():
# the difference between _percentile_along_axis and np.percentile is that
# np.percentile gets _all_ the qs for each axis slice, whereas
# _percentile_along_axis gets the q corresponding with each axis slice
shape = 10, 20
np.random.seed(0)
x = np.random.rand(*shape)
q = np.random.rand(*shape[:-1]) * 100
y = _resampling._percentile_along_axis(x, q)
for i in range(shape[0]):
res = y[i]
expected = np.percentile(x[i], q[i], axis=-1)
assert_allclose(res, expected, 1e-15)
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_vectorize_statistic(axis):
# test that _vectorize_statistic vectorizes a statistic along `axis`
def statistic(*data, axis):
# an arbitrary, vectorized statistic
return sum((sample.mean(axis) for sample in data))
def statistic_1d(*data):
# the same statistic, not vectorized
for sample in data:
assert sample.ndim == 1
return statistic(*data, axis=0)
# vectorize the non-vectorized statistic
statistic2 = _resampling._vectorize_statistic(statistic_1d)
np.random.seed(0)
x = np.random.rand(4, 5, 6)
y = np.random.rand(4, 1, 6)
z = np.random.rand(1, 5, 6)
res1 = statistic(x, y, z, axis=axis)
res2 = statistic2(x, y, z, axis=axis)
assert_allclose(res1, res2)
@pytest.mark.xslow()
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_vector_valued_statistic(method):
# Generate 95% confidence interval around MLE of normal distribution
# parameters. Repeat 100 times, each time on sample of size 100.
# Check that confidence interval contains true parameters ~95 times.
# Confidence intervals are estimated and stochastic; a test failure
# does not necessarily indicate that something is wrong. More important
# than values of `counts` below is that the shapes of the outputs are
# correct.
rng = np.random.default_rng(2196847219)
params = 1, 0.5
sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng)
def statistic(data):
return stats.norm.fit(data)
res = bootstrap((sample,), statistic, method=method, axis=-1,
vectorized=False)
counts = np.sum((res.confidence_interval.low.T < params)
& (res.confidence_interval.high.T > params),
axis=0)
assert np.all(counts >= 90)
assert np.all(counts <= 100)
assert res.confidence_interval.low.shape == (2, 100)
assert res.confidence_interval.high.shape == (2, 100)
assert res.standard_error.shape == (2, 100)
# --- Test Monte Carlo Hypothesis Test --- #
class TestMonteCarloHypothesisTest:
atol = 2.5e-2 # for comparing p-value
def rvs(self, rvs_in, rs):
return lambda *args, **kwds: rvs_in(*args, random_state=rs, **kwds)
def test_input_validation(self):
# test that the appropriate error messages are raised for invalid input
def stat(x):
return stats.skewnorm(x).statistic
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, axis=1.5)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, vectorized=1.5)
message = "`rvs` must be callable."
with pytest.raises(TypeError, match=message):
monte_carlo_test([1, 2, 3], None, stat)
message = "`statistic` must be callable."
with pytest.raises(TypeError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, None)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=1000.5)
message = "`alternative` must be in..."
with pytest.raises(ValueError, match=message):
monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
alternative='ekki')
def test_batch(self):
# make sure that the `batch` parameter is respected by checking the
# maximum batch size provided in calls to `statistic`
rng = np.random.default_rng(23492340193)
x = rng.random(10)
def statistic(x, axis):
batch_size = 1 if x.ndim == 1 else len(x)
statistic.batch_size = max(batch_size, statistic.batch_size)
statistic.counter += 1
return stats.skewtest(x, axis=axis).statistic
statistic.counter = 0
statistic.batch_size = 0
kwds = {'sample': x, 'statistic': statistic,
'n_resamples': 1000, 'vectorized': True}
kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
res1 = monte_carlo_test(batch=1, **kwds)
assert_equal(statistic.counter, 1001)
assert_equal(statistic.batch_size, 1)
kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
statistic.counter = 0
res2 = monte_carlo_test(batch=50, **kwds)
assert_equal(statistic.counter, 21)
assert_equal(statistic.batch_size, 50)
kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
statistic.counter = 0
res3 = monte_carlo_test(**kwds)
assert_equal(statistic.counter, 2)
assert_equal(statistic.batch_size, 1000)
assert_equal(res1.pvalue, res3.pvalue)
assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('axis', range(-3, 3))
def test_axis(self, axis):
# test that Nd-array samples are handled correctly for valid values
# of the `axis` parameter
rng = np.random.default_rng(2389234)
norm_rvs = self.rvs(stats.norm.rvs, rng)
size = [2, 3, 4]
size[axis] = 100
x = norm_rvs(size=size)
expected = stats.skewtest(x, axis=axis)
def statistic(x, axis):
return stats.skewtest(x, axis=axis).statistic
res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
n_resamples=20000, axis=axis)
assert_allclose(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('alternative', ("less", "greater"))
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness
def test_against_ks_1samp(self, alternative, a):
# test that monte_carlo_test can reproduce pvalue of ks_1samp
rng = np.random.default_rng(65723433)
x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative)
def statistic1d(x):
return stats.ks_1samp(x, stats.norm.cdf, mode='asymp',
alternative=alternative).statistic
norm_rvs = self.rvs(stats.norm.rvs, rng)
res = monte_carlo_test(x, norm_rvs, statistic1d,
n_resamples=1000, vectorized=False,
alternative=alternative)
assert_allclose(res.statistic, expected.statistic)
if alternative == 'greater':
assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
elif alternative == 'less':
assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest))
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
@pytest.mark.parametrize('a', np.linspace(-2, 2, 5)) # skewness
def test_against_normality_tests(self, hypotest, alternative, a):
# test that monte_carlo_test can reproduce pvalue of normality tests
rng = np.random.default_rng(85723405)
x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
expected = hypotest(x, alternative=alternative)
def statistic(x, axis):
return hypotest(x, axis=axis).statistic
norm_rvs = self.rvs(stats.norm.rvs, rng)
res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
alternative=alternative)
assert_allclose(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.arange(-2, 3)) # skewness parameter
def test_against_normaltest(self, a):
# test that monte_carlo_test can reproduce pvalue of normaltest
rng = np.random.default_rng(12340513)
x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
expected = stats.normaltest(x)
def statistic(x, axis):
return stats.normaltest(x, axis=axis).statistic
norm_rvs = self.rvs(stats.norm.rvs, rng)
res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
alternative='greater')
assert_allclose(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness
def test_against_cramervonmises(self, a):
# test that monte_carlo_test can reproduce pvalue of cramervonmises
rng = np.random.default_rng(234874135)
x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
expected = stats.cramervonmises(x, stats.norm.cdf)
def statistic1d(x):
return stats.cramervonmises(x, stats.norm.cdf).statistic
norm_rvs = self.rvs(stats.norm.rvs, rng)
res = monte_carlo_test(x, norm_rvs, statistic1d,
n_resamples=1000, vectorized=False,
alternative='greater')
assert_allclose(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('dist_name', ('norm', 'logistic'))
@pytest.mark.parametrize('i', range(5))
def test_against_anderson(self, dist_name, i):
# test that monte_carlo_test can reproduce results of `anderson`. Note:
# `anderson` does not provide a p-value; it provides a list of
# significance levels and the associated critical value of the test
        # statistic. `i` is used to index this list.
# find the skewness for which the sample statistic matches one of the
# critical values provided by `stats.anderson`
def fun(a):
rng = np.random.default_rng(394295467)
x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
expected = stats.anderson(x, dist_name)
return expected.statistic - expected.critical_values[i]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sol = root(fun, x0=0)
assert(sol.success)
# get the significance level (p-value) associated with that critical
# value
a = sol.x[0]
rng = np.random.default_rng(394295467)
x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
expected = stats.anderson(x, dist_name)
expected_stat = expected.statistic
expected_p = expected.significance_level[i]/100
# perform equivalent Monte Carlo test and compare results
def statistic1d(x):
return stats.anderson(x, dist_name).statistic
dist_rvs = self.rvs(getattr(stats, dist_name).rvs, rng)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = monte_carlo_test(x, dist_rvs,
statistic1d, n_resamples=1000,
vectorized=False, alternative='greater')
assert_allclose(res.statistic, expected_stat)
assert_allclose(res.pvalue, expected_p, atol=2*self.atol)
class TestPermutationTest:
rtol = 1e-14
# -- Input validation -- #
def test_permutation_test_iv(self):
def stat(x, y, axis):
            return stats.ttest_ind(x, y, axis=axis).statistic
message = "each sample in `data` must contain two or more ..."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1]), stat)
message = "`data` must be a tuple containing at least two samples"
with pytest.raises(ValueError, match=message):
permutation_test((1,), stat)
with pytest.raises(TypeError, match=message):
permutation_test(1, stat)
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5)
message = "`permutation_type` must be in..."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat,
permutation_type="ekki")
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5)
message = "`alternative` must be in..."
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
permutation_test(([1, 2, 3], [1, 2, 3]), stat,
random_state='herring')
# -- Test Parameters -- #
@pytest.mark.parametrize('permutation_type',
['pairings', 'samples', 'independent'])
def test_batch(self, permutation_type):
# make sure that the `batch` parameter is respected by checking the
# maximum batch size provided in calls to `statistic`
np.random.seed(0)
x = np.random.rand(10)
y = np.random.rand(10)
def statistic(x, y, axis):
batch_size = 1 if x.ndim == 1 else len(x)
statistic.batch_size = max(batch_size, statistic.batch_size)
statistic.counter += 1
return np.mean(x, axis=axis) - np.mean(y, axis=axis)
statistic.counter = 0
statistic.batch_size = 0
kwds = {'n_resamples': 1000, 'permutation_type': permutation_type,
'vectorized': True, 'random_state': 0}
res1 = stats.permutation_test((x, y), statistic, batch=1, **kwds)
assert_equal(statistic.counter, 1001)
assert_equal(statistic.batch_size, 1)
statistic.counter = 0
res2 = stats.permutation_test((x, y), statistic, batch=50, **kwds)
assert_equal(statistic.counter, 21)
assert_equal(statistic.batch_size, 50)
statistic.counter = 0
res3 = stats.permutation_test((x, y), statistic, batch=1000, **kwds)
assert_equal(statistic.counter, 2)
assert_equal(statistic.batch_size, 1000)
assert_equal(res1.pvalue, res3.pvalue)
assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('permutation_type, exact_size',
[('pairings', special.factorial(3)**2),
('samples', 2**3),
('independent', special.binom(6, 3))])
def test_permutations(self, permutation_type, exact_size):
# make sure that the `permutations` parameter is respected by checking
# the size of the null distribution
np.random.seed(0)
x = np.random.rand(3)
y = np.random.rand(3)
def statistic(x, y, axis):
return np.mean(x, axis=axis) - np.mean(y, axis=axis)
kwds = {'permutation_type': permutation_type,
'vectorized': True, 'random_state': 0}
res = stats.permutation_test((x, y), statistic, n_resamples=3, **kwds)
assert_equal(res.null_distribution.size, 3)
res = stats.permutation_test((x, y), statistic, **kwds)
assert_equal(res.null_distribution.size, exact_size)
# -- Randomized Permutation Tests -- #
# To get reasonable accuracy, these next three tests are somewhat slow.
# Originally, I had them passing for all combinations of permutation type,
# alternative, and RNG, but that takes too long for CI. Instead, split
# into three tests, each testing a particular combination of the three
# parameters.
def test_randomized_test_against_exact_both(self):
# check that the randomized and exact tests agree to reasonable
        # precision for permutation_type='independent'
alternative, rng = 'less', 0
nx, ny, permutations = 8, 9, 24000
assert special.binom(nx + ny, nx) > permutations
x = stats.norm.rvs(size=nx)
y = stats.norm.rvs(size=ny)
data = x, y
def statistic(x, y, axis):
return np.mean(x, axis=axis) - np.mean(y, axis=axis)
kwds = {'vectorized': True, 'permutation_type': 'independent',
'batch': 100, 'alternative': alternative, 'random_state': rng}
res = permutation_test(data, statistic, n_resamples=permutations,
**kwds)
res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
assert res.statistic == res2.statistic
assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.slow()
def test_randomized_test_against_exact_samples(self):
# check that the randomized and exact tests agree to reasonable
# precision for permutation_type='samples'
alternative, rng = 'greater', None
nx, ny, permutations = 15, 15, 32000
assert 2**nx > permutations
x = stats.norm.rvs(size=nx)
y = stats.norm.rvs(size=ny)
data = x, y
def statistic(x, y, axis):
return np.mean(x - y, axis=axis)
kwds = {'vectorized': True, 'permutation_type': 'samples',
'batch': 100, 'alternative': alternative, 'random_state': rng}
res = permutation_test(data, statistic, n_resamples=permutations,
**kwds)
res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
assert res.statistic == res2.statistic
assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
def test_randomized_test_against_exact_pairings(self):
# check that the randomized and exact tests agree to reasonable
# precision for permutation_type='pairings'
alternative = 'two-sided'
try:
rng = np.random.default_rng(1)
except AttributeError:
rng = np.random.RandomState(1)
nx, ny, permutations = 8, 8, 40000
assert special.factorial(nx) > permutations
x = stats.norm.rvs(size=nx)
y = stats.norm.rvs(size=ny)
data = [x]
def statistic1d(x):
return stats.pearsonr(x, y)[0]
statistic = _resampling._vectorize_statistic(statistic1d)
        kwds = {'vectorized': True, 'permutation_type': 'pairings',
'batch': 100, 'alternative': alternative, 'random_state': rng}
res = permutation_test(data, statistic, n_resamples=permutations,
**kwds)
res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
assert res.statistic == res2.statistic
assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.parametrize('alternative', ('less', 'greater'))
# Different conventions for two-sided p-value here VS ttest_ind.
# Eventually, we can add multiple options for the two-sided alternative
# here in permutation_test.
@pytest.mark.parametrize('permutations', (30, 1e9))
@pytest.mark.parametrize('axis', (0, 1, 2))
def test_against_permutation_ttest(self, alternative, permutations, axis):
# check that this function and ttest_ind with permutations give
# essentially identical results.
x = np.arange(3*4*5).reshape(3, 4, 5)
y = np.moveaxis(np.arange(4)[:, None, None], 0, axis)
res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis,
random_state=0, alternative=alternative)
def statistic(x, y, axis):
return stats.ttest_ind(x, y, axis=axis).statistic
res2 = permutation_test((x, y), statistic, vectorized=True,
n_resamples=permutations,
alternative=alternative, axis=axis,
random_state=0)
assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
if permutations == 30:
# Even one-sided p-value is defined differently in ttest_ind for
# randomized tests. See permutation_test references [2] and [3].
assert_allclose((res1.pvalue*30+1)/31, res2.pvalue, rtol=self.rtol)
else:
assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
# -- Independent (Unpaired) Sample Tests -- #
def test_against_kstest(self):
np.random.seed(0)
x = stats.norm.rvs(size=4, scale=1)
y = stats.norm.rvs(size=5, loc=3, scale=3)
alternative = 'greater'
expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact')
def statistic1d(x, y):
return stats.ks_2samp(x, y, mode='asymp',
alternative=alternative).statistic
res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
alternative=alternative)
assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_ansari(self, alternative):
np.random.seed(0)
x = stats.norm.rvs(size=4, scale=1)
y = stats.norm.rvs(size=5, scale=3)
# ansari has a different convention for 'alternative'
alternative_correspondence = {"less": "greater",
"greater": "less",
"two-sided": "two-sided"}
alternative_scipy = alternative_correspondence[alternative]
expected = stats.ansari(x, y, alternative=alternative_scipy)
def statistic1d(x, y):
return stats.ansari(x, y).statistic
res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
alternative=alternative)
assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_mannwhitneyu(self, alternative):
np.random.seed(0)
x = stats.uniform.rvs(size=(3, 5, 2), loc=0)
y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05)
expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative)
def statistic(x, y, axis):
return stats.mannwhitneyu(x, y, axis=axis).statistic
res = permutation_test((x, y), statistic, vectorized=True,
n_resamples=np.inf, alternative=alternative,
axis=1)
assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
def test_against_cvm(self):
np.random.seed(0)
x = stats.norm.rvs(size=4, scale=1)
y = stats.norm.rvs(size=5, loc=3, scale=3)
expected = stats.cramervonmises_2samp(x, y, method='exact')
def statistic1d(x, y):
return stats.cramervonmises_2samp(x, y,
method='asymptotic').statistic
# cramervonmises_2samp has only one alternative, greater
res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
alternative='greater')
assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.xslow()
@pytest.mark.parametrize('axis', (-1, 2))
def test_vectorized_nsamp_ptype_both(self, axis):
# Test that permutation_test with permutation_type='independent' works
# properly for a 3-sample statistic with nd array samples of different
# (but compatible) shapes and ndims. Show that exact permutation test
# and random permutation tests approximate SciPy's asymptotic pvalues
# and that exact and random permutation test results are even closer
# to one another (than they are to the asymptotic results).
np.random.seed(0)
# Three samples, different (but compatible) shapes with different ndims
x = np.random.rand(3)
y = np.random.rand(1, 3, 2)
z = np.random.rand(2, 1, 4)
data = (x, y, z)
# Define the statistic (and pvalue for comparison)
def statistic1d(*data):
return stats.kruskal(*data).statistic
def pvalue1d(*data):
return stats.kruskal(*data).pvalue
statistic = _resampling._vectorize_statistic(statistic1d)
pvalue = _resampling._vectorize_statistic(pvalue1d)
# Calculate the expected results
x2 = np.broadcast_to(x, (2, 3, 3)) # broadcast manually because
y2 = np.broadcast_to(y, (2, 3, 2)) # _vectorize_statistic doesn't
z2 = np.broadcast_to(z, (2, 3, 4))
expected_statistic = statistic(x2, y2, z2, axis=axis)
expected_pvalue = pvalue(x2, y2, z2, axis=axis)
# Calculate exact and randomized permutation results
kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
'permutation_type': 'independent', 'random_state': 0}
res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds)
# Check results
assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
assert_allclose(res.pvalue, expected_pvalue, atol=6e-2)
assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
# -- Paired-Sample Tests -- #
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_wilcoxon(self, alternative):
np.random.seed(0)
x = stats.uniform.rvs(size=(3, 6, 2), loc=0)
y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05)
# We'll check both 1- and 2-sample versions of the same test;
# we expect identical results to wilcoxon in all cases.
def statistic_1samp_1d(z):
# 'less' ensures we get the same of two statistics every time
return stats.wilcoxon(z, alternative='less').statistic
def statistic_2samp_1d(x, y):
return stats.wilcoxon(x, y, alternative='less').statistic
def test_1d(x, y):
return stats.wilcoxon(x, y, alternative=alternative)
test = _resampling._vectorize_statistic(test_1d)
expected = test(x, y, axis=1)
expected_stat = expected[0]
expected_p = expected[1]
res1 = permutation_test((x-y,), statistic_1samp_1d, vectorized=False,
permutation_type='samples', n_resamples=np.inf,
alternative=alternative, axis=1)
res2 = permutation_test((x, y), statistic_2samp_1d, vectorized=False,
permutation_type='samples', n_resamples=np.inf,
alternative=alternative, axis=1)
# `wilcoxon` returns a different statistic with 'two-sided'
assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
if alternative != 'two-sided':
            assert_allclose(res2.statistic, expected_stat, rtol=self.rtol)
# Get the level-1 model for each dataset (for example the averaged model for the 5-57-21k
# data), which includes predictions from xgboost, random forest, ..., projected features
# from PCA and ICA, and features selected by some models. Each level-1 model should be able
# to give a prediction score for each biz_id, which is then used by the level-2 model.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os import path
import os
import pickle
import argparse
import numpy as np
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from sets import Set
from utils import *
np.random.seed(123213)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data',
choices=['21k', 'v3', 'colHist'],
default="21k")
parser.add_argument('--prob',
choices=['75', '50'],
default='75')
parser.add_argument('--reps',
choices=['5', '9'],
default='5')
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
def get_data_train(args):
data_dir = path.join("../feature/", args.reps + "_" + args.prob)
if args.data == '21k':
X_path = path.join(data_dir, "X_train.npy")
y_path = path.join(data_dir, "y_train.npy")
X = np.load(X_path)
y = np.load(y_path)
sel_range = range(0, 2048)
return X[:, sel_range], y[:, args.yix]
elif args.data == 'colHist':
X_path = path.join(data_dir, "X_train.npy")
y_path = path.join(data_dir, "y_train.npy")
X = np.load(X_path)
y = np.load(y_path)
sel_range = range(2048, 2048 + 772)
return X[:, sel_range], y[:, args.yix]
elif args.data == 'v3':
X_path = path.join(data_dir, "X_train.npy")
y_path = path.join(data_dir, "y_train.npy")
X = np.load(X_path)
y = np.load(y_path)
sel_range = range(2048 + 772, 6916)
return X[:, sel_range], y[:, args.yix]
def get_data_test(args):
data_dir = path.join("../feature/", args.reps + "_" + args.prob)
if args.data == '21k':
X_path = path.join(data_dir, "X_test.npy")
biz_path = path.join(data_dir, "bizlist.npy")
X = np.load(X_path)
biz_list = np.load(biz_path)
sel_range = range(0, 2048)
return X[:, sel_range], biz_list
elif args.data == 'colHist':
X_path = path.join(data_dir, "X_test.npy")
biz_path = path.join(data_dir, "bizlist.npy")
X = np.load(X_path)
biz_list = np.load(biz_path)
sel_range = range(2048, 2048 + 772)
return X[:, sel_range], biz_list
elif args.data == 'v3':
X_path = path.join(data_dir, "X_test.npy")
biz_path = path.join(data_dir, "bizlist.npy")
X = np.load(X_path)
biz_list = np.load(biz_path)
sel_range = range(2048 + 772, 6916)
return X[:, sel_range], biz_list
class level1_model:
def get_fitted(self):
pass
def get_feature_list(self, X_train=None, y_train=None):
pass
def predict(self, X_test):
pass
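# Sketch of how these level-1 outputs feed the level-2 model. The helper below is
# illustrative only and not part of the original pipeline: out-of-fold predictions become
# level-2 training features and test-set predictions become level-2 test features,
# stacked column-wise.
def _stack_level1_outputs(models, X_test):
    level2_train = np.hstack([m.get_fitted() for m in models])
    level2_test = np.hstack([m.predict(X_test) for m in models])
    return level2_train, level2_test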
class level1_xgboost(level1_model):
def __init__(self, args):
self.args = args
model_dir = "_".join(("xgboost_f1", args.reps, args.prob, args.data))
model_name = "model_" + str(args.yix) + ".pkl"
outfold_name = "outFold_" + str(args.yix) + ".npy"
model_path = path.join(path.dirname(__file__), model_dir, model_name)
outfold_path = path.join(
path.dirname(__file__), model_dir, outfold_name)
with open(model_path, 'r') as f:
self.clf = pickle.load(f)
self.outfold_pred = np.load(outfold_path)
def get_fitted(self):
return np.array([self.outfold_pred]).T
def get_feature_list(self, X_train=None, y_train=None):
f_score = self.clf.get_fscore()
k = np.array(f_score.keys())
v = np.array(f_score.values())
f_ix = k[v > np.mean(v)]
f_list = map(lambda x: int(x[1:]), f_ix)
return {args.data: f_list}
def predict(self, X_test):
dtest = xgb.DMatrix(X_test)
return np.array([self.clf.predict(dtest, output_margin=True)]).T
class level1_rf(level1_model):
def __init__(self, args):
self.args = args
model_dir = "_".join(("randomForest", args.reps, args.prob, args.data))
model_name = "model_" + str(args.yix) + ".pkl"
outfold_name = "outFold_" + str(args.yix) + ".npy"
param_name = "param_" + str(args.yix) + ".pkl"
model_path = path.join(path.dirname(__file__), model_dir, model_name)
outfold_path = path.join(
path.dirname(__file__), model_dir, outfold_name)
param_path = path.join(path.dirname(__file__), model_dir, param_name)
with open(model_path, 'r') as f:
self.clf = pickle.load(f)
with open(param_path, 'r') as f:
self.params = pickle.load(f)
self.outfold_pred = np.load(outfold_path)
def get_fitted(self):
return np.array([self.outfold_pred]).T
def get_feature_list(self, X_train, y_train):
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**self.params)
clf.fit(X_train, y_train, sample_weight)
score = np.array(clf.feature_importances_)
sel_ix = np.arange(score.shape[0])[score > np.mean(score)]
return {args.data: list(sel_ix)}
def predict(self, X_test):
pred_list = []
for cclf in self.clf:
pred = cclf.predict_proba(X_test)[:, 1]
pred_list.append(pred)
return np.array([np.mean(np.array(pred_list), axis=0)]).T
class level1_extree(level1_model):
def __init__(self, args):
self.args = args
model_dir = "_".join(("extraTree", args.reps, args.prob, args.data))
model_name = "model_" + str(args.yix) + ".pkl"
outfold_name = "outFold_" + str(args.yix) + ".npy"
param_name = "param_" + str(args.yix) + ".pkl"
model_path = path.join(path.dirname(__file__), model_dir, model_name)
outfold_path = path.join(
path.dirname(__file__), model_dir, outfold_name)
param_path = path.join(path.dirname(__file__), model_dir, param_name)
with open(model_path, 'r') as f:
self.clf = pickle.load(f)
with open(param_path, 'r') as f:
self.params = pickle.load(f)
self.outfold_pred = np.load(outfold_path)
def get_fitted(self):
return np.array([self.outfold_pred]).T
def get_feature_list(self, X_train, y_train):
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = ExtraTreesClassifier(**self.params)
clf.fit(X_train, y_train, sample_weight)
score = np.array(clf.feature_importances_)
sel_ix = np.arange(score.shape[0])[score > np.mean(score)]
return {args.data: list(sel_ix)}
def predict(self, X_test):
pred_list = []
for cclf in self.clf:
pred = cclf.predict_proba(X_test)[:, 1]
pred_list.append(pred)
return np.array([np.mean(np.array(pred_list), axis=0)]).T
class level1_pca(level1_model):
def __init__(self, args):
self.args = args
model_dir = "_".join(("pca", args.reps, args.prob, args.data))
model_name = "model.pkl"
outfold_name = "outFold.npy"
param_name = "param.pkl"
model_path = path.join(path.dirname(__file__), model_dir, model_name)
outfold_path = path.join(
path.dirname(__file__), model_dir, outfold_name)
param_path = path.join(path.dirname(__file__), model_dir, param_name)
with open(model_path, 'r') as f:
self.clf = pickle.load(f)
with open(param_path, 'r') as f:
self.params = pickle.load(f)
self.outfold_pred = np.load(outfold_path)
def get_fitted(self):
return self.outfold_pred
def get_feature_list(self, X_train=None, y_train=None):
return {args.data: []}
def predict(self, X_test):
return self.clf.transform(X_test)
class level1_ica(level1_model):
def __init__(self, args):
self.args = args
model_dir = "_".join(("ica", args.reps, args.prob, args.data))
model_name = "model.pkl"
outfold_name = "outFold.npy"
param_name = "param.pkl"
model_path = path.join(path.dirname(__file__), model_dir, model_name)
outfold_path = path.join(
path.dirname(__file__), model_dir, outfold_name)
param_path = path.join(path.dirname(__file__), model_dir, param_name)
with open(model_path, 'r') as f:
self.clf = pickle.load(f)
with open(param_path, 'r') as f:
self.params = pickle.load(f)
        self.outfold_pred = np.load(outfold_path)
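    # The remaining methods mirror level1_pca above: ICA components are consumed the same way.
    def get_fitted(self):
        return self.outfold_pred
    def get_feature_list(self, X_train=None, y_train=None):
        return {args.data: []}
    def predict(self, X_test):
        return self.clf.transform(X_test)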
# pylint: disable=C0302, C1802, C0209, R1705, W0201
import os, resource
from typing import Any, List, Optional, Tuple, Union
import numpy as np
from maddness.util.least_squares import ( # type: ignore[attr-defined]
_XW_encoded,
encoded_lstsq,
sparse_encoded_lstsq,
)
from maddness.util.hash_function_helper import ( # type: ignore[attr-defined]
Bucket,
MultiSplit,
create_codebook_start_end_idxs,
)
def learn_binary_tree_splits(
X: np.ndarray,
nsplits: int = 4, # levels of resulting binary hash tree
return_prototypes: bool = True,
# return_buckets: bool = False,
X_orig: Optional[np.ndarray] = None,
check_x_dims: int = 8, # can be used to check more or less dims with max losses
learn_quantize_params: bool = False,
) -> Tuple[list, int, Union[list, np.ndarray]]:
assert nsplits <= 4 # >4 splits means >16 split_vals for this func's impl
X = X.copy().astype(np.float32)
N, D = X.shape # D amount of IDx per codebook
X_orig = X.copy() if X_orig is None else X_orig.copy()
# initially, one big bucket with everything
buckets = [
Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0), point_ids=np.arange(N))
]
# total_loss = sum([bucket.loss for bucket in buckets])
# print("================================")
# print("learn_binary_tree_splits(): initial loss: ", total_loss)
splits = []
col_losses = np.zeros(D, dtype=np.float32)
OFFSET = 0.0
SCALE_BY = 1.0
X = X * SCALE_BY + OFFSET
# X_orig = X_orig + OFFSET
for _ in range(nsplits):
# in the original code there are more strategies: eigenvec, bucket_eigenvecs, kurtosis
# dim_heuristic == "bucket_sse":
col_losses[:] = 0 # set all zero
for buck in buckets:
col_losses += buck.col_sum_sqs() # return variance
try_dims = np.argsort(col_losses)[::-1][
:check_x_dims
] # choose biggest column losses
losses = np.zeros(len(try_dims), dtype=X.dtype)
all_split_vals = [] # vals chosen by each bucket/group for each dim
# determine for this dim what the best split vals are for each
# group and what the loss is when using these split vals
for d, dim in enumerate(try_dims):
split_vals = [] # each bucket contributes one split val
for _, buck in enumerate(buckets):
# X.shape (50000, 32), dim is a number 0-31, val 1D, loss 1D
val, loss = buck.optimal_split_val(X, dim, X_orig=X_orig)
losses[d] += loss
if d > 0 and losses[d] >= np.min(losses[:d]):
# early stop
break
split_vals.append(val)
all_split_vals.append(split_vals)
# determine best dim to split on, and pull out associated split
# vals for all buckets
best_tried_dim_idx = np.argmin(losses)
best_dim = try_dims[best_tried_dim_idx]
use_split_vals = all_split_vals[best_tried_dim_idx]
split = MultiSplit(dim=best_dim, vals=use_split_vals)
if learn_quantize_params:
# simple version, which also handles 1 bucket: just set min
# value to be avg of min splitval and xval, and max value to
# be avg of max splitval and xval
x = X[:, best_dim] # Vector (50000, 1)
offset = (np.min(x) + np.min(use_split_vals)) / 2
upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
# TODO: why this specific scale value??
scale = 254.0 / upper_val
# if learn_quantize_params == "int16":
scale = 2.0 ** int(np.log2(scale))
split.offset = offset
split.scaleby = scale
split.vals = (split.vals - split.offset) * split.scaleby
# TODO: look at clippings
split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
else:
split.offset = OFFSET
split.scaleby = SCALE_BY
splits.append(split)
# apply this split to get next round of buckets
new_buckets = []
for i, buck in enumerate(buckets):
val = use_split_vals[i]
new_buckets += list(buck.split(X, dim=best_dim, val=val, X_orig=X_orig))
buckets = new_buckets
# pylint: disable=consider-using-generator
loss = sum([bucket.loss for bucket in buckets])
# print("learn_binary_tree_splits(): returning loss: ", loss)
if return_prototypes:
prototypes = np.vstack([buck.col_means() for buck in buckets])
assert prototypes.shape == (len(buckets), X.shape[1])
return splits, loss, prototypes
# if return_buckets:
return splits, loss, buckets
def init_and_learn_hash_function(
X: np.ndarray, C: int, pq_perm_algo: str = "start"
) -> Tuple[np.ndarray, list, np.ndarray, list]:
_, D = X.shape
K = 16
X = X.astype(np.float32)
X_error = X.copy().astype(np.float32)
X_orig = X
all_prototypes = np.zeros((C, K, D), dtype=np.float32)
all_splits: List = []
pq_idxs = create_codebook_start_end_idxs(X, C, algo=pq_perm_algo)
# ------------------------ 0th iteration; initialize all codebooks
all_splits = []
all_buckets = []
for c in range(C):
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
# in original code there is other selections based on PCA and disjoint PCA
use_X_error = X_error[:, idxs]
use_X_orig = X_orig[:, idxs]
# learn codebook to soak current residuals
multisplits, _, buckets = learn_binary_tree_splits(
use_X_error, X_orig=use_X_orig, return_prototypes=False
)
for split in multisplits:
split.dim = idxs[split.dim]
all_splits.append(multisplits)
all_buckets.append(buckets)
# update residuals and store prototypes
# idxs = IDs that were look at for current codebook
# buck.point_ids = rows that landed in certain K
# [ 0 5 21 ... 99950 99979 99999] (N=100000)
# X_error = is here still the A input
# remove centroid from all the points that lie in a certain codebook
# set prototype value
centroid = np.zeros(D, dtype=np.float32)
for b, buck in enumerate(buckets):
# print(b, idxs, buck.point_ids, centroid, buck.col_means())
if len(buck.point_ids):
centroid[:] = 0
centroid[idxs] = buck.col_means()
X_error[buck.point_ids] -= centroid
# update centroid here in case we want to regularize it somehow
all_prototypes[c, b] = centroid
# X_error = A_input - all_centroids
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"Learning progress {X.shape}-{C}-{K}: {c + 1}/{C} "
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
return X_error, all_splits, all_prototypes, all_buckets
def apply_hash_function(X: np.ndarray, splits: List[MultiSplit]) -> np.ndarray:
N, _ = X.shape
nsplits = len(splits)
assert len(splits) >= 1
# original code had a distinction: not sure why
group_ids = np.zeros(N, dtype=np.int32)
for i in range(nsplits):
split = splits[i]
vals = split.vals[group_ids]
indicators = split.preprocess_x(X[:, split.dim]) > vals
group_ids = (group_ids * 2) + indicators
return group_ids
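# Reading of apply_hash_function: each row of X descends a depth-`nsplits` binary tree (one
# split dimension per level, with a per-node threshold taken from split.vals), so with
# nsplits=4 every row gets a 4-bit bucket id in {0, ..., 15}, i.e. one of K=16 prototypes
# per codebook.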
def maddness_encode(
X: np.ndarray, multisplits_lists: list[list[MultiSplit]]
) -> np.ndarray:
N, _ = X.shape
C = len(multisplits_lists)
A_enc = np.empty((N, C), dtype=np.int32, order="F") # column-major
for c in range(C):
A_enc[:, c] = apply_hash_function(X, multisplits_lists[c])
return np.ascontiguousarray(A_enc)
# @_memory.cache
def learn_proto_and_hash_function(
X: np.ndarray, C: int, lut_work_const: int = -1
) -> Tuple[list[list[MultiSplit]], np.ndarray, np.ndarray]:
_, D = X.shape
K = 16
used_perm_algo = "start" # or end
X_orig = X.astype(np.float32)
# X_error = X_orig - centroid shape: [N, D]
X_error, all_splits, all_prototypes, _ = init_and_learn_hash_function(
X, C, pq_perm_algo=used_perm_algo
)
msv_orig = (X_orig * X_orig).mean()
mse_error = (X_error * X_error).mean()
print(
"X_error mse / X mean squared value: ",
mse_error / msv_orig,
mse_error,
msv_orig,
np.mean(X_orig),
)
squared_diff = np.square(X_orig - X_error).mean()
print("Error to Original squared diff", squared_diff)
# optimize prototypes discriminatively conditioned on assignments
# applying g(A) [N, C] with values from 0-K (50000, 16)
A_enc = maddness_encode(X, all_splits)
# optimizing prototypes
if lut_work_const != 1: # if it's 1, equivalent to just doing PQ
if lut_work_const < 0:
# print("fitting dense lstsq to X_error")
W = encoded_lstsq(A_enc=A_enc, Y=X_error)
else:
W, _ = sparse_encoded_lstsq(
A_enc, X_error, nnz_blocks=lut_work_const, pq_perm_algo=used_perm_algo
)
all_prototypes_delta = W.reshape(C, K, D)
all_prototypes += all_prototypes_delta
# check how much improvement we got
X_error -= _XW_encoded(A_enc, W) # if we fit to X_error
mse_res = (X_error * X_error).mean()
print("X_error mse / X mse after lstsq: ", mse_res / msv_orig)
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"After Ridge regression {X.shape}-{C}-{K}"
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
report_array = np.array(
[
mse_error,
msv_orig,
mse_error / msv_orig,
np.mean(X_orig),
mse_res,
mse_res / msv_orig,
ram_usage / (1024 * 1024),
]
)
return all_splits, all_prototypes, report_array
def maddness_lut(q: np.ndarray, all_prototypes: np.ndarray) -> np.ndarray:
q = q.reshape(1, 1, -1) # all_prototypes is shape C, K, D
return (q * all_prototypes).sum(axis=2) # C, K
def maddness_quantize_luts(luts: np.ndarray, force_power_of_2: bool = True) -> Any:
mins = luts.min(axis=(0, 2))
maxs = luts.max(axis=(0, 2))
gaps = maxs - mins
gap = np.max(gaps)
if force_power_of_2:
exponent = np.ceil(np.log2(gap))
scale = 2 ** int(-exponent) # scale is a power of 2, so can just shift
scale *= 255.5 - 1e-10 # so max val is at most 255
else:
scale = (255.5 - 1e-10) / gap
offsets = mins[np.newaxis, :, np.newaxis]
luts_quantized = (luts - offsets) * scale
luts_quantized = (luts_quantized + 0.5).astype(np.int64)
assert np.min(luts_quantized) >= 0
assert np.max(luts_quantized) <= 255.0
return luts_quantized, offsets.sum(), scale
# pylint: disable=R0902
class MaddnessMatmul:
def __init__(self, C: int = 16, lut_work_const: int = -1) -> None:
# checks
if lut_work_const > 0 and lut_work_const > C:
raise Exception("lut_work_const > C: {} > {}".format(lut_work_const, C))
self.lut_work_const = lut_work_const
self.C = C
self.K = 16
self.A_enc: Optional[np.ndarray] = None
self.luts: Optional[np.ndarray] = None
self.quantize_lut = False
self.upcast_every = 16
self.upcast_every = min(self.C, self.upcast_every)
# important otherwise wrong summation
assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
self.accumulate_how = "mean" # sum
def _learn_hash_buckets_and_prototypes(self, A: np.ndarray) -> None:
_, D = A.shape
if D < self.C:
raise Exception("D < C: {} < {}".format(D, self.C))
self.splits_lists, self.prototypes, _ = learn_proto_and_hash_function(
A, self.C, lut_work_const=self.lut_work_const
)
def _encode_A(self, A: np.ndarray) -> np.ndarray:
idxs = maddness_encode(A, self.splits_lists)
# offsets = [ 0 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240]
offsets = np.arange(self.C, dtype=np.int32) * self.K
return idxs + offsets
def _create_lut(self, B: np.ndarray) -> Tuple[np.ndarray, float, float]:
B = np.atleast_2d(B)
        luts = np.zeros((B.shape[0], self.C, self.K))
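        # Build one lookup table per query row of B using maddness_lut, then optionally
        # quantize it with maddness_quantize_luts. The non-quantized offset/scale defaults
        # (0.0, 1.0) below are assumptions, not taken from the original source.
        for i, q in enumerate(B):
            luts[i] = maddness_lut(q, self.prototypes)
        if self.quantize_lut:
            luts, offset, scale = maddness_quantize_luts(luts)
            return luts, offset, scale
        return luts, 0.0, 1.0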
"""
io images
---------
Module to format images to be used by this framework.
"""
import numpy as np
from itertools import product
def create_locs_features_from_image(image):
"""Create locs and features from image.
Parameters
----------
image: np.ndarray
the image matrix represented using numpy.
Returns
-------
locs: np.ndarray
the locations positions. The grid positions of the image.
feats: np.ndarray
the intensity of the image.
"""
sh = image.shape
if len(sh) == 2:
image = image.reshape((sh[0], sh[1], 1))
map2indices = lambda x, y: x + y*sh[0]
n = np.prod(sh[:2])
    locs = np.zeros((n, 2))
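    # Fill in grid locations and per-pixel features. The ordering here is an assumption taken
    # from map2indices above (index = x + y*sh[0]); feats holds the channel values at each
    # grid location, matching the docstring's description.
    feats = np.zeros((n, image.shape[2]))
    for x, y in product(range(sh[0]), range(sh[1])):
        idx = map2indices(x, y)
        locs[idx] = (x, y)
        feats[idx] = image[x, y]
    return locs, feats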
# a421_one_movie_recommender.py
# A basic movie recommendation code using average for a single user and single movie.
# This code is based on the netflix-style-recommender project shared on GitHub.
# It was written by Nikhil22.
# The code has been modified from its original version.
import numpy as np
# define the movies, users, and different ratings
movies = ["Back to the Future", "Guardians of the Galaxy", "Avatar", "Indiana Jones", "2001: A Space Odyssey"]
genres = ["Action", "Adventure", "Science Fiction", "Comedy"]
#TODO 1 change these values to the names of the students in your group
users = ["Tyler", "John", "Sarah", "Matt"]
#TODO 2 paste your ratings tables here
movie_ratings =[[8,8,0,5],
[6,6,0,7],
[3,8,0,7],
[0,9,0,0],
[0,0,0,0]]
user_preferences = [[4,5,5,3],
[3,3,5,4],
[3,3,3,4],
[4,4,3,5]]
movie_genre = [[0.6, 0.0, 0.3, 0.1],
[0.2, 0.3, 0.3, 0.2],
[0.3, 0.3, 0.4, 0.0],
[0.6, 0.2, 0.0, 0.2],
[0.4, 0.0, 0.6, 0.0]]
# TODO Your ratings, rate the five movies in the list below
# notice how your recommendations change when you add a rating for 1 movie
your_ratings = np.zeros((5, 1))
your_ratings[0] = 5 # rating for Back to the Future
your_ratings[1] = 1 # rating for Guardians of the Galaxy
your_ratings[2] = 3 # rating for Avatar
your_ratings[3] = 3 # rating for Indiana Jones
your_ratings[4] = 6 # rating for 2001: A Space Odyssey
# --- Normalization Process ---
# ratings, movie_features, and user_prefs are NumPy arrays, which are more structured than plain lists
ratings = np.array(movie_ratings)
movie_features = np.array(movie_genre)
user_prefs = np.array(user_preferences)
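# The normalization and prediction steps are not shown above; a hedged sketch of
# one common approach (an assumption, not necessarily the original author's code)
# is to score each movie for each user as the dot product of the movie's genre
# weights with the user's genre preferences:
#     predicted_scores = movie_features @ user_prefs.T   # shape (n_movies, n_users)
# and to recommend the highest-scoring unrated movies first.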
"""Cox-Ingersoll-Ross (CIR) model
dx_t = (drift - speed * x_t) * dt + volatility * sqrt(x_t) * dw_t
d[sqrt(x_t)] = ((drift / 2 - volatility ** 2 / 8) / sqrt(x_t) - speed / 2 * sqrt(x_t)) * dt + volatility / 2 * dw_t
"""
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
from time_series_model_template import TimeSeriesModel
class CoxIngersollRoss(TimeSeriesModel):
def __init__(self):
self.drift = None
self.speed = None
self.volatility = None
def fit_parameters(self, dt, x, method='OLS'):
methods = {'OLS': self.fit_parameters_ols,
'MLE': self.fit_parameters_mle}
methods[method](dt, x)
def fit_parameters_ols(self, dt, x):
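        # The Euler discretisation of the SDE,
        #     x[k+1] - x[k] ~ (drift - speed * x[k]) dt + volatility * sqrt(x[k]) dW,
        # divided by sqrt(x[k]) is a linear model y = a * sqrt(x[k]) + b / sqrt(x[k]) + eps
        # with a = -speed * dt, b = drift * dt and Var(eps) = volatility**2 * dt,
        # which is the least-squares problem solved below.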
sqrt_x = np.sqrt(x)
y = np.diff(x) / sqrt_x[:-1]
xx = np.vstack((sqrt_x[:-1], 1. / sqrt_x[:-1])).T
(a, b), (sum_error2,), _, _ = np.linalg.lstsq(xx, y, rcond=-1)
self.speed = - a / dt
self.drift = b / dt
sigma2 = sum_error2 / (len(y) - 2) / dt
self.volatility = np.sqrt(sigma2)
def fit_parameters_ols_2(self, dt, x):
sqrt_x = np.sqrt(x)
xx = np.vstack((sqrt_x[:-1], 1 / sqrt_x[:-1])).T
y = sqrt_x[1:]
(a, b), (sum_error2,), _, _ = np.linalg.lstsq(xx, y, rcond=-1)
self.speed = 2 * (1 - a) / dt
sigma2 = sum_error2 / (len(y) - 2) * 4 / dt
self.volatility = np.sqrt(sigma2)
        self.drift = 2 * (b / dt + sigma2 / 8)  # sigma2 / 8 undoes the Ito correction term in the sqrt(x) drift
def fit_parameters_mle(self, dt, x):
        raise NotImplementedError("MLE parameter fitting is not implemented")
def simulate(self, x0, t):
x = np.zeros_like(t)
# sqrt_x = np.zeros_like(t)
x[0] = x0
# sqrt_x[0] = np.sqrt(x0)
for i in range(1, len(t)):
dt = t[i] - t[i-1]
dw = np.random.normal(0, 1) * np.sqrt(dt)
# sqrt_x[i] = sqrt_x[i - 1] + (
            #     (self.drift / 2 - self.volatility ** 2 / 8) / sqrt_x[i - 1] - self.speed / 2 * sqrt_x[i - 1]
# ) * dt + self.volatility / 2 * dw
x[i] = x[i - 1] + (self.drift - self.speed * x[i - 1]) * dt + self.volatility * np.sqrt(x[i - 1]) * dw
# x = sqrt_x
return x
def main():
np.random.seed(0)
v_model = CoxIngersollRoss()
v_speed, v_drift, v_volatility = .1, .01, 0.03
dt = 1./252
t = np.arange(0, 30., dt)
n_run = 100
speeds = []
drifts = []
volatility = []
for _ in range(n_run):
v_model.speed = v_speed
v_model.drift = v_drift
v_model.volatility = v_volatility
x0 = np.random.normal(1e-2, v_volatility * np.sqrt(dt))
x = v_model.simulate(x0, t)
v_model.fit_parameters(dt, x)
plt.plot(t, x)
speeds.append(v_model.parameters['speed'])
drifts.append(v_model.parameters['drift'])
volatility.append(v_model.parameters['volatility'])
# break
str_format = '.2f'
pprint(f"volatility: {np.min(volatility):.3f} {np.mean(volatility):.3f} {np.max(volatility):.3f}")
pprint(f"speed: {np.min(speeds):.3f} {np.mean(speeds):.3f} {np.max(speeds):.3f}")
pprint(f"drift: {np.min(drifts):.3f} {np.mean(drifts):.3f} { | np.max(drifts) | numpy.max |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 10:52:58 2021
@author: <NAME>
"""
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib
import torch
#import region_labels
def extract_weights_single(dataset, view, model, training_type, shot_n, cv_n):
if model == 'sag':
fs_path = '{}/weights/W_{}_{}_{}_view_{}_{}.pickle'.format(model, training_type, dataset, model, view, shot_n)
cv_path = '{}/weights/W_MainModel_{}_{}_{}_view_{}_CV_{}.pickle'.format(model,training_type, dataset, model, view, cv_n)
else:
fs_path = '{}/weights/W_{}_{}_{}{}_view_{}.pickle'.format(model, training_type, dataset, model, shot_n, view)
cv_path = '{}/weights/W_MainModel_{}_{}_{}_CV_{}_view_{}.pickle'.format(model,training_type, dataset, model, cv_n, view)
if training_type == 'Few_Shot':
x_path = fs_path
else:
x_path = cv_path
with open(x_path,'rb') as f:
weights = pickle.load(f)
if model == 'sag':
weights_vector = torch.mean(weights['w'], 1).detach().numpy()
if model == 'diffpool':
weights_vector = torch.mean(weights['w'], 1).detach().numpy()
if model == 'gcn':
weights_vector = weights['w'].squeeze().detach().numpy()
if model == 'gat':
weights_vector = weights['w'].squeeze().detach().numpy()
if model == 'gunet':
weights_vector = torch.mean(weights['w'], 0).detach().numpy()
return weights_vector
def extract_weights(dataset, view, model, training_type):
runs = []
if training_type == 'Few_Shot':
for shot_i in range(5):
runs.append(extract_weights_single(dataset, view, model, training_type, shot_i, 0))
if training_type == '3Fold':
for cv_i in range(3):
runs.append(extract_weights_single(dataset, view, model, training_type, 0, cv_i))
if training_type == '5Fold':
for cv_i in range(5):
runs.append(extract_weights_single(dataset, view, model, training_type, 0, cv_i))
if training_type == '10Fold':
for cv_i in range(10):
runs.append(extract_weights_single(dataset, view, model, training_type, 0, cv_i))
runs = np.array(runs)
weights = np.mean(runs, axis=0)
return weights
def top_biomarkers(weights, K_i):
weights_normalized = np.abs(weights)
result = []
w_sorted = weights_normalized.argsort() #verified
for i in range(1, 1+K_i):
result.append(w_sorted[-1*i])
return result
def sim(nodes1, nodes2):
if len(nodes1)==len(nodes2):
counter = 0
for i in nodes1:
for k in nodes2:
if i==k:
counter+=1
return counter/len(nodes1)
else:
        print('node vectors are not compatible')
def sim_respective(nodes1, nodes2):
if len(nodes1)==len(nodes2):
counter = 0
for i in range(len(nodes1)):
if nodes1[i]==nodes2[i]:
counter+=1
return counter/len(nodes1)
else:
        print('node vectors are not compatible')
def sim_respective_weighted(rank1, rank2, strength1, strength2): # ongoing
if len(rank1)==len(rank2) and len(strength1) == len(strength2) and len(rank1)==len(strength1):
n_views = max(rank1)
differences_rank = np.abs(rank1 - rank2)
differences_rank_weights = 1 - (differences_rank *1/n_views)
differences_strength = np.abs(strength1 - strength2)
max_diff_strength = max(differences_strength)
differences_strength_norm = differences_strength/max_diff_strength
differences_strength_weights = 1 - differences_strength_norm
sum_weights = np.sum(differences_rank_weights*differences_strength_weights)
weighted_intersection = sum_weights/len(rank1)
return weighted_intersection
else:
        print('node vectors are not compatible')
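# Note on sim_respective_weighted (explanatory, not from the original source):
# each position contributes the product of a rank-agreement weight and a
# strength-agreement weight, both scaled into [0, 1]; the returned value is the
# average of these products, so larger values indicate rankings that agree both
# in order and in magnitude.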
def view_specific_rep(dataset,view,training_type, models):
#models = ['diffpool', 'gat', 'gcn', 'gunet', 'sag']
Ks = [5, 10, 15, 20]
rep = np.zeros([len(models), len(models), len(Ks)])
for i in range(rep.shape[0]):
for j in range(rep.shape[1]):
weights_i = extract_weights(dataset, view, models[i], training_type)
weights_j = extract_weights(dataset, view, models[j], training_type)
for k in range(rep.shape[2]):
top_bio_i = top_biomarkers(weights_i, Ks[k])
top_bio_j = top_biomarkers(weights_j, Ks[k])
rep[i,j,k] = sim(top_bio_i, top_bio_j)
rep_mean = np.mean(rep, axis=2)
rep_dict = {}
rep_dict['matrix'] = rep_mean
rep_dict['dataset'] = dataset
rep_dict['view'] = view
rep_dict['models'] = models
rep_dict['training_type'] = training_type
return rep_dict
def overall_avg_rep_cv_fixed(data_dict, training_type):
dataset = data_dict['dataset']
views = data_dict['views']
models = data_dict['models']
rep = np.zeros([len(models), len(models), len(views)])
for view in views:
rep_dict = view_specific_rep(dataset,view,training_type,models)
rep[:,:,view] = rep_dict['matrix']
rep_mean = np.mean(rep, axis=2)
rep_dict = {}
rep_dict['matrix'] = rep_mean
rep_dict['models'] = models
rep_dict['dataset'] = dataset
rep_dict['training_type'] = training_type
return rep_dict
def overall_avg_rep(data_dict):
models = data_dict['models']
dataset = data_dict['dataset']
training_types = data_dict['training_types']
rep = np.zeros([len(models), len(models), len(training_types)])
for i in range(len(training_types)):
rep_dict = overall_avg_rep_cv_fixed(data_dict, training_types[i])
rep[:,:,i] = rep_dict['matrix']
rep_mean = np.mean(rep, axis=2)
rep_dict = {}
rep_dict['matrix'] = rep_mean
rep_dict['models'] = models
rep_dict['dataset'] = dataset
return rep_dict
def overall_avg_rep_plot(rep_dict, save_fig=False):
models = rep_dict['models']
df_cm = pd.DataFrame(rep_dict['matrix'], index = [i for i in models], columns = [i for i in models])
plt.figure(figsize = (10,7))
sns.heatmap(df_cm, annot=True ,vmin=0, vmax=1)
title_msg = 'Overall average reproducibility Dataset: '+rep_dict['dataset']
plt.title(title_msg)
if save_fig==True:
plt.savefig("./imgs/Rep_"+ rep_dict['dataset'] + '_avg'+".png")
plt.show()
plt.close()
def GNN_specific_rep_vect(dataset,views,training_type, model):
#models = ['diffpool', 'gat', 'gcn', 'gunet', 'sag']
Ks = [5, 10, 15, 20]
rep = np.zeros([len(views), len(views), len(Ks)])
for i in range(rep.shape[0]):
for j in range(rep.shape[1]):
weights_i = extract_weights(dataset, views[i], model, training_type)
weights_j = extract_weights(dataset, views[j], model, training_type)
for k in range(rep.shape[2]):
top_bio_i = top_biomarkers(weights_i, Ks[k])
top_bio_j = top_biomarkers(weights_j, Ks[k])
rep[i,j,k] = sim(top_bio_i, top_bio_j)
rep_mean = np.mean(rep, axis=2)
rep_vec = np.sum(rep_mean, axis=1)
rep_dict = {}
rep_dict['strength_vector'] = rep_vec
rep_dict['rank_vector'] = rep_vec.argsort()[::-1].argsort() # verified
rep_dict['dataset'] = dataset
rep_dict['views'] = views
rep_dict['model'] = model
rep_dict['training_type'] = training_type
return rep_dict
def overall_corr_rep_cv_fixed(data_dict, training_type):
dataset = data_dict['dataset']
views = data_dict['views']
models = data_dict['models']
rep_rank = np.zeros([len(models), len(models)])
rep_strength = np.zeros([len(models), len(models)])
for i in range(len(models)):
rep_vect_i = GNN_specific_rep_vect(dataset,views,training_type, models[i])
rep_rank_i = rep_vect_i['rank_vector']
rep_strength_i = rep_vect_i['strength_vector']
for j in range(len(models)):
rep_vect_j = GNN_specific_rep_vect(dataset,views,training_type, models[j])
rep_rank_j = rep_vect_j['rank_vector']
rep_strength_j = rep_vect_j['strength_vector']
corr_rank = np.corrcoef(rep_rank_i, rep_rank_j)
corr_strength = np.corrcoef(rep_strength_i, rep_strength_j)
rep_rank[i,j] = corr_rank[0,1]
rep_strength[i,j] = corr_strength[0,1]
#rep_mean = np.mean(rep, axis=2)
rep_dict = {}
rep_dict['rank_matrix'] = rep_rank
rep_dict['strength_matrix'] = rep_strength
rep_dict['models'] = models
rep_dict['dataset'] = dataset
rep_dict['training_type'] = training_type
return rep_dict
def overall_corr_rep(data_dict):
models = data_dict['models']
dataset = data_dict['dataset']
training_types = data_dict['training_types']
rep_rank = np.zeros([len(models), len(models), len(training_types)])
rep_strength = np.zeros([len(models), len(models), len(training_types)])
for i in range(len(training_types)):
rep_dict = overall_corr_rep_cv_fixed(data_dict, training_types[i])
rep_rank[:,:,i] = rep_dict['rank_matrix']
rep_strength[:,:,i] = rep_dict['strength_matrix']
rep_rank_mean = np.mean(rep_rank, axis=2)
    rep_strength_mean = np.mean(rep_strength, axis=2)
from initialise_parameters import params, control_data, categories, calculated_categories, change_in_categories
from math import exp, ceil, log, floor, sqrt
import numpy as np
from scipy.integrate import ode
from scipy.stats import norm, gamma
import pandas as pd
import statistics
import os
import pickle
from tqdm import tqdm
cwd = os.getcwd()
import pdb
def timing_function(t,time_vector):
for ii in range(ceil(len(time_vector)/2)):
if t>=time_vector[2*ii] and t<time_vector[2*ii+1]:
return True
    # t was not in any of these time intervals
return False
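# Usage note (illustrative): time_vector is interpreted as consecutive
# [start, end) pairs, e.g. timing_function(5, [0, 10]) is True and
# timing_function(12, [0, 10, 20, 30]) is False.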
##
# -----------------------------------------------------------------------------------
##
class simulator:
def __init__(self):
pass
##
#-----------------------------------------------------------------
##
def ode_system(self,t,y, # state of system
infection_matrix,age_categories,symptomatic_prob,hospital_prob,critical_prob,beta, # params
latentRate,removalRate,hospRate,deathRateICU,deathRateNoIcu, # more params
better_hygiene,remove_symptomatic,remove_high_risk,ICU_capacity # control
):
##
dydt = np.zeros(y.shape)
I_vec = [ y[params.I_ind+i*params.number_compartments] for i in range(age_categories)]
H_vec = [ y[params.H_ind+i*params.number_compartments] for i in range(age_categories)]
A_vec = [ y[params.A_ind+i*params.number_compartments] for i in range(age_categories)]
total_I = sum(I_vec)
# better hygiene
if timing_function(t,better_hygiene['timing']): # control in place
control_factor = better_hygiene['value']
else:
control_factor = 1
# removing symptomatic individuals
if timing_function(t,remove_symptomatic['timing']): # control in place
remove_symptomatic_rate = min(total_I,remove_symptomatic['rate']) # if total_I too small then can't take this many off site at once
else:
remove_symptomatic_rate = 0
S_removal = 0
for i in range(age_categories - remove_high_risk['n_categories_removed'],age_categories):
S_removal += y[params.S_ind + i*params.number_compartments] # add all old people to remove
for i in range(age_categories):
# removing symptomatic individuals
# these are put into Q ('quarantine');
quarantine_sick = remove_symptomatic_rate * y[params.I_ind + i*params.number_compartments]/total_I # no age bias in who is moved
# removing susceptible high risk individuals
# these are moved into O ('offsite')
if i in range(age_categories - remove_high_risk['n_categories_removed'],age_categories) and timing_function(t,remove_high_risk['timing']):
remove_high_risk_people = min(remove_high_risk['rate'],S_removal) # only removing high risk (within time control window). Can't remove more than we have
else:
remove_high_risk_people = 0
# ICU capacity
if sum(H_vec)>0: # can't divide by 0
ICU_for_this_age = ICU_capacity['value'] * y[params.H_ind + i*params.number_compartments]/sum(H_vec)
# ICU beds allocated on a first come, first served basis based on the numbers in hospital
else:
ICU_for_this_age = ICU_capacity['value']
# ODE system:
# S
dydt[params.S_ind + i*params.number_compartments] = (- y[params.S_ind + i*params.number_compartments] * control_factor * beta * (np.dot(infection_matrix[i,:],I_vec) + params.AsymptInfectiousFactor*np.dot(infection_matrix[i,:],A_vec))
- remove_high_risk_people * y[params.S_ind + i*params.number_compartments] / S_removal )
# E
dydt[params.E_ind + i*params.number_compartments] = ( y[params.S_ind + i*params.number_compartments] * control_factor * beta * (np.dot(infection_matrix[i,:],I_vec) + params.AsymptInfectiousFactor*np.dot(infection_matrix[i,:],A_vec))
- latentRate * y[params.E_ind + i*params.number_compartments])
# I
dydt[params.I_ind + i*params.number_compartments] = (latentRate * (1-symptomatic_prob[i]) * y[params.E_ind + i*params.number_compartments]
- removalRate * y[params.I_ind + i*params.number_compartments]
- quarantine_sick
)
# A
dydt[params.A_ind + i*params.number_compartments] = (latentRate * symptomatic_prob[i] * y[params.E_ind + i*params.number_compartments]
- removalRate * y[params.A_ind + i*params.number_compartments])
# H
dydt[params.H_ind + i*params.number_compartments] = (removalRate * (hospital_prob[i]) * y[params.I_ind + i*params.number_compartments]
- hospRate * y[params.H_ind + i*params.number_compartments]
# + deathRateNoIcu * (1 - params.death_prob) * max(0,y[params.C_ind + i*params.number_compartments] - ICU_for_this_age) # recovered despite no ICU (0, since now assume death_prob is 1)
+ deathRateICU * (1 - params.death_prob_with_ICU) * min(y[params.C_ind + i*params.number_compartments],ICU_for_this_age) # recovered from ICU
+ (hospital_prob[i]) * params.quarant_rate * y[params.Q_ind + i*params.number_compartments] # proportion of removed people who were hospitalised once returned
)
# Critical care (ICU)
dydt[params.C_ind + i*params.number_compartments] = ( min(hospRate * (critical_prob[i]) * y[params.H_ind + i*params.number_compartments],
ICU_for_this_age - y[params.C_ind + i*params.number_compartments]
+ deathRateICU * y[params.C_ind + i*params.number_compartments] # with ICU treatment
) # amount entering is minimum of: amount of beds available**/number needing it
# **including those that will be made available by new deaths
- deathRateICU * y[params.C_ind + i*params.number_compartments] # with ICU treatment
)
# Uncared - no ICU
dydt[params.U_ind + i*params.number_compartments] = ( hospRate * (critical_prob[i]) * y[params.H_ind + i*params.number_compartments] # number needing care
- min(hospRate * (critical_prob[i]) * y[params.H_ind + i*params.number_compartments],
ICU_for_this_age - y[params.C_ind + i*params.number_compartments]
+ deathRateICU * y[params.C_ind + i*params.number_compartments]
) # minus number who get it (these entered category C)
- deathRateNoIcu * y[params.U_ind + i*params.number_compartments] # without ICU treatment
)
# R
dydt[params.R_ind + i*params.number_compartments] = (removalRate * (1 - hospital_prob[i]) * y[params.I_ind + i*params.number_compartments]
+ removalRate * y[params.A_ind + i*params.number_compartments]
+ hospRate * (1 - critical_prob[i]) * y[params.H_ind + i*params.number_compartments]
+ (1 - hospital_prob[i]) * params.quarant_rate * y[params.Q_ind + i*params.number_compartments] # proportion of removed people who recovered once returned
)
# D
dydt[params.D_ind + i*params.number_compartments] = (deathRateNoIcu * y[params.U_ind + i*params.number_compartments] # died without ICU treatment (all cases that don't get treatment die)
+ deathRateICU * (params.death_prob_with_ICU) * y[params.C_ind + i*params.number_compartments] # died despite attempted ICU treatment
)
# O
dydt[params.O_ind + i*params.number_compartments] = remove_high_risk_people * y[params.S_ind + i*params.number_compartments] / S_removal
# Q
dydt[params.Q_ind + i*params.number_compartments] = quarantine_sick - params.quarant_rate * y[params.Q_ind + i*params.number_compartments]
return dydt
##
#--------------------------------------------------------------------
##
def run_model(self,T_stop,population,population_frame,infection_matrix,beta,
control_dict, # control
latentRate = params.latent_rate,
removalRate = params.removal_rate,
hospRate = params.hosp_rate,
deathRateICU = params.death_rate_with_ICU,
deathRateNoIcu = params.death_rate # more params
):
E0 = 0 # exposed
I0 = 1/population # sympt
A0 = 1/population # asympt
R0 = 0 # recovered
H0 = 0 # hospitalised/needing hospital care
C0 = 0 # critical (cared)
D0 = 0 # dead
O0 = 0 # offsite
Q0 = 0 # quarantined
U0 = 0 # critical (uncared)
S0 = 1 - I0 - R0 - C0 - H0 - D0 - O0 - Q0 - U0
age_categories = int(population_frame.shape[0])
y0 = np.zeros(params.number_compartments*age_categories)
population_vector = np.asarray(population_frame.Population_structure)
# initial conditions
for i in range(age_categories):
y0[params.S_ind + i*params.number_compartments] = (population_vector[i]/100)*S0
y0[params.E_ind + i*params.number_compartments] = (population_vector[i]/100)*E0
y0[params.I_ind + i*params.number_compartments] = (population_vector[i]/100)*I0
y0[params.A_ind + i*params.number_compartments] = (population_vector[i]/100)*A0
y0[params.R_ind + i*params.number_compartments] = (population_vector[i]/100)*R0
y0[params.H_ind + i*params.number_compartments] = (population_vector[i]/100)*H0
y0[params.C_ind + i*params.number_compartments] = (population_vector[i]/100)*C0
y0[params.D_ind + i*params.number_compartments] = (population_vector[i]/100)*D0
y0[params.O_ind + i*params.number_compartments] = (population_vector[i]/100)*O0
y0[params.Q_ind + i*params.number_compartments] = (population_vector[i]/100)*Q0
y0[params.U_ind + i*params.number_compartments] = (population_vector[i]/100)*U0
symptomatic_prob = np.asarray(population_frame.p_symptomatic)
hospital_prob = np.asarray(population_frame.p_hospitalised)
critical_prob = np.asarray(population_frame.p_critical)
sol = ode(self.ode_system).set_f_params(infection_matrix,age_categories,symptomatic_prob,hospital_prob,critical_prob,beta, # params
latentRate,removalRate,hospRate,deathRateICU,deathRateNoIcu, # more params
control_dict['better_hygiene'],control_dict['remove_symptomatic'],control_dict['remove_high_risk'],control_dict['ICU_capacity'] # control params
)
tim = np.linspace(0,T_stop, T_stop+1) # 1 time value per day
sol.set_initial_value(y0,tim[0])
y_out = np.zeros((len(y0),len(tim)))
i2 = 0
y_out[:,0] = sol.y
for t in tim[1:]:
if sol.successful():
sol.integrate(t)
i2=i2+1
y_out[:,i2] = sol.y
else:
raise RuntimeError('ode solver unsuccessful')
y_plot = np.zeros((len(categories.keys()), len(tim) ))
for name in calculated_categories:
y_plot[categories[name]['index'],:] = y_out[categories[name]['index'],:]
for i in range(1, population_frame.shape[0]): # age_categories
y_plot[categories[name]['index'],:] = y_plot[categories[name]['index'],:] + y_out[categories[name]['index'] + i*params.number_compartments,:]
for name in change_in_categories: # daily change in
name_changed_var = name[-1] # name of the variable we want daily change of
y_plot[categories[name]['index'],:] = np.concatenate([[0],np.diff(y_plot[categories[name_changed_var]['index'],:])])
# finally,
E = y_plot[categories['CE']['index'],:]
I = y_plot[categories['CI']['index'],:]
A = y_plot[categories['CA']['index'],:]
y_plot[categories['Ninf']['index'],:] = [E[i] + I[i] + A[i] for i in range(len(E))] # change in total number of people with active infection
return {'y': y_out,'t': tim, 'y_plot': y_plot}
#--------------------------------------------------------------------
def GeneratePercentiles(sols):
n_time_points = len(sols[0]['t'])
y_plot = np.zeros((len(categories.keys()), len(sols) , n_time_points ))
for k, sol in enumerate(sols):
sol['y'] = np.asarray(sol['y'])
for name in categories.keys():
y_plot[categories[name]['index'],k,:] = sol['y_plot'][categories[name]['index']]
y_L95, y_U95, y_LQ, y_UQ, y_median = [np.zeros((len(categories.keys()),n_time_points)) for i in range(5)]
for name in categories.keys():
y_L95[categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[categories[name]['index'],:,i],2.5) for i in range(n_time_points) ])
y_LQ[categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[categories[name]['index'],:,i],25) for i in range(n_time_points) ])
y_UQ[categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[categories[name]['index'],:,i],75) for i in range(n_time_points) ])
y_U95[categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[categories[name]['index'],:,i],97.5) for i in range(n_time_points) ])
y_median[categories[name]['index'],:] = np.asarray([statistics.median(y_plot[categories[name]['index'],:,i]) for i in range(n_time_points) ])
return [y_U95, y_UQ, y_LQ, y_L95, y_median]
def GenerateInfectionMatrix(population_frame,camp,control_dict):
infection_matrix = np.asarray(pd.read_csv(os.path.join(os.path.dirname(cwd),'Parameters/Contact_matrix_' + camp + '.csv'))) #np.ones((population_frame.shape[0],population_frame.shape[0]))
infection_matrix = infection_matrix[:,1:]
next_generation_matrix = np.matmul(0.01*np.diag(population_frame.Population_structure) , infection_matrix )
largest_eigenvalue = max(np.linalg.eig(next_generation_matrix)[0]) # max eigenvalue
beta_list = np.linspace(params.beta_list[0],params.beta_list[2],20)
beta_list = np.real((1/largest_eigenvalue)* beta_list) # in case eigenvalue imaginary
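    # Note: scaling by 1 / largest_eigenvalue means beta * largest_eigenvalue recovers
    # the raw values in params.beta_list, so the implied R0
    # (beta * largest_eigenvalue / removal_rate, as used in simulate_range_of_R0s)
    # stays comparable across camps with different contact matrices.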
if control_dict['shielding']['used']: # increase contact within group and decrease between groups
        divider = -1 # determines which groups are separated; -1 means only the oldest group is separated from the rest
infection_matrix[:divider,:divider] = params.shield_increase*infection_matrix[:divider,:divider]
infection_matrix[:divider,divider:] = params.shield_decrease*infection_matrix[:divider,divider:]
infection_matrix[divider:,:divider] = params.shield_decrease*infection_matrix[divider:,:divider]
        infection_matrix[divider:,divider:] = params.shield_increase*infection_matrix[divider:,divider:]
return infection_matrix, beta_list, largest_eigenvalue
def simulate_range_of_R0s(population_frame, population, control_dict, camp, t_stop=200): # gives solution for middle R0, as well as solutions for a range of R0s between an upper and lower bound
infection_matrix, beta_list, largest_eigenvalue = GenerateInfectionMatrix(population_frame,camp,control_dict)
sols = []
sols_raw = {}
for beta in beta_list:
result=simulator().run_model(T_stop=t_stop,infection_matrix=infection_matrix,population=population,population_frame=population_frame,beta=beta,control_dict=control_dict)
sols.append(result)
sols_raw[beta*largest_eigenvalue/params.removal_rate]=result
[y_U95, y_UQ, y_LQ, y_L95, y_median] = GeneratePercentiles(sols)
StandardSol = []
StandardSol.append(simulator().run_model(T_stop=t_stop,infection_matrix=infection_matrix,population=population,population_frame=population_frame,beta=params.beta_list[1],control_dict=control_dict))
return sols_raw, StandardSol, [y_U95, y_UQ, y_LQ, y_L95, y_median]
def SimulateOverRangeOfParameters(population_frame, population, control_dict, camp, numberOfIterations, t_stop=200):
infection_matrix, beta_list, largest_eigenvalue = GenerateInfectionMatrix(population_frame,camp,control_dict)
ParamCsv = pd.read_csv(os.path.join(os.path.dirname(cwd),'Parameters/GeneratedParams.csv'))
sols = []
configDict = []
sols_raw = {}
for ii in tqdm(range(min(numberOfIterations,len(ParamCsv)))):
latentRate = 1/ParamCsv.LatentPeriod[ii]
removalRate = 1/ParamCsv.RemovalPeriod[ii]
beta = removalRate*ParamCsv.R0[ii]/largest_eigenvalue
hospRate = 1/ParamCsv.HospPeriod[ii]
deathRateICU = 1/ParamCsv.DeathICUPeriod[ii]
deathRateNoIcu = 1/ParamCsv.DeathNoICUPeriod[ii]
result = simulator().run_model(T_stop=t_stop,infection_matrix=infection_matrix,population=population,population_frame=population_frame,beta=beta,
control_dict= control_dict,
latentRate = latentRate,
removalRate = removalRate,
hospRate = hospRate,
deathRateICU = deathRateICU,
deathRateNoIcu = deathRateNoIcu
)
sols.append(result)
Dict = dict(beta = beta,
latentRate = latentRate,
removalRate = removalRate,
hospRate = hospRate,
deathRateICU = deathRateICU,
deathRateNoIcu = deathRateNoIcu
)
configDict.append(Dict)
sols_raw[(ParamCsv.R0[ii],latentRate,removalRate,hospRate,deathRateICU,deathRateNoIcu)]=result
[y_U95, y_UQ, y_LQ, y_L95, y_median] = GeneratePercentiles(sols)
# standard run
StandardSol = []
StandardSol.append(simulator().run_model(T_stop=t_stop,infection_matrix=infection_matrix,population=population,population_frame=population_frame,beta=params.beta_list[1],control_dict=control_dict))
return sols_raw, StandardSol, [y_U95, y_UQ, y_LQ, y_L95, y_median], configDict
def object_dump(file_name,object_to_dump):
# check if file path exists - if not create
outdir = os.path.dirname(file_name)
if not os.path.exists(outdir):
os.makedirs(os.path.join(cwd,outdir),exist_ok=True)
with open(file_name, 'wb') as handle:
pickle.dump(object_to_dump,handle,protocol=pickle.HIGHEST_PROTOCOL)
return None
def generate_csv(data_to_save,population_frame,filename,input_type=None,time_vec=None):
category_map = {}
for key in categories.keys():
category_map[str(categories[key]['index'])] = key
print(category_map)
if input_type=='percentile':
csv_sol = np.transpose(data_to_save)
solution_csv = pd.DataFrame(csv_sol)
col_names = []
for i in range(csv_sol.shape[1]):
col_names.append(categories[category_map[str(i)]]['longname'])
solution_csv.columns = col_names
solution_csv['Time'] = time_vec
# this is our dataframe to be saved
elif input_type=='raw':
final_frame=pd.DataFrame()
for key, value in tqdm(data_to_save.items()):
csv_sol = np.transpose(value['y']) # age structured
solution_csv = pd.DataFrame(csv_sol)
# setup column names
col_names = []
number_categories_with_age = csv_sol.shape[1]
for i in range(number_categories_with_age):
ii = i % params.number_compartments
jj = floor(i/params.number_compartments)
                col_names.append(categories[category_map[str(ii)]]['longname'] + ': ' + str(np.asarray(population_frame.Age)[jj]))  # the "[jj]" age-group suffix is an assumed completion; the original line was truncated here
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import assert_index_equal
from statsmodels.compat.platform import PLATFORM_WIN
from statsmodels.compat.python import lrange
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas import DataFrame, Series, date_range
import pytest
from scipy.interpolate import interp1d
from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
# Remove imports when range unit root test gets an R implementation
from statsmodels.tools.validation import array_like, bool_like
from statsmodels.tsa.arima_process import arma_acovf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import (
acf,
acovf,
adfuller,
arma_order_select_ic,
breakvar_heteroskedasticity_test,
ccovf,
coint,
grangercausalitytests,
innovations_algo,
innovations_filter,
kpss,
levinson_durbin,
levinson_durbin_pacf,
pacf,
pacf_burg,
pacf_ols,
pacf_yw,
range_unit_root_test,
zivot_andrews,
)
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def acovf_data():
rnd = np.random.RandomState(12345)
return rnd.randn(250)
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
x = data.data["realgdp"].values
y = data.data["infl"].values
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4)
cls.teststat = 0.97505319
cls.pvalue = 0.99399563
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
""""""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4)
cls.teststat = -1.8566374
cls.pvalue = 0.67682968
cls.critvalues = [-4.007, -3.437, -3.137]
# FIXME: do not leave commented-out
# class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
# TODO: get test values from R?
class TestADFNoConstant(CheckADF):
""""""
@classmethod
def setup_class(cls):
with pytest.warns(FutureWarning):
adfuller(cls.x, regression="nc", autolag=None, maxlag=4)
cls.res1 = adfuller(cls.x, regression="n", autolag=None, maxlag=4)
cls.teststat = 3.5227498
cls.pvalue = 0.99999
# Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
cls.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1)
cls.teststat = -4.3346988
cls.pvalue = 0.00038661
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1)
cls.teststat = -4.425093
cls.pvalue = 0.00199633
cls.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="n", autolag=None, maxlag=1)
cls.teststat = -2.4511596
cls.pvalue = 0.013747
# Stata does not return a p-value for noconstant
# this value is just taken from our results
cls.critvalues = [-2.587, -1.950, -1.617]
_, _1, _2, cls.store = adfuller(
cls.y, regression="n", autolag=None, maxlag=1, store=True
)
def test_store_str(self):
assert_equal(
self.store.__str__(), "Augmented Dickey-Fuller Test Results"
)
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load_pandas()
x = data.data["realgdp"]
filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv")
results = pd.read_csv(filename, delimiter=",")
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvar"]
# cls.acf = np.concatenate(([1.], cls.acf))
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False)
cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:, None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# FIXME: enable/xfail/skip or delete
# def pvalue(self):
# pass
# NOTE: should not need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
# Test Autocorrelation Function using FFT
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvarfft"]
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestACFMissing(CheckCorrGram):
# Test Autocorrelation Function using Missing
@classmethod
def setup_class(cls):
cls.x = np.concatenate((np.array([np.nan]), cls.x))
cls.acf = cls.results["acvar"] # drop and conservative
cls.qstat = cls.results["Q1"]
cls.res_drop = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False
)
cls.res_conservative = acf(
cls.x,
nlags=40,
qstat=True,
alpha=0.05,
fft=False,
missing="conservative",
)
cls.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive
cls.qstat_none = np.empty(40) * np.nan
cls.res_none = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False
)
def test_raise(self):
with pytest.raises(MissingDataError):
acf(
self.x,
nlags=40,
qstat=True,
fft=False,
alpha=0.05,
missing="raise",
)
def test_acf_none(self):
assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8)
def test_acf_drop(self):
assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8)
def test_acf_conservative(self):
assert_almost_equal(
self.res_conservative[0][1:41], self.acf, DECIMAL_8
)
def test_qstat_none(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3)
# FIXME: enable/xfail/skip or delete
# how to do this test? the correct q_stat depends on whether nobs=len(x) is
# used when x contains NaNs or whether nobs<len(x) when x contains NaNs
# def test_qstat_drop(self):
# assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
@classmethod
def setup_class(cls):
cls.pacfols = cls.results["PACOLS"]
cls.pacfyw = cls.results["PACYW"]
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:, None]
# from edited Stata ado file
res = [[-0.1375625, 0.1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
# check lag 0
assert_equal(centered[0], [0.0, 0.0])
assert_equal(confint[0], [1, 1])
assert_equal(pacfols[0], 1)
def test_ols_inefficient(self):
lag_len = 5
pacfols = pacf_ols(self.x, nlags=lag_len, efficient=False)
x = self.x.copy()
x -= x.mean()
n = x.shape[0]
lags = np.zeros((n - 5, 5))
lead = x[5:]
direct = np.empty(lag_len + 1)
direct[0] = 1.0
for i in range(lag_len):
lags[:, i] = x[5 - (i + 1) : -(i + 1)]
direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1]
assert_allclose(pacfols, direct, atol=1e-8)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="lda")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class TestBreakvarHeteroskedasticityTest(object):
from scipy.stats import chi2, f
def test_1d_input(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
# ~ F(2, 2), two-sided test
expected_pvalue = 2 * min(
self.f.cdf(expected_statistic, 2, 2),
self.f.sf(expected_statistic, 2, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_2d_input_with_missing_values(self):
input_residuals = np.array(
[
[0.0, 0.0, np.nan],
[1.0, np.nan, 1.0],
[2.0, 2.0, np.nan],
[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0],
[5.0, 5.0, 5.0],
[6.0, 6.0, 6.0],
[7.0, 7.0, 7.0],
[8.0, 8.0, 8.0],
]
)
expected_statistic = np.array(
[
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2)
/ (0.0 ** 2 + 1.0 ** 2 + 2.0 ** 2),
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2) / (0.0 ** 2 + 2.0 ** 2),
np.nan,
]
)
expected_pvalue = np.array(
[
2
* min(
self.f.cdf(expected_statistic[0], 3, 3),
self.f.sf(expected_statistic[0], 3, 3),
),
2
* min(
self.f.cdf(expected_statistic[1], 3, 2),
self.f.sf(expected_statistic[1], 3, 2),
),
np.nan,
]
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert_equal(actual_statistic, expected_statistic)
assert_equal(actual_pvalue, expected_pvalue)
@pytest.mark.parametrize(
"subset_length,expected_statistic,expected_pvalue",
[
(2, 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
(0.5, 10, 2 * min(f.cdf(10, 3, 3), f.sf(10, 3, 3))),
],
)
def test_subset_length(
self, subset_length, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
subset_length=subset_length,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
@pytest.mark.parametrize(
"alternative,expected_statistic,expected_pvalue",
[
("two-sided", 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
("decreasing", 1 / 41, f.sf(1 / 41, 2, 2)),
("increasing", 41, f.sf(41, 2, 2)),
],
)
def test_alternative(
self, alternative, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
alternative=alternative,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_use_chi2(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
expected_pvalue = 2 * min(
self.chi2.cdf(2 * expected_statistic, 2),
self.chi2.sf(2 * expected_statistic, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
use_f=False,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
y1 = data.data["realcons"].values
y2 = data.data["realgdp"].values
def test_tstat(self):
assert_almost_equal(self.coint_t, self.teststat, DECIMAL_4)
# this does not produce the old results anymore
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
@classmethod
def setup_class(cls):
# cls.coint_t = coint(cls.y1, cls.y2, trend="c")[0]
cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[
0
]
cls.teststat = -1.8208817
cls.teststat = -1.830170986148
def test_coint():
nobs = 200
scale_e = 1
const = [1, 0, 0.5, 0]
np.random.seed(123)
unit = np.random.randn(nobs).cumsum()
y = scale_e * np.random.randn(nobs, 4)
y[:, :2] += unit[:, None]
y += const
y = np.round(y, 4)
# FIXME: enable/xfail/skip or delete
for trend in []: # ['c', 'ct', 'ctt', 'n']:
print("\n", trend)
print(coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None))
# results from Stata egranger
res_egranger = {}
# trend = 'ct'
res = res_egranger["ct"] = {}
res[0] = [
-5.615251442239,
-4.406102369132,
-3.82866685109,
-3.532082997903,
]
res[1] = [
-5.63591313706,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[2] = [
-2.892029275027,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[3] = [-5.626932544079, -5.08363327039, -4.502469783057, -4.2031051091]
# trend = 'c'
res = res_egranger["c"] = {}
# first critical value res[0][1] has a discrepancy starting at 4th decimal
res[0] = [
-5.760696844656,
-3.952043522638,
-3.367006313729,
-3.065831247948,
]
# manually adjusted to have higher precision as in other cases
res[0][1] = -3.952321293401682
res[1] = [
-5.781087068772,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[2] = [
-2.477444137366,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[3] = [
-5.778205811661,
-4.735249216434,
-4.152738973763,
-3.852480848968,
]
# trend = 'ctt'
res = res_egranger["ctt"] = {}
res[0] = [
-5.644431269946,
-4.796038299708,
-4.221469431008,
-3.926472577178,
]
res[1] = [-5.665691609506, -5.111158174219, -4.53317278104, -4.23601008516]
res[2] = [-3.161462374828, -5.111158174219, -4.53317278104, -4.23601008516]
res[3] = [
-5.657904558563,
-5.406880189412,
-4.826111619543,
-4.527090164875,
]
# The following for 'n' are only regression test numbers
# trend = 'n' not allowed in egranger
# trend = 'n'
res = res_egranger["n"] = {}
nan = np.nan # shortcut for table
res[0] = [-3.7146175989071137, nan, nan, nan]
res[1] = [-3.8199323012888384, nan, nan, nan]
res[2] = [-1.6865000791270679, nan, nan, nan]
res[3] = [-3.7991270451873675, nan, nan, nan]
with pytest.warns(FutureWarning):
# Ensure warning raised for nc rather than n
coint(y[:, 0], y[:, 1], trend="nc", maxlag=4, autolag=None)
for trend in ["c", "ct", "ctt", "n"]:
res1 = {}
res1[0] = coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None)
res1[1] = coint(
y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None
)
res1[2] = coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None)
res1[3] = coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None)
for i in range(4):
res = res_egranger[trend]
assert_allclose(res1[i][0], res[i][0], rtol=1e-11)
r2 = res[i][1:]
r1 = res1[i][2]
assert_allclose(r1, r2, rtol=0, atol=6e-7)
# use default autolag #4490
res1_0 = coint(y[:, 0], y[:, 1], trend="ct", maxlag=4)
assert_allclose(res1_0[2], res_egranger["ct"][0][1:], rtol=0, atol=6e-7)
# the following is just a regression test
assert_allclose(
res1_0[:2],
[-13.992946638547112, 2.270898990540678e-27],
rtol=1e-10,
atol=1e-27,
)
def test_coint_identical_series():
nobs = 200
scale_e = 1
np.random.seed(123)
y = scale_e * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with pytest.warns(CollinearityWarning):
c = coint(y, y, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
def test_coint_perfect_collinearity():
# test uses nearly perfect collinearity
nobs = 200
scale_e = 1
np.random.seed(123)
x = scale_e * np.random.randn(nobs, 2)
y = 1 + x.sum(axis=1) + 1e-7 * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with warnings.catch_warnings(record=True) as w:
c = coint(y, x, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
class TestGrangerCausality(object):
def test_grangercausality(self):
# some example data
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
# R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] # f_test
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]["ssr_ftest"], decimal=7)
assert_almost_equal(
gr[2][0]["params_ftest"], gr[2][0]["ssr_ftest"], decimal=7
)
def test_grangercausality_single(self):
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
gr2 = grangercausalitytests(data[:, 1::-1], [2], verbose=False)
assert 1 in gr
assert 1 not in gr2
assert_almost_equal(
gr[2][0]["ssr_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
assert_almost_equal(
gr[2][0]["params_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
def test_granger_fails_on_nobs_check(self, reset_randomstate):
# Test that if maxlag is too large, Granger Test raises a clear error.
x = np.random.rand(10, 2)
grangercausalitytests(x, 2, verbose=False) # This should pass.
with pytest.raises(ValueError):
grangercausalitytests(x, 3, verbose=False)
def test_granger_fails_on_finite_check(self, reset_randomstate):
x = np.random.rand(1000, 2)
x[500, 0] = np.nan
x[750, 1] = np.inf
with pytest.raises(ValueError, match="x contains NaN"):
grangercausalitytests(x, 2)
def test_granger_fails_on_zero_lag(self, reset_randomstate):
x = np.random.rand(1000, 2)
with pytest.raises(
ValueError,
match="maxlag must be a non-empty list containing only positive integers",
):
grangercausalitytests(x, [0, 1, 2])
class TestKPSS:
"""
R-code
------
library(tseries)
kpss.stat(x, "Level")
kpss.stat(x, "Trend")
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
@classmethod
def setup(cls):
cls.data = macrodata.load_pandas()
cls.x = cls.data.data["realgdp"].values
def test_fail_nonvector_input(self, reset_randomstate):
# should be fine
with pytest.warns(InterpolationWarning):
kpss(self.x, nlags="legacy")
x = np.random.rand(20, 2)
assert_raises(ValueError, kpss, x)
def test_fail_unclear_hypothesis(self):
# these should be fine,
with pytest.warns(InterpolationWarning):
kpss(self.x, "c", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "C", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "ct", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "CT", nlags="legacy")
assert_raises(
ValueError, kpss, self.x, "unclear hypothesis", nlags="legacy"
)
def test_teststat(self):
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "c", 3)
assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3)
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "ct", 3)
assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3)
def test_pval(self):
with pytest.warns(InterpolationWarning):
_, pval, _, _ = kpss(self.x, "c", 3)
assert_equal(pval, 0.01)
with pytest.warns(InterpolationWarning):
_, pval, _, _ = kpss(self.x, "ct", 3)
assert_equal(pval, 0.01)
def test_store(self):
with pytest.warns(InterpolationWarning):
_, _, _, store = kpss(self.x, "c", 3, True)
# assert attributes, and make sure they're correct
assert_equal(store.nobs, len(self.x))
assert_equal(store.lags, 3)
# test autolag function _kpss_autolag against SAS 9.3
def test_lags(self):
# real GDP from macrodata data set
with pytest.warns(InterpolationWarning):
res = kpss(self.x, "c", nlags="auto")
assert_equal(res[2], 9)
# real interest rates from macrodata data set
res = kpss(sunspots.load().data["SUNACTIVITY"], "c", nlags="auto")
assert_equal(res[2], 7)
# volumes from nile data set
with pytest.warns(InterpolationWarning):
res = kpss(nile.load().data["volume"], "c", nlags="auto")
assert_equal(res[2], 5)
# log-coinsurance from randhie data set
with pytest.warns(InterpolationWarning):
res = kpss(randhie.load().data["lncoins"], "ct", nlags="auto")
assert_equal(res[2], 75)
# in-vehicle time from modechoice data set
with pytest.warns(InterpolationWarning):
res = kpss(modechoice.load().data["invt"], "ct", nlags="auto")
assert_equal(res[2], 18)
def test_kpss_fails_on_nobs_check(self):
# Test that if lags exceeds number of observations KPSS raises a
# clear error
# GH5925
nobs = len(self.x)
msg = r"lags \({}\) must be < number of observations \({}\)".format(
nobs, nobs
)
with pytest.raises(ValueError, match=msg):
kpss(self.x, "c", nlags=nobs)
def test_kpss_autolags_does_not_assign_lags_equal_to_nobs(self):
# Test that if *autolags* exceeds number of observations, we set
# suitable lags
# GH5925
base = np.array([0, 0, 0, 0, 0, 1, 1.0])
data_which_breaks_autolag = np.r_[np.tile(base, 297 // 7), [0, 0, 0]]
kpss(data_which_breaks_autolag, nlags="auto")
def test_legacy_lags(self):
# Test legacy lags are the same
with pytest.warns(InterpolationWarning):
res = kpss(self.x, "c", nlags="legacy")
assert_equal(res[2], 15)
def test_unknown_lags(self):
# Test legacy lags are the same
with pytest.raises(ValueError):
kpss(self.x, "c", nlags="unknown")
def test_none(self):
with pytest.warns(FutureWarning):
kpss(self.x, nlags=None)
class TestRUR:
"""
Simple implementation
------
Since an R implementation of the test cannot be found, the method is tested against
a simple implementation using a for loop.
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
@classmethod
def setup(cls):
cls.data = macrodata.load_pandas()
cls.x = cls.data.data["realgdp"].values
# To be removed when range unit test gets an R implementation
def simple_rur(self, x, store=False):
x = array_like(x, "x")
store = bool_like(store, "store")
nobs = x.shape[0]
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
# Table from [1] has been replicated using 200,000 samples
# Critical values for new n_obs values have been identified
pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95]
n = np.array(
[25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]
)
crit = np.array(
[
[0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312],
[0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613],
[0.907, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393],
[0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049],
[0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482],
[0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2482],
[1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584],
[1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073],
[1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439],
[1.1204, 1.2295, 1.3318, 1.4656, 3.1054, 3.4632],
[1.1309, 1.2347, 1.3318, 1.4693, 3.1165, 3.4717],
[1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807],
]
)
# Interpolation for nobs
inter_crit = np.zeros((1, crit.shape[1]))
for i in range(crit.shape[1]):
f = interp1d(n, crit[:, i])
inter_crit[0, i] = f(nobs)
# Calculate RUR stat
count = 0
max_p = x[0]
min_p = x[0]
for v in x[1:]:
if v > max_p:
max_p = v
count = count + 1
if v < min_p:
min_p = v
count = count + 1
rur_stat = count / np.sqrt(len(x))
k = len(pvals) - 1
for i in range(len(pvals) - 1, -1, -1):
if rur_stat < inter_crit[0, i]:
k = i
else:
break
p_value = pvals[k]
warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
direction = ""
if p_value == pvals[-1]:
direction = "smaller"
elif p_value == pvals[0]:
direction = "larger"
if direction:
warnings.warn(
warn_msg.format(direction=direction), InterpolationWarning
)
crit_dict = {
"10%": inter_crit[0, 3],
"5%": inter_crit[0, 2],
"2.5%": inter_crit[0, 1],
"1%": inter_crit[0, 0],
}
if store:
from statsmodels.stats.diagnostic import ResultsStore
rstore = ResultsStore()
rstore.nobs = nobs
rstore.H0 = "The series is not stationary"
rstore.HA = "The series is stationary"
return rur_stat, p_value, crit_dict, rstore
else:
return rur_stat, p_value, crit_dict
def test_fail_nonvector_input(self, reset_randomstate):
with pytest.warns(InterpolationWarning):
range_unit_root_test(self.x)
x = np.random.rand(20, 2)
assert_raises(ValueError, range_unit_root_test, x)
def test_teststat(self):
with pytest.warns(InterpolationWarning):
rur_stat, _, _ = range_unit_root_test(self.x)
simple_rur_stat, _, _ = self.simple_rur(self.x)
assert_almost_equal(rur_stat, simple_rur_stat, DECIMAL_3)
def test_pval(self):
with pytest.warns(InterpolationWarning):
_, pval, _ = range_unit_root_test(self.x)
_, simple_pval, _ = self.simple_rur(self.x)
assert_equal(pval, simple_pval)
def test_store(self):
with pytest.warns(InterpolationWarning):
_, _, _, store = range_unit_root_test(self.x, True)
# assert attributes, and make sure they're correct
assert_equal(store.nobs, len(self.x))
def test_pandasacovf():
s = Series(lrange(1, 11))
assert_almost_equal(acovf(s, fft=False), acovf(s.values, fft=False))
def test_acovf2d(reset_randomstate):
dta = sunspots.load_pandas().data
dta.index = date_range(start="1700", end="2009", freq="A")[:309]
del dta["YEAR"]
res = acovf(dta, fft=False)
assert_equal(res, acovf(dta.values, fft=False))
x = np.random.random((10, 2))
with pytest.raises(ValueError):
acovf(x, fft=False)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
q = np.random.normal(size=100)
F1 = acovf(q, demean=demean, adjusted=adjusted, fft=True)
F2 = acovf(q, demean=demean, adjusted=adjusted, fft=False)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_ccovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
x = np.random.normal(size=128)
y = np.random.normal(size=128)
F1 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=False)
F2 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=True)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_compare_acovf_vs_ccovf(demean, adjusted, fft, reset_randomstate):
x = np.random.normal(size=128)
F1 = acovf(x, demean=demean, adjusted=adjusted, fft=fft)
F2 = ccovf(x, x, demean=demean, adjusted=adjusted, fft=fft)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.smoke
@pytest.mark.slow
def test_arma_order_select_ic():
# smoke test, assumes info-criteria are right
from statsmodels.tsa.arima_process import arma_generate_sample
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arparams = np.r_[1, -arparams]
maparam = np.r_[1, maparams] # FIXME: Never used
nobs = 250
np.random.seed(2014)
y = arma_generate_sample(arparams, maparams, nobs)
res = arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
# regression tests in case we change algorithm to minic in sas
aic_x = np.array(
[
[764.36517643, 552.7342255, 484.29687843],
[562.10924262, 485.5197969, 480.32858497],
[507.04581344, 482.91065829, 481.91926034],
[484.03995962, 482.14868032, 483.86378955],
[481.8849479, 483.8377379, 485.83756612],
]
)
bic_x = np.array(
[
[767.88663735, 559.77714733, 494.86126118],
[569.15216446, 496.08417966, 494.41442864],
[517.61019619, 496.99650196, 499.52656493],
[498.12580329, 499.75598491, 504.99255506],
[499.49225249, 504.96650341, 510.48779255],
]
)
aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3))
bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3))
assert_almost_equal(res.aic.values, aic.values, 5)
assert_almost_equal(res.bic.values, bic.values, 5)
assert_equal(res.aic_min_order, (1, 2))
assert_equal(res.bic_min_order, (1, 2))
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_(res.bic.index.equals(bic.index))
assert_(res.bic.columns.equals(bic.columns))
index = pd.date_range("2000-1-1", freq="M", periods=len(y))
y_series = pd.Series(y, index=index)
res_pd = arma_order_select_ic(
y_series, max_ar=2, max_ma=1, ic=["aic", "bic"], trend="n"
)
assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5)
assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5)
assert_equal(res_pd.aic_min_order, (2, 1))
assert_equal(res_pd.bic_min_order, (1, 1))
res = arma_order_select_ic(y, ic="aic", trend="n")
assert_almost_equal(res.aic.values, aic.values, 5)
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
# this should trigger an SVD convergence failure, smoke test that it
# returns, likely platform dependent failure...
# looks like AR roots may be cancelling out for 4, 1?
y = np.array(
[
0.86074377817203640006,
0.85316549067906921611,
0.87104653774363305363,
0.60692382068987393851,
0.69225941967301307667,
0.73336177248909339976,
0.03661329261479619179,
0.15693067239962379955,
0.12777403512447857437,
-0.27531446294481976,
-0.24198139631653581283,
-0.23903317951236391359,
-0.26000241325906497947,
-0.21282920015519238288,
-0.15943768324388354896,
0.25169301564268781179,
0.1762305709151877342,
0.12678133368791388857,
0.89755829086753169399,
0.82667068795350151511,
]
)
import warnings
with warnings.catch_warnings():
# catch a hessian inversion and convergence failure warning
warnings.simplefilter("ignore")
res = arma_order_select_ic(y)
def test_acf_fft_dataframe():
# regression test #322
result = acf(
sunspots.load_pandas().data[["SUNACTIVITY"]], fft=True, nlags=20
)
assert_equal(result.ndim, 1)
def test_levinson_durbin_acov():
rho = 0.9
m = 20
acov = rho ** np.arange(200)
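# For an AR(1) process with unit variance and autocovariance rho**k, the innovation
# variance is 1 - rho**2 and the partial autocorrelation vanishes beyond lag 1,
# which is exactly what the assertions below check.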
sigma2_eps, ar, pacf, _, _ = levinson_durbin(acov, m, isacov=True)
assert_allclose(sigma2_eps, 1 - rho ** 2)
assert_allclose(ar, np.array([rho] + [0] * (m - 1)), atol=1e-8)
assert_allclose(pacf, np.array([1, rho] + [0] * (m - 1)), atol=1e-8)
@pytest.mark.parametrize("missing", ["conservative", "drop", "raise", "none"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags(acovf_data, adjusted, demean, fft, missing):
full = acovf(
acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
)
limited = acovf(
acovf_data,
adjusted=adjusted,
demean=demean,
fft=fft,
missing=missing,
nlag=10,
)
assert_allclose(full[:11], limited)
@pytest.mark.parametrize("missing", ["conservative", "drop"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags_missing(acovf_data, adjusted, demean, fft, missing):
acovf_data = acovf_data.copy()
acovf_data[1:3] = np.nan
full = acovf(
acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
)
limited = acovf(
acovf_data,
adjusted=adjusted,
demean=demean,
fft=fft,
missing=missing,
nlag=10,
)
assert_allclose(full[:11], limited)
def test_acovf_error(acovf_data):
with pytest.raises(ValueError):
acovf(acovf_data, nlag=250, fft=False)
def test_pacf2acf_ar():
pacf = np.zeros(10)
pacf[0] = 1
pacf[1] = 0.9
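# A PACF that is zero beyond lag 1 characterises an AR(1) process, so the implied ACF is 0.9 ** k.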
ar, acf = levinson_durbin_pacf(pacf)
assert_allclose(acf, 0.9 ** np.arange(10.0))
assert_allclose(ar, pacf[1:], atol=1e-8)
ar, acf = levinson_durbin_pacf(pacf, nlags=5)
assert_allclose(acf, 0.9 ** np.arange(6.0))
assert_allclose(ar, pacf[1:6], atol=1e-8)
def test_pacf2acf_levinson_durbin():
pacf = -(0.9 ** np.arange(11.0))
pacf[0] = 1
ar, acf = levinson_durbin_pacf(pacf)
_, ar_ld, pacf_ld, _, _ = levinson_durbin(acf, 10, isacov=True)
assert_allclose(ar, ar_ld, atol=1e-8)
assert_allclose(pacf, pacf_ld, atol=1e-8)
# From R, FitAR, PacfToAR
ar_from_r = [
-4.1609,
-9.2549,
-14.4826,
-17.6505,
-17.5012,
-14.2969,
-9.5020,
-4.9184,
-1.7911,
-0.3486,
]
assert_allclose(ar, ar_from_r, atol=1e-4)
def test_pacf2acf_errors():
pacf = -(0.9 ** np.arange(11.0))
pacf[0] = 1
with pytest.raises(ValueError):
levinson_durbin_pacf(pacf, nlags=20)
with pytest.raises(ValueError):
levinson_durbin_pacf(pacf[1:])
with pytest.raises(ValueError):
levinson_durbin_pacf(np.zeros(10))
with pytest.raises(ValueError):
levinson_durbin_pacf(np.zeros((10, 2)))
def test_pacf_burg():
rnd = np.random.RandomState(12345)
e = rnd.randn(10001)
y = e[1:] + 0.5 * e[:-1]
pacf, sigma2 = pacf_burg(y, 10)
yw_pacf = pacf_yw(y, 10)
assert_allclose(pacf, yw_pacf, atol=5e-4)
# Internal consistency check between pacf and sigma2
ye = y - y.mean()
s2y = ye.dot(ye) / 10000
pacf[0] = 0
sigma2_direct = s2y * np.cumprod(1 - pacf ** 2)
assert_allclose(sigma2, sigma2_direct, atol=1e-3)
def test_pacf_burg_error():
with pytest.raises(ValueError):
pacf_burg(np.empty((20, 2)), 10)
with pytest.raises(ValueError):
pacf_burg(np.empty(100), 101)
def test_innovations_algo_brockwell_davis():
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
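# MA(1) autocovariances with unit innovation variance: gamma(0) = 1 + ma**2, gamma(1) = ma.
# As nobs grows, the innovations-algorithm coefficient converges to the MA parameter and
# the one-step prediction variance converges to 1, as asserted below.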
theta, sigma2 = innovations_algo(acovf, nobs=4)
exp_theta = np.array([[0], [-0.4972], [-0.6606], [-0.7404]])
assert_allclose(theta, exp_theta, rtol=1e-4)
assert_allclose(sigma2, [1.81, 1.3625, 1.2155, 1.1436], rtol=1e-4)
theta, sigma2 = innovations_algo(acovf, nobs=500)
assert_allclose(theta[-1, 0], ma)
assert_allclose(sigma2[-1], 1.0)
def test_innovations_algo_rtol():
ma = np.array([-0.9, 0.5])
acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]])
theta, sigma2 = innovations_algo(acovf, nobs=500)
theta_2, sigma2_2 = innovations_algo(acovf, nobs=500, rtol=1e-8)
assert_allclose(theta, theta_2)
assert_allclose(sigma2, sigma2_2)
def test_innovations_errors():
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
with pytest.raises(TypeError):
innovations_algo(acovf, nobs=2.2)
with pytest.raises(ValueError):
innovations_algo(acovf, nobs=-1)
with pytest.raises(ValueError):
innovations_algo(np.empty((2, 2)))
import torch
import numpy as np
import cv2
import tqdm
import os
import json
from pycocotools.mask import *
from src.unet_plus import SE_Res50UNet,SE_Res101UNet
import time
local_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))
TEST_IMG_PATH = '/mnt/jinnan2_round2_test_b_20190424'
NORMAL_LIST_PATH = 'cvfly_normal_list_b.txt'
SUBMIT_PATH = './submit/cvfly_test_b_{}.json'.format(local_time)
SE50_MODEL_PATH = './models/se50/best_fold3_se50.pth'
SE101_MODEL_PATH = './models/se101/best_se101.pth'
def get_models(is_clc = False):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model50 = SE_Res50UNet(6, cls_only=is_clc)
model50.load_state_dict(torch.load(SE50_MODEL_PATH), strict=True)
model50 = model50.to(device)
model50.eval()
model101 = SE_Res101UNet(6,cls_only = is_clc)
model101.load_state_dict(torch.load(SE101_MODEL_PATH), strict=True)
model101 = model101.to(device)
model101.eval()
return model50, model101
def clc_aug(img):
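# Simple test-time augmentation: return the original image together with its vertical and horizontal flips.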
img_list = []
img_list.append(img.copy())
img_list.append(np.flipud(img).copy())
img_list.append(np.fliplr(img).copy())
return img_list
def clc_aug_tensor(img,size = None):
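# Same flip-based test-time augmentation, applied after resizing the image to a fixed square size.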
img = cv2.resize(img, size)
assert img.shape[0] == img.shape[1]
img_list = []
img_list.append(img.copy())
img_list.append(np.flipud(img).copy())
"""Mobjects that represent coordinate systems."""
__all__ = [
"CoordinateSystem",
"Axes",
"ThreeDAxes",
"NumberPlane",
"PolarPlane",
"ComplexPlane",
]
import fractions as fr
import numbers
from typing import Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
import numpy as np
from colour import Color
from manim.mobject.opengl_compatibility import ConvertToOpenGL
from .. import config
from ..constants import *
from ..mobject.functions import ParametricFunction
from ..mobject.geometry import (
Arrow,
Circle,
DashedLine,
Dot,
Line,
Rectangle,
RegularPolygon,
)
from ..mobject.number_line import NumberLine
from ..mobject.svg.tex_mobject import MathTex
from ..mobject.types.vectorized_mobject import (
Mobject,
VDict,
VectorizedPoint,
VGroup,
VMobject,
)
from ..utils.color import (
BLACK,
BLUE,
BLUE_D,
GREEN,
LIGHT_GREY,
WHITE,
YELLOW,
color_gradient,
invert_color,
)
from ..utils.config_ops import merge_dicts_recursively, update_dict_recursively
from ..utils.simple_functions import binary_search
from ..utils.space_ops import angle_of_vector
class CoordinateSystem:
"""
Abstract class for Axes and NumberPlane
Examples
--------
.. manim:: CoordSysExample
:save_last_frame:
class CoordSysExample(Scene):
def construct(self):
# the location of the ticks depends on the x_range and y_range.
grid = Axes(
x_range=[0, 1, 0.05], # step size determines num_decimal_places.
y_range=[0, 1, 0.05],
x_length=9,
y_length=5.5,
axis_config={
"numbers_to_include": np.arange(0, 1 + 0.1, 0.1),
"number_scale_value": 0.5,
},
tips=False,
)
# Labels for the x-axis and y-axis.
y_label = grid.get_y_axis_label("y", edge=LEFT, direction=LEFT, buff=0.4)
x_label = grid.get_x_axis_label("x")
grid_labels = VGroup(x_label, y_label)
graphs = VGroup()
for n in np.arange(1, 20 + 0.5, 0.5):
graphs += grid.get_graph(lambda x: x ** n, color=WHITE)
graphs += grid.get_graph(
lambda x: x ** (1 / n), color=WHITE, use_smoothing=False
)
# Extra lines and labels for point (1,1)
graphs += grid.get_horizontal_line(grid.c2p(1, 1, 0), color=BLUE)
graphs += grid.get_vertical_line(grid.c2p(1, 1, 0), color=BLUE)
graphs += Dot(point=grid.c2p(1, 1, 0), color=YELLOW)
graphs += Tex("(1,1)").scale(0.75).next_to(grid.c2p(1, 1, 0))
title = Title(
# spaces between braces to prevent SyntaxError
r"Graphs of $y=x^{ {1}\over{n} }$ and $y=x^n (n=1,2,3,...,20)$",
include_underline=False,
scale_factor=0.85,
)
self.add(title, graphs, grid, grid_labels)
"""
def __init__(
self,
x_range=None,
y_range=None,
x_length=None,
y_length=None,
dimension=2,
):
self.dimension = dimension
default_step = 1
if x_range is None:
x_range = [
round(-config["frame_x_radius"]),
round(config["frame_x_radius"]),
default_step,
]
elif len(x_range) == 2:
x_range = [*x_range, default_step]
if y_range is None:
y_range = [
round(-config["frame_y_radius"]),
round(config["frame_y_radius"]),
default_step,
]
elif len(y_range) == 2:
y_range = [*y_range, default_step]
self.x_range = x_range
self.y_range = y_range
self.x_length = x_length
self.y_length = y_length
self.num_sampled_graph_points_per_tick = 10
def coords_to_point(self, *coords):
raise NotImplementedError()
def point_to_coords(self, point):
raise NotImplementedError()
def c2p(self, *coords):
"""Abbreviation for coords_to_point"""
return self.coords_to_point(*coords)
def p2c(self, point):
"""Abbreviation for point_to_coords"""
return self.point_to_coords(point)
def get_axes(self):
raise NotImplementedError()
def get_axis(self, index):
return self.get_axes()[index]
def get_x_axis(self):
return self.get_axis(0)
def get_y_axis(self):
return self.get_axis(1)
def get_z_axis(self):
return self.get_axis(2)
def get_x_axis_label(self, label_tex, edge=UR, direction=UR, **kwargs):
return self.get_axis_label(
label_tex, self.get_x_axis(), edge, direction, **kwargs
)
def get_y_axis_label(
self, label_tex, edge=UR, direction=UP * 0.5 + RIGHT, **kwargs
):
return self.get_axis_label(
label_tex, self.get_y_axis(), edge, direction, **kwargs
)
# move to a util_file, or Mobject()??
@staticmethod
def create_label_tex(label_tex) -> "Mobject":
"""Checks if the label is a ``float``, ``int`` or a ``str`` and creates a :class:`~.MathTex` label accordingly.
Parameters
----------
label_tex : The label to be compared against the above types.
Returns
-------
:class:`~.Mobject`
The label.
"""
if (
isinstance(label_tex, float)
or isinstance(label_tex, int)
or isinstance(label_tex, str)
):
label_tex = MathTex(label_tex)
return label_tex
def get_axis_label(
self,
label: Union[float, str, "Mobject"],
axis: "Mobject",
edge: Sequence[float],
direction: Sequence[float],
buff: float = SMALL_BUFF,
) -> "Mobject":
"""Gets the label for an axis.
Parameters
----------
label
The label. Can be any mobject or `int/float/str` to be used with :class:`~.MathTex`
axis
The axis to which the label will be added.
edge
The edge of the axes to which the label will be added. ``RIGHT`` adds to the right side of the axis
direction
Allows for further positioning of the label.
buff
The distance of the label from the line.
Returns
-------
:class:`~.Mobject`
The positioned label along the given axis.
"""
label = self.create_label_tex(label)
label.next_to(axis.get_edge_center(edge), direction, buff=buff)
label.shift_onto_screen(buff=MED_SMALL_BUFF)
return label
def get_axis_labels(
self,
x_label: Union[float, str, "Mobject"] = "x",
y_label: Union[float, str, "Mobject"] = "y",
) -> "VGroup":
"""Defines labels for the x_axis and y_axis of the graph.
Parameters
----------
x_label
The label for the x_axis
y_label
The label for the y_axis
Returns
-------
:class:`~.VGroup`
A :class:`~.Vgroup` of the labels for the x_axis and y_axis.
See Also
--------
:class:`get_x_axis_label`
:class:`get_y_axis_label`
"""
self.axis_labels = VGroup(
self.get_x_axis_label(x_label),
self.get_y_axis_label(y_label),
)
return self.axis_labels
def add_coordinates(
self,
*axes_numbers: Union[
Optional[Iterable[float]], Union[Dict[float, Union[str, float, "Mobject"]]]
],
**kwargs,
):
"""Adds labels to the axes.
Parameters
----------
axes_numbers
The numbers to be added to the axes. Use ``None`` to represent an axis with default labels.
Examples
--------
.. code-block:: python
ax = ThreeDAxes()
x_labels = range(-4, 5)
z_labels = range(-4, 4, 2)
ax.add_coordinates(x_labels, None, z_labels) # default y labels, custom x & z labels
ax.add_coordinates(x_labels) # only x labels
.. code-block:: python
# specifically control the position and value of the labels using a dict
ax = Axes(x_range=[0, 7])
x_pos = [x for x in range(1, 8)]
# strings are automatically converted into a `Tex` mobject.
x_vals = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
x_dict = dict(zip(x_pos, x_vals))
ax.add_coordinates(x_dict)
"""
self.coordinate_labels = VGroup()
# if nothing is passed to axes_numbers, produce axes with default labelling
if not axes_numbers:
axes_numbers = [None for _ in range(self.dimension)]
for axis, values in zip(self.axes, axes_numbers):
if isinstance(values, dict):
labels = axis.add_labels(values, **kwargs)
else:
labels = axis.add_numbers(values, **kwargs)
self.coordinate_labels.add(labels)
return self
def get_line_from_axis_to_point(
self,
index: int,
point: Sequence[float],
line_func: Line = DashedLine,
color: Color = LIGHT_GREY,
stroke_width: float = 2,
) -> Line:
"""Returns a straight line from a given axis to a point in the scene.
Parameters
----------
index
Specifies the axis from which to draw the line. `0 = x_axis`, `1 = y_axis`
point
The point to which the line will be drawn.
line_func
The function of the :class:`~.Line` mobject used to construct the line.
color
The color of the line.
stroke_width
The stroke width of the line.
Returns
-------
:class:`~.Line`
The line from an axis to a point.
See Also
--------
:class:`get_vertical_line`
:class:`get_horizontal_line`
"""
axis = self.get_axis(index)
line = line_func(axis.get_projection(point), point)
line.set_stroke(color, stroke_width)
return line
def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:
"""A vertical line from the x-axis to a given point in the scene.
Parameters
----------
point
The point to which the vertical line will be drawn.
kwargs
Additional parameters to be passed to :class:`get_line_from_axis_to_point`
Returns
-------
:class:`Line`
A vertical line from the x-axis to the point.
"""
return self.get_line_from_axis_to_point(0, point, **kwargs)
def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:
"""A horizontal line from the y-axis to a given point in the scene.
Parameters
----------
point
The point to which the horizontal line will be drawn.
kwargs
Additional parameters to be passed to :class:`get_line_from_axis_to_point`
Returns
-------
:class:`Line`
A horizontal line from the y-axis to the point.
"""
return self.get_line_from_axis_to_point(1, point, **kwargs)
# graphing
def get_graph(
self,
function: Callable[[float], float],
x_range: Optional[Sequence[float]] = None,
**kwargs,
):
"""Generates a curve based on a function.
Parameters
----------
function
The function used to construct the :class:`~.ParametricFunction`.
x_range
The range of the curve along the axes. ``x_range = [x_min, x_max]``.
kwargs
Additional parameters to be passed to :class:`~.ParametricFunction`.
Returns
-------
:class:`~.ParametricFunction`
The plotted curve.
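Examples
--------
An illustrative sketch (assumed to run inside a scene's ``construct`` method, with ``np`` referring to NumPy):
.. code-block:: python
ax = Axes(x_range=[-3, 3], y_range=[-2, 2])
sin_graph = ax.get_graph(lambda x: np.sin(x), color=BLUE)
cubic_graph = ax.get_graph(lambda x: x ** 3, x_range=[-1.2, 1.2], color=YELLOW)
self.add(ax, sin_graph, cubic_graph)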
"""
t_range = np.array(self.x_range, dtype=float)
if x_range is not None:
t_range[: len(x_range)] = x_range
if x_range is None or len(x_range) < 3:
# if t_range has a defined step size, increase the number of sample points per tick
t_range[2] /= self.num_sampled_graph_points_per_tick
# For axes, the third coordinate of x_range indicates
# tick frequency. But for functions, it indicates a
# sample frequency
graph = ParametricFunction(
lambda t: self.coords_to_point(t, function(t)), t_range=t_range, **kwargs
)
graph.underlying_function = function
return graph
def get_parametric_curve(self, function, **kwargs):
dim = self.dimension
graph = ParametricFunction(
lambda t: self.coords_to_point(*function(t)[:dim]), **kwargs
)
graph.underlying_function = function
return graph
def input_to_graph_point(self, x: float, graph: "ParametricFunction") -> np.ndarray:
"""Returns the coordinates of the point on the ``graph``
corresponding to the input ``x`` value.
Parameters
----------
x
The x-value for which the coordinates of corresponding point on the :attr:`graph` are to be found.
graph
The :class:`~.ParametricFunction` on which the x-value and y-value lie.
Returns
-------
:class:`np.ndarray`
The coordinates of the point on the :attr:`graph` corresponding to the :attr:`x` value.
"""
if hasattr(graph, "underlying_function"):
return graph.function(x)
else:
alpha = binary_search(
function=lambda a: self.point_to_coords(graph.point_from_proportion(a))[
0
],
target=x,
lower_bound=self.x_range[0],
upper_bound=self.x_range[1],
)
if alpha is not None:
return graph.point_from_proportion(alpha)
else:
return None
def i2gp(self, x, graph):
"""
Alias for :meth:`input_to_graph_point`.
"""
return self.input_to_graph_point(x, graph)
def get_graph_label(
self,
graph: "ParametricFunction",
label: Union[float, str, "Mobject"] = "f(x)",
x_val: Optional[float] = None,
direction: Sequence[float] = RIGHT,
buff: float = MED_SMALL_BUFF,
color: Optional[Color] = None,
dot: bool = False,
dot_config: Optional[dict] = None,
) -> Mobject:
"""Creates a properly positioned label for the passed graph,
styled with parameters and an optional dot.
Parameters
----------
graph
The curve of the function plotted.
label
The label for the function's curve. Written with :class:`MathTex` if not specified otherwise.
x_val
The x_value with which the label should be aligned.
direction
The cartesian position, relative to the curve that the label will be at --> ``LEFT``, ``RIGHT``
buff
The buffer space between the curve and the label.
color
The color of the label.
dot
Adds a dot at the given point on the graph.
dot_config
Additional parameters to be passed to :class:`~.Dot`.
Returns
-------
:class:`Mobject`
The positioned label and :class:`~.Dot`, if applicable.
"""
if dot_config is None:
dot_config = {}
label = self.create_label_tex(label)
color = color or graph.get_color()
label.set_color(color)
if x_val is None:
# Search from right to left
for x in np.linspace(self.x_range[1], self.x_range[0], 100):
point = self.input_to_graph_point(x, graph)
if point[1] < config["frame_y_radius"]:
break
else:
point = self.input_to_graph_point(x_val, graph)
label.next_to(point, direction, buff=buff)
label.shift_onto_screen()
if dot:
label.add(Dot(point=point, **dot_config))
return label
# calculus
def get_riemann_rectangles(
self,
graph: "ParametricFunction",
x_range: Optional[Sequence[float]] = None,
dx: Optional[float] = 0.1,
input_sample_type: str = "left",
stroke_width: float = 1,
stroke_color: Color = BLACK,
fill_opacity: float = 1,
color: Union[Iterable[Color], Color] = np.array((BLUE, GREEN)),
show_signed_area: bool = True,
bounded_graph: "ParametricFunction" = None,
blend: bool = False,
width_scale_factor: float = 1.001,
) -> VGroup:
"""This method returns the :class:`~.VGroup` of the Riemann Rectangles for
a particular curve.
Parameters
----------
graph
The graph whose area will be approximated by Riemann rectangles.
x_range
The minimum and maximum x-values of the rectangles. ``x_range = [x_min, x_max]``.
dx
The change in x-value that separates each rectangle.
input_sample_type
Can be any of ``"left"``, ``"right"`` or ``"center"``. Refers to where
the sample point for the height of each Riemann Rectangle
will be inside the segments of the partition.
stroke_width
The stroke_width of the border of the rectangles.
stroke_color
The color of the border of the rectangle.
fill_opacity
The opacity of the rectangles.
color
The colors of the rectangles. Creates a balanced gradient if multiple colors are passed.
show_signed_area
Indicates negative area when the curve dips below the x-axis by inverting its color.
blend
Sets the :attr:`stroke_color` to :attr:`fill_color`, blending the rectangles without clear separation.
bounded_graph
If a secondary graph is specified, encloses the area between the two curves.
width_scale_factor
The factor by which the width of the rectangles is scaled.
Returns
-------
:class:`~.VGroup`
A :class:`~.VGroup` containing the Riemann Rectangles.
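Examples
--------
A minimal sketch (assumes ``ax`` is an :class:`Axes` instance inside a scene and ``curve`` was created with :meth:`get_graph`):
.. code-block:: python
curve = ax.get_graph(lambda x: x ** 2, color=WHITE)
rects = ax.get_riemann_rectangles(curve, x_range=[0, 2], dx=0.25, color=(BLUE, GREEN))
self.add(ax, curve, rects)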
"""
# setting up x_range, overwrite user's third input
if x_range is None:
if bounded_graph is None:
x_range = [graph.t_min, graph.t_max]
else:
x_min = max(graph.t_min, bounded_graph.t_min)
x_max = min(graph.t_max, bounded_graph.t_max)
x_range = [x_min, x_max]
x_range = [*x_range[:2], dx]
rectangles = VGroup()
x_range = np.arange(*x_range)
# allows passing a string to color the graph
if type(color) is str:
colors = [color] * len(x_range)
else:
colors = color_gradient(color, len(x_range))
for x, color in zip(x_range, colors):
if input_sample_type == "left":
sample_input = x
elif input_sample_type == "right":
sample_input = x + dx
elif input_sample_type == "center":
sample_input = x + 0.5 * dx
else:
raise ValueError("Invalid input sample type")
graph_point = self.input_to_graph_point(sample_input, graph)
if bounded_graph is None:
y_point = self.origin_shift(self.y_range)
else:
y_point = bounded_graph.underlying_function(x)
points = VGroup(
*list(
map(
VectorizedPoint,
[
self.coords_to_point(x, y_point),
self.coords_to_point(x + width_scale_factor * dx, y_point),
graph_point,
],
)
)
)
rect = Rectangle().replace(points, stretch=True)
rectangles.add(rect)
# checks if the rectangle is under the x-axis
if self.p2c(graph_point)[1] < y_point and show_signed_area:
color = invert_color(color)
# blends rectangles smoothly
if blend:
stroke_color = color
rect.set_style(
fill_color=color,
fill_opacity=fill_opacity,
stroke_color=stroke_color,
stroke_width=stroke_width,
)
return rectangles
def get_area(
self,
graph: "ParametricFunction",
x_range: Optional[Sequence[float]] = None,
color: Union[Color, Iterable[Color]] = [BLUE, GREEN],
opacity: float = 0.3,
dx_scaling: float = 1,
bounded: "ParametricFunction" = None,
):
"""Returns a :class:`~.VGroup` of Riemann rectangles sufficiently small enough to visually
approximate the area under the graph passed.
Parameters
----------
graph
The graph/curve for which the area needs to be gotten.
x_range
The range of the minimum and maximum x-values of the area. ``x_range = [x_min, x_max]``.
color
The color of the area. Creates a gradient if a list of colors is provided.
opacity
The opacity of the area.
bounded
If a secondary :attr:`graph` is specified, encloses the area between the two curves.
dx_scaling
The factor by which the :attr:`dx` value is scaled.
Returns
-------
:class:`~.VGroup`
The :class:`~.VGroup` containing the Riemann Rectangles.
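Examples
--------
A minimal sketch (assumes ``ax`` is an :class:`Axes` instance inside a scene and ``np`` refers to NumPy):
.. code-block:: python
curve = ax.get_graph(lambda x: np.sin(x), color=WHITE)
area = ax.get_area(curve, x_range=[0, PI], color=(BLUE, GREEN), opacity=0.5)
self.add(ax, curve, area)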
"""
dx = self.x_range[2] / 500
return self.get_riemann_rectangles(
graph,
x_range=x_range,
dx=dx * dx_scaling,
bounded_graph=bounded,
blend=True,
color=color,
show_signed_area=False,
).set_opacity(opacity=opacity)
def angle_of_tangent(
self, x: float, graph: "ParametricFunction", dx: float = 1e-8
) -> float:
"""Returns the angle to the x-axis of the tangent
to the plotted curve at a particular x-value.
Parameters
----------
x
The x-value at which the tangent must touch the curve.
graph
The :class:`~.ParametricFunction` for which to calculate the tangent.
dx
The small change in `x` with which a small change in `y`
will be compared in order to obtain the tangent.
Returns
-------
:class:`float`
The angle of the tangent with the x axis.
"""
p0 = self.input_to_graph_point(x, graph)
p1 = self.input_to_graph_point(x + dx, graph)
return angle_of_vector(p1 - p0)
def slope_of_tangent(
self, x: float, graph: "ParametricFunction", **kwargs
) -> float:
"""Returns the slope of the tangent to the plotted curve
at a particular x-value.
Parameters
----------
x
The x-value at which the tangent must touch the curve.
graph
The :class:`~.ParametricFunction` for which to calculate the tangent.
Returns
-------
:class:`float`
The slope of the tangent with the x axis.
"""
return np.tan(self.angle_of_tangent(x, graph, **kwargs))
def get_derivative_graph(
self, graph: "ParametricFunction", color: Color = GREEN, **kwargs
) -> ParametricFunction:
"""Returns the curve of the derivative of the passed
graph.
Parameters
----------
graph
The graph for which the derivative will be found.
color
The color of the derivative curve.
**kwargs
Any valid keyword argument of :class:`~.ParametricFunction`
Returns
-------
:class:`~.ParametricFunction`
The curve of the derivative.
"""
def deriv(x):
return self.slope_of_tangent(x, graph)
return self.get_graph(deriv, color=color, **kwargs)
def get_secant_slope_group(
self,
x: float,
graph: ParametricFunction,
dx: Optional[float] = None,
dx_line_color: Color = YELLOW,
dy_line_color: Optional[Color] = None,
dx_label: Optional[Union[float, str]] = None,
dy_label: Optional[Union[float, str]] = None,
include_secant_line: bool = True,
secant_line_color: Color = GREEN,
secant_line_length: float = 10,
) -> VGroup:
"""Creates two lines representing `dx` and `df`, the labels for `dx` and `df`, and
the secant to the curve at a particular x-value.
Parameters
----------
x
The x-value at which the secant intersects the graph for the first time.
graph
The curve for which the secant will be found.
dx
The change in `x` used to construct the secant: the secant intersects the graph at ``x`` and ``x + dx``.
dx_line_color
The color of the line that indicates the change in `x`.
dy_line_color
The color of the line that indicates the change in `y`. Defaults to the color of :attr:`graph`.
dx_label
The label for the `dx` line.
dy_label
The label for the `dy` line.
include_secant_line
Whether or not to include the secant line in the graph,
or just have the df and dx lines and labels.
secant_line_color
The color of the secant line.
secant_line_length
The length of the secant line.
Returns
-------
:class:`~.VGroup`
A group containing the elements: `dx_line`, `df_line`, and
if applicable also :attr:`dx_label`, :attr:`df_label`, `secant_line`.
"""
group = VGroup()
dx = dx or float(self.x_range[1] - self.x_range[0]) / 10
dx_line_color = dx_line_color
dy_line_color = dy_line_color or graph.get_color()
p1 = self.input_to_graph_point(x, graph)
p2 = self.input_to_graph_point(x + dx, graph)
interim_point = p2[0] * RIGHT + p1[1] * UP
group.dx_line = Line(p1, interim_point, color=dx_line_color)
group.df_line = Line(interim_point, p2, color=dy_line_color)
group.add(group.dx_line, group.df_line)
labels = VGroup()
if dx_label is not None:
group.dx_label = self.create_label_tex(dx_label)
labels.add(group.dx_label)
group.add(group.dx_label)
if dy_label is not None:
group.df_label = self.create_label_tex(dy_label)
labels.add(group.df_label)
group.add(group.df_label)
if len(labels) > 0:
max_width = 0.8 * group.dx_line.width
max_height = 0.8 * group.df_line.height
if labels.width > max_width:
labels.width = max_width
if labels.height > max_height:
labels.height = max_height
if dx_label is not None:
group.dx_label.next_to(
group.dx_line, np.sign(dx) * DOWN, buff=group.dx_label.height / 2
)
group.dx_label.set_color(group.dx_line.get_color())
if dy_label is not None:
group.df_label.next_to(
group.df_line, np.sign(dx) * RIGHT, buff=group.df_label.height / 2
)
group.df_label.set_color(group.df_line.get_color())
if include_secant_line:
secant_line_color = secant_line_color
group.secant_line = Line(p1, p2, color=secant_line_color)
group.secant_line.scale_in_place(
secant_line_length / group.secant_line.get_length()
)
group.add(group.secant_line)
return group
def get_vertical_lines_to_graph(
self,
graph: ParametricFunction,
x_range: Optional[Sequence[float]] = None,
num_lines: int = 20,
**kwargs,
) -> VGroup:
"""Obtains multiple lines from the x-axis to the curve.
Parameters
----------
graph
The graph on which the line should extend to.
x_range
A list containing the lower and upper bounds of the lines -> ``x_range = [x_min, x_max]``.
num_lines
The number of evenly spaced lines.
Returns
-------
:class:`~.VGroup`
The :class:`~.VGroup` of the evenly spaced lines.
"""
x_range = x_range if x_range is not None else self.x_range
return VGroup(
*[
self.get_vertical_line(self.i2gp(x, graph), **kwargs)
for x in np.linspace(x_range[0], x_range[1], num_lines)
]
)
def get_T_label(
self,
x_val: float,
graph: "ParametricFunction",
label: Optional[Union[float, str, "Mobject"]] = None,
label_color: Color = WHITE,
triangle_size: float = MED_SMALL_BUFF,
triangle_color: Color = WHITE,
line_func: "Line" = Line,
line_color: Color = YELLOW,
) -> VGroup:
"""Creates a labelled triangle marker with a vertical line from the x-axis
to a curve at a given x-value.
Parameters
----------
x_val
The position along the curve at which the label, line and triangle will be constructed.
graph
The :class:`~.ParametricFunction` for which to construct the label.
label
The label of the vertical line and triangle.
label_color
The color of the label.
triangle_size
The size of the triangle.
triangle_color
The color of the triangle.
line_func
The function used to construct the vertical line.
line_color
The color of the vertical line.
Examples
--------
.. manim:: T_labelExample
:save_last_frame:
class T_labelExample(Scene):
def construct(self):
# defines the axes and linear function
axes = Axes(x_range=[-1, 10], y_range=[-1, 10], x_length=9, y_length=6)
func = axes.get_graph(lambda x: x, color=BLUE)
# creates the T_label
t_label = axes.get_T_label(x_val=4, graph=func, label=Tex("x-value"))
self.add(axes, func, t_label)
Returns
-------
:class:`~.VGroup`
A :class:`~.VGroup` of the label, triangle and vertical line mobjects.
"""
T_label_group = VGroup()
triangle = RegularPolygon(n=3, start_angle=np.pi / 2, stroke_width=0).set_fill(
color=triangle_color, opacity=1
)
triangle.height = triangle_size
triangle.move_to(self.coords_to_point(x_val, 0), UP)
if label is not None:
t_label = self.create_label_tex(label).set_color(label_color)
t_label.next_to(triangle, DOWN)
T_label_group.add(t_label)
v_line = self.get_vertical_line(
self.i2gp(x_val, graph), color=line_color, line_func=line_func
)
T_label_group.add(triangle, v_line)
return T_label_group
class Axes(VGroup, CoordinateSystem, metaclass=ConvertToOpenGL):
"""Creates a set of axes.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the x-axis.
y_range
The :code:`[y_min, y_max, y_step]` values of the y-axis.
x_length
The length of the x-axis.
y_length
The length of the y-axis.
axis_config
Arguments to be passed to :class:`~.NumberLine` that influences both axes.
x_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the x-axis.
y_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the y-axis.
tips
Whether or not to include the tips on both axes.
kwargs : Any
Additional arguments to be passed to :class:`CoordinateSystem` and :class:`~.VGroup`.
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = None,
y_range: Optional[Sequence[float]] = None,
x_length: Optional[float] = round(config.frame_width) - 2,
y_length: Optional[float] = round(config.frame_height) - 2,
axis_config: Optional[dict] = None,
x_axis_config: Optional[dict] = None,
y_axis_config: Optional[dict] = None,
tips: bool = True,
**kwargs,
):
VGroup.__init__(self, **kwargs)
CoordinateSystem.__init__(self, x_range, y_range, x_length, y_length)
self.axis_config = {
"include_tip": tips,
"numbers_to_exclude": [0],
"exclude_origin_tick": True,
}
self.x_axis_config = {}
self.y_axis_config = {"rotation": 90 * DEGREES, "label_direction": LEFT}
self.update_default_configs(
(self.axis_config, self.x_axis_config, self.y_axis_config),
(axis_config, x_axis_config, y_axis_config),
)
self.x_axis_config = merge_dicts_recursively(
self.axis_config, self.x_axis_config
)
self.y_axis_config = merge_dicts_recursively(
self.axis_config, self.y_axis_config
)
self.x_axis = self.create_axis(self.x_range, self.x_axis_config, self.x_length)
self.y_axis = self.create_axis(self.y_range, self.y_axis_config, self.y_length)
# Add as a separate group in case various other
# mobjects are added to self, as for example in
# NumberPlane below
self.axes = VGroup(self.x_axis, self.y_axis)
self.add(*self.axes)
# finds the middle-point on each axis
lines_center_point = [((axis.x_max + axis.x_min) / 2) for axis in self.axes]
self.shift(-self.coords_to_point(*lines_center_point))
@staticmethod
def update_default_configs(default_configs, passed_configs):
for default_config, passed_config in zip(default_configs, passed_configs):
if passed_config is not None:
update_dict_recursively(default_config, passed_config)
def create_axis(
self,
range_terms: Sequence[float],
axis_config: dict,
length: float,
) -> NumberLine:
"""Creates an axis and dynamically adjusts its position depending on where 0 is located on the line.
Parameters
----------
range_terms
The range of the axis: ``(x_min, x_max, x_step)``.
axis_config
Additional parameters that are passed to :class:`NumberLine`.
length
The length of the axis.
Returns
-------
:class:`NumberLine`
Returns a number line with the provided x and y axis range.
"""
axis_config["length"] = length
axis = NumberLine(range_terms, **axis_config)
# without the call to origin_shift, graph does not exist when min > 0 or max < 0
# shifts the axis so that 0 is centered
axis.shift(-axis.number_to_point(self.origin_shift(range_terms)))
return axis
def coords_to_point(self, *coords: Sequence[float]) -> np.ndarray:
"""Transforms the vector formed from ``coords`` formed by the :class:`Axes`
into the corresponding vector with respect to the default basis.
Returns
-------
np.ndarray
A point that results from a change of basis from the coordinate system
defined by the :class:`Axes` to that of ``manim``'s default coordinate system
"""
origin = self.x_axis.number_to_point(self.origin_shift(self.x_range))
result = np.array(origin)
for axis, coord in zip(self.get_axes(), coords):
result += axis.number_to_point(coord) - origin
return result
def point_to_coords(self, point: float) -> Tuple:
"""Transforms the coordinates of the point which are with respect to ``manim``'s default
basis into the coordinates of that point with respect to the basis defined by :class:`Axes`.
Parameters
----------
point
The point whose coordinates will be found.
Returns
-------
Tuple
Coordinates of the point with respect to :class:`Axes`'s basis
"""
return tuple([axis.point_to_number(point) for axis in self.get_axes()])
def get_axes(self) -> VGroup:
"""Gets the axes.
Returns
-------
:class:`~.VGroup`
A pair of axes.
"""
return self.axes
def get_line_graph(
self,
x_values: Iterable[float],
y_values: Iterable[float],
z_values: Optional[Iterable[float]] = None,
line_color: Color = YELLOW,
add_vertex_dots: bool = True,
vertex_dot_radius: float = DEFAULT_DOT_RADIUS,
vertex_dot_style: Optional[dict] = None,
**kwargs,
) -> VDict:
"""Draws a line graph.
The graph connects the vertices formed from zipping
``x_values``, ``y_values`` and ``z_values``. Also adds :class:`Dots <.Dot>` at the
vertices if ``add_vertex_dots`` is set to ``True``.
Parameters
----------
x_values
Iterable of values along the x-axis.
y_values
Iterable of values along the y-axis.
z_values
Iterable of values (zeros if z_values is None) along the z-axis.
line_color
Color for the line graph.
add_vertex_dots
Whether or not to add :class:`~.Dot` at each vertex.
vertex_dot_radius
Radius for the :class:`~.Dot` at each vertex.
vertex_dot_style
Style arguments to be passed into :class:`~.Dot` at each vertex.
kwargs
Additional arguments to be passed into :class:`~.VMobject`.
Examples
--------
.. manim:: LineGraphExample
:save_last_frame:
class LineGraphExample(Scene):
def construct(self):
plane = NumberPlane(
x_range = (0, 7),
y_range = (0, 5),
x_length = 7,
axis_config={"include_numbers": True},
)
plane.center()
line_graph = plane.get_line_graph(
x_values = [0, 1.5, 2, 2.8, 4, 6.25],
y_values = [1, 3, 2.25, 4, 2.5, 1.75],
line_color=GOLD_E,
vertex_dot_style=dict(stroke_width=3, fill_color=PURPLE),
stroke_width = 4,
)
self.add(plane, line_graph)
"""
x_values, y_values = map(np.array, (x_values, y_values))
if z_values is None:
z_values = np.zeros(x_values.shape)
line_graph = VDict()
graph = VGroup(color=line_color, **kwargs)
vertices = [
self.coords_to_point(x, y, z)
for x, y, z in zip(x_values, y_values, z_values)
]
graph.set_points_as_corners(vertices)
graph.z_index = -1
line_graph["line_graph"] = graph
if add_vertex_dots:
vertex_dot_style = vertex_dot_style or {}
vertex_dots = VGroup(
*[
Dot(point=vertex, radius=vertex_dot_radius, **vertex_dot_style)
for vertex in vertices
]
)
line_graph["vertex_dots"] = vertex_dots
return line_graph
@staticmethod
def origin_shift(axis_range: Sequence[float]) -> float:
"""Determines how to shift graph mobjects to compensate when 0 is not on the axis.
Parameters
----------
axis_range
The range of the axis : ``(x_min, x_max, x_step)``.
"""
if axis_range[0] > 0:
return axis_range[0]
if axis_range[1] < 0:
return axis_range[1]
else:
return 0
class ThreeDAxes(Axes):
"""A 3-dimensional set of axes.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the x-axis.
y_range
The :code:`[y_min, y_max, y_step]` values of the y-axis.
z_range
The :code:`[z_min, z_max, z_step]` values of the z-axis.
x_length
The length of the x-axis.
y_length
The length of the y-axis.
z_length
The length of the z-axis.
z_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the z-axis.
z_normal
The direction of the normal.
num_axis_pieces
The number of pieces used to construct the axes.
light_source
The direction of the light source.
depth
Currently non-functional.
gloss
Currently non-functional.
kwargs : Any
Additional arguments to be passed to :class:`Axes`.
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = (-6, 6, 1),
y_range: Optional[Sequence[float]] = (-5, 5, 1),
z_range: Optional[Sequence[float]] = (-4, 4, 1),
x_length: Optional[float] = config.frame_height + 2.5,
y_length: Optional[float] = config.frame_height + 2.5,
z_length: Optional[float] = config.frame_height - 1.5,
z_axis_config: Optional[dict] = None,
z_normal: Sequence[float] = DOWN,
num_axis_pieces: int = 20,
light_source: Sequence[float] = 9 * DOWN + 7 * LEFT + 10 * OUT,
# opengl stuff (?)
depth=None,
gloss=0.5,
**kwargs,
):
Axes.__init__(
self,
x_range=x_range,
x_length=x_length,
y_range=y_range,
y_length=y_length,
**kwargs,
)
self.z_range = z_range
self.z_length = z_length
self.z_axis_config = {}
self.update_default_configs((self.z_axis_config,), (z_axis_config,))
self.z_axis_config = merge_dicts_recursively(
self.axis_config, self.z_axis_config
)
self.z_normal = z_normal
self.num_axis_pieces = num_axis_pieces
self.light_source = light_source
self.dimension = 3
z_axis = self.create_axis(self.z_range, self.z_axis_config, self.z_length)
z_axis.rotate_about_zero(-PI / 2, UP)
z_axis.rotate_about_zero(angle_of_vector(self.z_normal))
z_axis.shift(self.x_axis.number_to_point(self.origin_shift(x_range)))
self.axes.add(z_axis)
self.add(z_axis)
self.z_axis = z_axis
if not config.renderer == "opengl":
self.add_3d_pieces()
self.set_axis_shading()
def add_3d_pieces(self):
for axis in self.axes:
axis.pieces = VGroup(*axis.get_pieces(self.num_axis_pieces))
axis.add(axis.pieces)
axis.set_stroke(width=0, family=False)
axis.set_shade_in_3d(True)
def set_axis_shading(self):
def make_func(axis):
vect = self.light_source
return lambda: (
axis.get_edge_center(-vect),
axis.get_edge_center(vect),
)
for axis in self:
for submob in axis.family_members_with_points():
submob.get_gradient_start_and_end_points = make_func(axis)
submob.get_unit_normal = lambda a: np.ones(3)
submob.set_sheen(0.2)
class NumberPlane(Axes):
"""Creates a cartesian plane with background lines.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the plane in the horizontal direction.
y_range
The :code:`[y_min, y_max, y_step]` values of the plane in the vertical direction.
x_length
The width of the plane.
y_length
The height of the plane.
background_line_style
Arguments that influence the construction of the background lines of the plane.
faded_line_style
Similar to :attr:`background_line_style`, affects the construction of the scene's background lines.
faded_line_ratio
Determines the number of boxes within the background lines: :code:`2` = 4 boxes, :code:`3` = 9 boxes.
make_smooth_after_applying_functions
Currently non-functional.
kwargs : Any
Additional arguments to be passed to :class:`Axes`.
.. note:: If :attr:`x_length` or :attr:`y_length` are not defined, the plane automatically adjusts its lengths based
on the :attr:`x_range` and :attr:`y_range` values to set the unit_size to 1.
Examples
--------
.. manim:: NumberPlaneExample
:save_last_frame:
class NumberPlaneExample(Scene):
def construct(self):
number_plane = NumberPlane(
x_range=[-10, 10, 1],
y_range=[-10, 10, 1],
background_line_style={
"stroke_color": TEAL,
"stroke_width": 4,
"stroke_opacity": 0.6
}
)
self.add(number_plane)
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = (
-config["frame_x_radius"],
config["frame_x_radius"],
1,
),
y_range: Optional[Sequence[float]] = (
-config["frame_y_radius"],
config["frame_y_radius"],
1,
),
x_length: Optional[float] = None,
y_length: Optional[float] = None,
background_line_style: Optional[dict] = None,
faded_line_style: Optional[dict] = None,
faded_line_ratio: int = 1,
make_smooth_after_applying_functions=True,
**kwargs,
):
# configs
self.axis_config = {
"stroke_color": WHITE,
"stroke_width": 2,
"include_ticks": False,
"include_tip": False,
"line_to_number_buff": SMALL_BUFF,
"label_direction": DR,
"number_scale_value": 0.5,
}
self.y_axis_config = {"label_direction": DR}
self.background_line_style = {
"stroke_color": BLUE_D,
"stroke_width": 2,
"stroke_opacity": 1,
}
self.update_default_configs(
(self.axis_config, self.y_axis_config, self.background_line_style),
(
kwargs.pop("axis_config", None),
kwargs.pop("y_axis_config", None),
background_line_style,
),
)
# Defaults to a faded version of line_config
self.faded_line_style = faded_line_style
self.faded_line_ratio = faded_line_ratio
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
# init
super().__init__(
x_range=x_range,
y_range=y_range,
x_length=x_length,
y_length=y_length,
axis_config=self.axis_config,
y_axis_config=self.y_axis_config,
**kwargs,
)
# dynamically adjusts x_length and y_length so that the unit_size is one by default
if x_length is None:
x_length = self.x_range[1] - self.x_range[0]
if y_length is None:
y_length = self.y_range[1] - self.y_range[0]
self.init_background_lines()
def init_background_lines(self):
"""Will init all the lines of NumberPlanes (faded or not)"""
if self.faded_line_style is None:
style = dict(self.background_line_style)
# For anything numerical, like stroke_width
# and stroke_opacity, chop it in half
for key in style:
if isinstance(style[key], numbers.Number):
style[key] *= 0.5
self.faded_line_style = style
self.background_lines, self.faded_lines = self.get_lines()
self.background_lines.set_style(
**self.background_line_style,
)
self.faded_lines.set_style(
**self.faded_line_style,
)
self.add_to_back(
self.faded_lines,
self.background_lines,
)
def get_lines(self) -> Tuple[VGroup, VGroup]:
"""Generate all the lines, faded and not faded. Two sets of lines are generated: one parallel to the X-axis, and parallel to the Y-axis.
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non faded lines) and second (i.e the faded lines) sets of lines, respectively.
"""
x_axis = self.get_x_axis()
y_axis = self.get_y_axis()
x_lines1, x_lines2 = self.get_lines_parallel_to_axis(
x_axis,
y_axis,
self.x_axis.x_step,
self.faded_line_ratio,
)
y_lines1, y_lines2 = self.get_lines_parallel_to_axis(
y_axis,
x_axis,
self.y_axis.x_step,
self.faded_line_ratio,
)
# TODO this was added so that we can run tests on NumberPlane
# In the future these attributes will be tacked onto self.background_lines
self.x_lines = x_lines1
self.y_lines = y_lines1
lines1 = VGroup(*x_lines1, *y_lines1)
lines2 = VGroup(*x_lines2, *y_lines2)
return lines1, lines2
def get_lines_parallel_to_axis(
self,
axis_parallel_to: NumberLine,
axis_perpendicular_to: NumberLine,
freq: float,
ratio_faded_lines: int,
) -> Tuple[VGroup, VGroup]:
"""Generate a set of lines parallel to an axis.
Parameters
----------
axis_parallel_to
The axis with which the lines will be parallel.
axis_perpendicular_to
The axis with which the lines will be perpendicular.
ratio_faded_lines
The ratio between the space between faded lines and the space between non-faded lines.
freq
Frequency of non-faded lines (number of non-faded lines per graph unit).
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non-faded lines parallel to `axis_parallel_to`) and second (i.e the faded lines parallel to `axis_parallel_to`) sets of lines, respectively.
"""
line = Line(axis_parallel_to.get_start(), axis_parallel_to.get_end())
if ratio_faded_lines == 0: # don't show faded lines
ratio_faded_lines = 1 # i.e. set ratio to 1
step = (1 / ratio_faded_lines) * freq
lines1 = VGroup()
lines2 = VGroup()
unit_vector_axis_perp_to = axis_perpendicular_to.get_unit_vector()
# min/max used in case range does not include 0. i.e. if (2,6):
# the range becomes (0,4), not (0,6), to produce the correct number of lines
ranges = (
np.arange(
0,
min(
axis_perpendicular_to.x_max - axis_perpendicular_to.x_min,
axis_perpendicular_to.x_max,
),
step,
),
np.arange(
0,
max(
axis_perpendicular_to.x_min - axis_perpendicular_to.x_max,
axis_perpendicular_to.x_min,
),
-step,
),
)
for inputs in ranges:
for k, x in enumerate(inputs):
new_line = line.copy()
new_line.shift(unit_vector_axis_perp_to * x)
if k % ratio_faded_lines == 0:
lines1.add(new_line)
else:
lines2.add(new_line)
return lines1, lines2
def get_center_point(self) -> np.ndarray:
"""Gets the origin of :class:`NumberPlane`.
Returns
-------
np.ndarray
The center point.
"""
return self.coords_to_point(0, 0)
def get_x_unit_size(self):
return self.get_x_axis().get_unit_size()
def get_y_unit_size(self):
return self.get_x_axis().get_unit_size()
def get_axes(self) -> VGroup:
# Method Already defined at Axes.get_axes so we could remove this a later PR.
"""Gets the pair of axes.
Returns
-------
:class:`~.VGroup`
Axes
"""
return self.axes
def get_vector(self, coords, **kwargs):
kwargs["buff"] = 0
return Arrow(
self.coords_to_point(0, 0), self.coords_to_point(*coords), **kwargs
)
def prepare_for_nonlinear_transform(self, num_inserted_curves=50):
for mob in self.family_members_with_points():
num_curves = mob.get_num_curves()
if num_inserted_curves > num_curves:
mob.insert_n_curves(num_inserted_curves - num_curves)
return self
class PolarPlane(Axes):
r"""Creates a polar plane with background lines.
Parameters
----------
azimuth_step
The number of divisions in the azimuth (also known as the `angular coordinate` or `polar angle`). If ``None`` is specified then it will use the default
specified by ``azimuth_units``:
- ``"PI radians"`` or ``"TAU radians"``: 20
- ``"degrees"``: 36
- ``"gradians"``: 40
- ``None``: 1
A non-integer value will result in a partial division at the end of the circle.
size
The diameter of the plane.
radius_step
The distance between faded radius lines.
radius_max
The maximum value of the radius.
azimuth_units
Specifies a default labelling system for the azimuth. Choices are:
- ``"PI radians"``: Fractional labels in the interval :math:`\left[0, 2\pi\right]` with :math:`\pi` as a constant.
- ``"TAU radians"``: Fractional labels in the interval :math:`\left[0, \tau\right]` (where :math:`\tau = 2\pi`) with :math:`\tau` as a constant.
- ``"degrees"``: Decimal labels in the interval :math:`\left[0, 360\right]` with a degree (:math:`^{\circ}`) symbol.
- ``"gradians"``: Decimal labels in the interval :math:`\left[0, 400\right]` with a superscript "g" (:math:`^{g}`).
- ``None``: Decimal labels in the interval :math:`\left[0, 1\right]`.
azimuth_compact_fraction
If the ``azimuth_units`` choice has fractional labels, choose whether to combine the constant in a compact form :math:`\tfrac{xu}{y}` as opposed to :math:`\tfrac{x}{y}u`, where :math:`u` is the constant.
azimuth_offset
The angle offset of the azimuth, expressed in radians.
azimuth_direction
The direction of the azimuth.
- ``"CW"``: Clockwise.
- ``"CCW"``: Anti-clockwise.
azimuth_label_buff
The buffer for the azimuth labels.
azimuth_label_scale
The scale of the azimuth labels.
radius_config
The axis config for the radius.
Examples
--------
.. manim:: PolarPlaneExample
:ref_classes: PolarPlane
:save_last_frame:
class PolarPlaneExample(Scene):
def construct(self):
polarplane_pi = PolarPlane(
azimuth_units="PI radians",
size=6,
azimuth_label_scale=0.7,
radius_config={"number_scale_value": 0.7},
).add_coordinates()
self.add(polarplane_pi)
"""
def __init__(
self,
radius_max: float = config["frame_y_radius"],
size: Optional[float] = None,
radius_step: float = 1,
azimuth_step: Optional[float] = None,
azimuth_units: Optional[str] = "PI radians",
azimuth_compact_fraction: bool = True,
azimuth_offset: float = 0,
azimuth_direction: str = "CCW",
azimuth_label_buff: float = SMALL_BUFF,
azimuth_label_scale: float = 0.5,
radius_config: Optional[dict] = None,
background_line_style: Optional[dict] = None,
faded_line_style: Optional[dict] = None,
faded_line_ratio: int = 1,
make_smooth_after_applying_functions: bool = True,
**kwargs,
):
# error catching
if azimuth_units in ["PI radians", "TAU radians", "degrees", "gradians", None]:
self.azimuth_units = azimuth_units
else:
raise ValueError(
"Invalid azimuth units. Expected one of: PI radians, TAU radians, degrees, gradians or None."
)
if azimuth_direction in ["CW", "CCW"]:
self.azimuth_direction = azimuth_direction
else:
raise ValueError("Invalid azimuth units. Expected one of: CW, CCW.")
# configs
self.radius_config = {
"stroke_color": WHITE,
"stroke_width": 2,
"include_ticks": False,
"include_tip": False,
"line_to_number_buff": SMALL_BUFF,
"label_direction": DL,
"number_scale_value": 0.5,
}
self.background_line_style = {
"stroke_color": BLUE_D,
"stroke_width": 2,
"stroke_opacity": 1,
}
self.azimuth_step = (
(
{
"PI radians": 20,
"TAU radians": 20,
"degrees": 36,
"gradians": 40,
None: 1,
}[azimuth_units]
)
if azimuth_step is None
else azimuth_step
)
self.update_default_configs(
(self.radius_config, self.background_line_style),
(radius_config, background_line_style),
)
# Defaults to a faded version of line_config
self.faded_line_style = faded_line_style
self.faded_line_ratio = faded_line_ratio
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
self.azimuth_offset = azimuth_offset
self.azimuth_label_buff = azimuth_label_buff
self.azimuth_label_scale = azimuth_label_scale
self.azimuth_compact_fraction = azimuth_compact_fraction
# init
super().__init__(
x_range=np.array((-radius_max, radius_max, radius_step)),
y_range=np.array((-radius_max, radius_max, radius_step)),
x_length=size,
y_length=size,
axis_config=self.radius_config,
**kwargs,
)
# dynamically adjusts size so that the unit_size is one by default
if size is None:
size = 0
self.init_background_lines()
def init_background_lines(self):
"""Will init all the lines of NumberPlanes (faded or not)"""
if self.faded_line_style is None:
style = dict(self.background_line_style)
# For anything numerical, like stroke_width
# and stroke_opacity, chop it in half
for key in style:
if isinstance(style[key], numbers.Number):
style[key] *= 0.5
self.faded_line_style = style
self.background_lines, self.faded_lines = self.get_lines()
self.background_lines.set_style(
**self.background_line_style,
)
self.faded_lines.set_style(
**self.faded_line_style,
)
self.add_to_back(
self.faded_lines,
self.background_lines,
)
def get_lines(self) -> Tuple[VGroup, VGroup]:
"""Generate all the lines and circles, faded and not faded.
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non faded lines and circles) and second (i.e the faded lines and circles) sets of lines and circles, respectively.
"""
center = self.get_center_point()
ratio_faded_lines = self.faded_line_ratio
offset = self.azimuth_offset
if ratio_faded_lines == 0: # don't show faded lines
ratio_faded_lines = 1 # i.e. set ratio to 1
rstep = (1 / ratio_faded_lines) * self.x_axis.x_step
astep = (1 / ratio_faded_lines) * (TAU * (1 / self.azimuth_step))
rlines1 = VGroup()
rlines2 = VGroup()
alines1 = VGroup()
alines2 = VGroup()
rinput = np.arange(0, self.x_axis.x_max + rstep, rstep)
import io
import os
import pickle
import numpy as np
import torch
from PIL import Image
from learn2learn.vision.datasets import TieredImagenet
class TieredImageNet(TieredImagenet):
def __init__(self, root, partition="train", mode='coarse', transform=None, target_transform=None, download=False):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.mode = mode
tiered_imagenet_path = os.path.join(self.root, 'tiered-imagenet')
short_partition = 'val' if partition == 'validation' else partition
labels_path = os.path.join(tiered_imagenet_path, short_partition + '_labels.pkl')
images_path = os.path.join(tiered_imagenet_path, short_partition + '_images_png.pkl')
with open(images_path, 'rb') as images_file:
self.images = pickle.load(images_file)
with open(labels_path, 'rb') as labels_file:
self.labels = pickle.load(labels_file)
self.coarse2fine = {}
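# Map each coarse ("general") label to the list of fine ("specific") labels that fall under it.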
for c, f in zip(self.labels['label_general'], self.labels['label_specific']):
if c in self.coarse2fine:
if f not in self.coarse2fine[c]:
self.coarse2fine[c].append(f)
else:
self.coarse2fine[c] = [f]
if self.mode == 'coarse':
self.labels = self.labels['label_general']
elif self.mode == 'fine':
self.labels = self.labels['label_specific']
else:
raise NotImplementedError
@property
def num_classes(self):
return len(np.unique(self.labels))
class MetaTieredImageNet(TieredImageNet):
def __init__(self, args, partition='train', train_transform=None, test_transform=None, fix_seed=True):
super(MetaTieredImageNet, self).__init__(
root=args.data_root,
partition=partition,
mode=args.mode)
self.fix_seed = fix_seed
self.n_ways = args.n_ways
self.n_shots = args.n_shots
self.n_queries = args.n_queries
self.n_test_runs = args.n_test_runs
self.n_aug_support_samples = args.n_aug_support_samples
self.train_transform = train_transform
self.test_transform = test_transform
self.data = {}
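# Group images by label so that few-shot episodes (n_ways classes, each with
# n_shots support and n_queries query samples) can be drawn per class.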
for idx in range(len(self.images)):
if self.labels[idx] not in self.data:
self.data[self.labels[idx]] = []
self.data[self.labels[idx]].append(self.images[idx])
self.classes = list(self.data.keys())
def __getitem__(self, item):
if self.fix_seed:
np.random.seed(item)
if len(self.classes) > self.n_ways:
cls_sampled = np.random.choice(self.classes, self.n_ways, False)
else:
cls_sampled = np.array(self.classes)
from SimpleITK import sitkNearestNeighbor, ResampleImageFilter, SmoothingRecursiveGaussianImageFilter, \
GetArrayFromImage, GetImageFromArray, sitkLinear
from skimage import morphology, measure, segmentation, filters
from scipy.ndimage.morphology import binary_erosion, binary_dilation
import numpy as np
trash_threshold = .2
def normalize(img_arr):
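# Clip Hounsfield units to the [-1000, 400] window and rescale linearly to [0, 1].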
max_hu = 400.
min_hu = -1000.
img_arr[img_arr > max_hu] = max_hu
img_arr[img_arr < min_hu] = min_hu
img_arr_normalized = (img_arr - min_hu) / (max_hu - min_hu)
return img_arr_normalized
def resample_image(sitk_img, new_spacing, new_size, method='Linear'):
origin = sitk_img.GetOrigin()
direction = sitk_img.GetDirection()
resampler = ResampleImageFilter()
resampler.SetOutputDirection(direction)
resampler.SetOutputOrigin(origin)
resampler.SetSize(new_size)
if method == 'Linear':
resampler.SetInterpolator(sitkLinear)
else:
resampler.SetInterpolator(sitkNearestNeighbor)
resampler.SetOutputSpacing(new_spacing)
return resampler.Execute(sitk_img)
def gaussian_smooth(sitk_img, sigma=1.5):
img_filter = SmoothingRecursiveGaussianImageFilter()
img_filter.SetSigma(float(sigma))
return img_filter.Execute(sitk_img)
def lung_segmentation(sitk_img, lower_bound, upper_bound):
new_spacing = np.asarray([2.5, 2.5, 5])
orig_size = sitk_img.GetSize()
orig_spacing = sitk_img.GetSpacing()
new_size = [int(np.ceil(orig_size[0] / new_spacing[0] * orig_spacing[0])),
int(np.ceil(orig_size[1] / new_spacing[1] * orig_spacing[1])),
int(np.ceil(orig_size[2] / new_spacing[2] * orig_spacing[2]))]
new_sitk_img = resample_image(sitk_img, new_spacing, new_size)
new_sitk_img = gaussian_smooth(new_sitk_img)
imgs_to_process = GetArrayFromImage(new_sitk_img)
imgs_to_process[imgs_to_process < lower_bound] = lower_bound
binary_threshold = filters.threshold_otsu(imgs_to_process)
img = imgs_to_process < binary_threshold
old_bbox = imgs_to_process.shape
del imgs_to_process
temp = np.zeros(old_bbox)
for c in range(old_bbox[0]):
labels = ~img[c, :, :]
if np.sum(labels):
labels = measure.label(labels, neighbors=4)
regions = measure.regionprops(labels)
labels = [r.area for r in regions]
index = labels.index(max(labels))
bbox = regions[index].bbox
dist = 1
temp[c, bbox[0] + dist:bbox[2] - dist, bbox[1] + dist:bbox[3] - dist] = segmentation.clear_border(
img[c, bbox[0] + dist:bbox[2] - dist, bbox[1] + dist:bbox[3] - dist])
img = temp > 0
del temp
otsu_img = img.copy()
img = morphology.binary_closing(img, selem=np.ones((1, 2, 2)))
labels = measure.label(img, neighbors=4)
regions = measure.regionprops(labels)
labels = [(r.area, r.bbox) for r in regions]
labels.sort(reverse=True)
max_bbox = labels[0][1]
max_bbox_zmin = max_bbox[0]
max_bbox_zmax = max_bbox[3]-1
for i in range(int(max_bbox_zmax - (max_bbox_zmax - max_bbox_zmin) / 3), max_bbox_zmax):
_slice = img[i, :, :]
slice_labels, num = measure.label(_slice, return_num=True)
regions = measure.regionprops(slice_labels)
slice_labels = [[r.area, r.label] for r in regions]
if len(slice_labels) > 2:
slice_labels.sort(reverse=True)
max_area = slice_labels[0][0]
_slice = _slice.astype(np.bool)
thresh = int(max_area) / 4
_slice = morphology.remove_small_objects(_slice, thresh)
img[i, :, :] = _slice
img = img.astype(np.bool)
labels = measure.label(img, neighbors=4)
regions = measure.regionprops(labels)
labels = [(r.area, r.bbox, r.coords) for r in regions]
labels.sort(reverse=True)
max_area = labels[0][0]
max_bbox = labels[0][1]
max_bbox_zmin = max_bbox[0]
max_bbox_zmax = max_bbox[3] - 1
for area, bbox, coords in labels:
region_center_z = (bbox[0]+bbox[3])/2
if area > max_area / 2:
continue
if region_center_z > max_bbox_zmax or region_center_z < max_bbox_zmin:
img[coords[:, 0], coords[:, 1], coords[:, 2]] = 0
_slice = np.sum(img, axis=0) > 0
slice_labels, num = measure.label(_slice, return_num=True)
if num > 1:
regions = measure.regionprops(slice_labels)
slice_labels = [r.area for r in regions]
slice_labels.sort(reverse=True)
max_area = slice_labels[0]
_slice = _slice.astype(np.bool)
thresh = int(max_area) / 4
_slice = morphology.remove_small_objects(_slice, thresh)
bbox = np.where(_slice)
x_min = np.min(bbox[1])
x_max = np.max(bbox[1])
    y_min = np.min(bbox[0])
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; <NAME>; <NAME>;
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" Implementation of GriffinLim vocoder """
import os
import numpy as np
import librosa
from scipy.io.wavfile import write as write_wav
class GriffinLim:
"""python implementation of griffinlim algorithm"""
def __init__(self, data_descriptions):
"""Reference: to paper "Multiband Excitation Vocoder"
"""
assert data_descriptions.audio_featurizer is not None
assert data_descriptions.audio_featurizer.feat is not None
assert data_descriptions.hparams.audio_config is not None
params_func = data_descriptions.audio_featurizer.feat.params
params = params_func(data_descriptions.hparams.audio_config)
self.channels = params.filterbank_channel_count
self.sample_rate = params.sample_rate
self.window_length = int(params.window_length * self.sample_rate)
self.hop_length = int(params.frame_length * self.sample_rate)
self.n_fft = self._get_nfft(self.window_length)
self.lower_frequency_limit = params.lower_frequency_limit
self.upper_frequency_limit = params.upper_frequency_limit
self.window_type = params.window_type
self.EPS = 1e-10
def _get_nfft(self, window_length):
"""n_fft is an exponential power of 2 closest to and larger than win_length"""
nfft = 2
while nfft < window_length:
nfft *= 2
return nfft
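    # Worked example of the helper above: window_length = 400 samples yields
    # n_fft = 512, the smallest power of two that is >= 400.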
def __call__(self, feats, hparams, name=None):
linear_feats = self._logmel_to_linear(feats)
samples = self._griffin_lim(linear_feats, hparams.gl_iters)
samples = samples / 32768
if not os.path.exists(hparams.output_directory):
os.makedirs(hparams.output_directory)
output_path = os.path.join(hparams.output_directory, '%s.wav' % str(name))
write_wav(output_path,
self.sample_rate,
(samples * np.iinfo(np.int16).max).astype(np.int16))
seconds = float(samples.shape[0]) / self.sample_rate
return seconds
def _logmel_to_linear(self, feats):
"""Convert FBANK to linear spectrogram.
Args:
feats: FBANK feats, shape: [length, channels]
Returns:
linear_feats: Linear spectrogram
"""
assert feats.shape[1] == self.channels
        linear_feats = np.power(10.0, feats)
import numpy as np
import os
import glob
from PIL import Image
from scipy import misc
def instr(str,substr,pos):
t=[]
counter=0
for s in str:
if s==substr:
t.append(counter)
counter += 1
return t[pos-1]
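# Example of the helper above: the 2nd comma of "a,b,c" sits at index 3,
# so instr("a,b,c", ",", 2) == 3.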
def power_plant_data_regression(do_normalize):
FILE="C:\\MLDatabases\\data\\uci\\power_plant\\CCPP\\Folds5x2_pp.csv"
data=np.loadtxt(FILE,dtype=np.float,delimiter=",",skiprows=1)
x=data[:,0:4]
y=data[:,4]
if do_normalize:
x=x-np.mean(x,axis=1,keepdims=True)
x=x/np.std(x,axis=1,keepdims=True)
x_train=x[0:8000,:]
y_train=y[0:8000]
x_test=x[8000:None,:]
y_test=y[8000:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def epileptic_EEG_classification(do_normalize):
"""
https://archive.ics.uci.edu/ml/datasets/Epileptic+Seizure+Recognition
"""
FILE="C:\\MLDatabases\\data\\uci\\epileptic\\data.csv"
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1)
data=np.asarray(data[:,1:None],dtype=np.float)
x=data[:,0:178]
y=data[:,178]
y[y>1]=0
if do_normalize:
x=x-np.mean(x,axis=1,keepdims=True)
x=x/np.std(x,axis=1,keepdims=True)
x_train=x[0:10000,:]
y_train=y[0:10000]
x_test=x[10000:None,:]
y_test=y[10000:None]
print(x_train.shape,np.unique(y_train),x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def energy_efficiency_regression_y1(do_normalize):
"""
https://archive.ics.uci.edu/ml/datasets/Energy+efficiency
"""
FILE="C:\\MLDatabases\\data\\uci\\energy efficiency\\ENB2012_data.csv"
data=np.loadtxt(FILE,dtype=np.float,delimiter=",",skiprows=1)
x=data[:,0:8]
y=data[:,8]
if do_normalize:
x=x-np.mean(x,axis=1,keepdims=True)
x=x/np.std(x,axis=1,keepdims=True)
x_train=x[0:668,:]
y_train=y[0:668]
x_test=x[668:None,:]
y_test=y[668:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def energy_efficiency_regression_y2(do_normalize):
"""
https://archive.ics.uci.edu/ml/datasets/Energy+efficiency
"""
FILE="C:\\MLDatabases\\data\\uci\\energy efficiency\\ENB2012_data.csv"
data=np.loadtxt(FILE,dtype=np.float,delimiter=",",skiprows=1)
x=data[:,0:8]
y=data[:,9]
if do_normalize:
x=x-np.mean(x,axis=1,keepdims=True)
x=x/np.std(x,axis=1,keepdims=True)
x_train=x[0:668,:]
y_train=y[0:668]
x_test=x[668:None,:]
y_test=y[668:None]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test)
def spam_notspam_youtube_rnn_classification(x_onehot_encode):
"""
https://archive.ics.uci.edu/ml/datasets/YouTube+Spam+Collection
"""
x=[]
y=[]
unique_chars=set()
max_len=0
char_to_idx=dict()
idx_to_chr=dict()
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube01-Psy.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>max_len:
max_len=len(l)
unique_chars=set(''.join(unique_chars)+l)
x.append(l)
y.append(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube02-KatyPerry.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>max_len:
max_len=len(l)
unique_chars=set(''.join(unique_chars)+l)
x.append(l)
y.append(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube03-LMFAO.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>max_len:
max_len=len(l)
unique_chars=set(''.join(unique_chars)+l)
x.append(l)
y.append(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube04-Eminem.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>max_len:
max_len=len(l)
unique_chars=set(''.join(unique_chars)+l)
x.append(l)
y.append(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\Youtube05-Shakira.csv"
with open(FILE,"r", encoding="utf8") as f:
line=f.readline()
for line in f:
l=line[instr(line,",",3):len(line)-2].strip(",\"").strip("\",")
if x_onehot_encode:
if len(l)>max_len:
max_len=len(l)
unique_chars=set(''.join(unique_chars)+l)
x.append(l)
y.append(int(line[-2]))
FILE="C:\\MLDatabases\\data\\uci\\spamNotspam\\SMSSpamCollection"
with open(FILE,"r", encoding="utf8") as f:
for line in f:
if line.startswith("ham"):
if x_onehot_encode:
if len(line[3:None].strip())>max_len:
max_len=len(line[3:None].strip())
unique_chars=set(''.join(unique_chars)+line[3:None].strip())
x.append(line[3:None].strip())
y.append(1)
else:
if x_onehot_encode:
if len(line[5:None].strip())>max_len:
max_len=len(line[5:None].strip())
unique_chars=set(''.join(unique_chars)+line[5:None].strip())
x.append(line[5:None].strip())
y.append(0)
if x_onehot_encode:
char_to_idx={chr:idx for idx,chr in enumerate(unique_chars)}
idx_to_chr={idx:chr for idx,chr in enumerate(unique_chars)}
for i,sen in enumerate(x):
t=[]
for chars in sen:
t.append(char_to_idx[chars])
x[i]=t
x_train=x[0:6000]
y_train=y[0:6000]
x_test=x[6000:None]
y_test=y[6000:None]
print(x_train[100])
print( ''.join([idx_to_chr[i] for i in x_train[100]] ))
return (x_train,y_train,x_test,y_test),(unique_chars,char_to_idx,idx_to_chr,max_len)
def plant_leaf_image_classification(do_clip):
"""
https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set
"""
PATH="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data"
dir_list=os.listdir(PATH)
plantname_to_idx={name:idx for (idx,name) in enumerate(dir_list)}
idx_to_plantname={idx:name for (idx,name) in enumerate(dir_list)}
np.random.seed(10)
labels=[]
images=np.zeros((1600,50,50))
start_ix=0
for subfolder in dir_list:
imagePaths = glob.glob(PATH + '\\' + subfolder +'\\*.jpg')
im_array = np.array( [misc.imresize(np.array(Image.open(imagePath), 'f'),(50,50)) for imagePath in imagePaths] )
images[start_ix:start_ix+len(im_array)] = im_array
start_ix += len(im_array)
for imagePath in imagePaths:
labels.append(plantname_to_idx[subfolder])
if do_clip[0]:
        images = np.clip(images, do_clip[1], do_clip[2])
y=np.array(labels)
idx=np.linspace(0,1599,1600,dtype=np.int)
np.random.shuffle(idx)
np.random.shuffle(idx)
np.random.shuffle(idx)
idx_train=idx[0:1500]
idx_test=idx[1500:None]
x_train=images[idx_train]
y_train=y[idx_train]
x_test=images[idx_test]
y_test=y[idx_test]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
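# Illustrative call (not in the original script): do_clip is a (flag, min, max)
# tuple, so clipping pixel values to [0, 255] looks like
# (x_tr, y_tr, x_te, y_te), (name_to_idx, idx_to_name) = plant_leaf_image_classification((1, 0., 255.))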
def plant_leat_classification_shape(do_normalize):
"""
https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set
"""
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Sha_64.txt"
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(np.unique(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(np.unique(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=np.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normalize:
data=data-np.mean(data,axis=1,keepdims=True)
data=data/np.std(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def plant_leat_classification_texture(do_normalize):
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Tex_64.txt"
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(np.unique(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(np.unique(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=np.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normalize:
data=data-np.mean(data,axis=1,keepdims=True)
data=data/np.std(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def plant_leat_classification_margin(do_normalize):
FILE="C:\\MLDatabases\\data\\uci\\100 leaves plant\\100 leaves plant species\\data_Mar_64.txt"
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,usecols=(0,))
plantname_to_idx={name:idx for (idx,name) in enumerate(np.unique(data))}
idx_to_plantname={idx:name for (idx,name) in enumerate(np.unique(data))}
del data
def class_converter(s):
return plantname_to_idx[s.decode("utf-8")]
data=np.loadtxt(FILE,delimiter=",",skiprows=1,converters={0:class_converter})
if do_normalize:
data=data-np.mean(data,axis=1,keepdims=True)
data=data/np.std(data,axis=1,keepdims=True)
x_train=data[0:1500,1:None]
y_train=data[0:1500,0]
x_test=data[1500:None,1:None]
y_test=data[1500:None,0]
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
return (x_train,y_train,x_test,y_test),(plantname_to_idx,idx_to_plantname)
def truck_failure_anomaly_detection_clf(do_normalize):
"""
https://archive.ics.uci.edu/ml/datasets/IDA2016Challenge
"""
FILE="C:\\MLDatabases\\data\\uci\\truck\\to_uci\\aps_failure_training_set.csv"
def class_converter(s):
if s==b"neg":
return 0
else:
return 1
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,converters={0:class_converter})
data[data=="na"]=-1
data=np.asarray(data,dtype=np.float32)
x_train=np.copy(data[:,1:None])
y_train=np.copy(data[:,0])
if do_normalize:
x_train=x_train-np.mean(x_train,axis=1,keepdims=True)
x_train=x_train/np.std(x_train,axis=1,keepdims=True)
del data
FILE="C:\\MLDatabases\\data\\uci\\truck\\to_uci\\aps_failure_test_set.csv"
data=np.loadtxt(FILE,dtype=">U",delimiter=",",skiprows=1,converters={0:class_converter})
data[data=="na"]=-1
data=np.asarray(data,dtype=np.float32)
x_test=data[:,1:None]
y_test=data[:,0]
if do_normalize:
x_test=x_test-np.mean(x_test,axis=1,keepdims=True)
        x_test=x_test/np.std(x_test,axis=1,keepdims=True)
import numpy as np
from skimage.io import imread
from transforms3d.euler import mat2euler, euler2mat
def rectify_symmetric_rotation(R, rotation_groups):
'''
rectify pose according to https://arxiv.org/pdf/1908.07640.pdf Proposition 1
:param R: [3,3]
:param rotation_groups: [n,3,3]
:return:
'''
rg_inv = np.transpose(rotation_groups, [0, 2, 1]) # [n,3,3]
    rg_diff = R[None, :, :] @ rg_inv - np.identity(3)
"""
Implementations of the IPFP algorithm to solve for equilibrium and do comparative statics
in several variants of the `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model:
* homoskedastic with singles (as in CS 2006)
* homoskedastic without singles
* gender-heteroskedastic: with a scale parameter on the error term for women
* gender- and type-heteroskedastic: with a scale parameter on the error term for women
each solver, when fed the joint surplus and margins,
returns the equilibrium matching patterns, the adding-up errors on the margins,
and if requested (gr=True) the derivatives of the matching patterns in all primitives.
"""
import numpy as np
from math import sqrt
import sys
import scipy.linalg as spla
from ipfp_utils import print_stars, npexp, der_npexp, npmaxabs, \
nplog, nppow, der_nppow, nprepeat_col, nprepeat_row, describe_array
def ipfp_homo_nosingles_solver(Phi, men_margins, women_margins,
tol=1e-9, gr=False, verbose=False,
maxiter=1000):
"""
solve for equilibrium in a Choo and Siow market without singles
given systematic surplus and margins
:param np.array Phi: matrix of systematic surplus, shape (ncat_men, ncat_women)
:param np.array men_margins: vector of men margins, shape (ncat_men)
:param np.array women_margins: vector of women margins, shape (ncat_women)
:param float tol: tolerance on change in solution
:param boolean gr: if True, also evaluate derivatives of muxy wrt Phi
:param boolean verbose: prints stuff
:param int maxiter: maximum number of iterations
:return:
* muxy the matching patterns, shape (ncat_men, ncat_women)
* marg_err_x, marg_err_y the errors on the margins
* and the gradients of muxy wrt Phi if gr=True
"""
ncat_men = men_margins.shape[0]
ncat_women = women_margins.shape[0]
n_couples = np.sum(men_margins)
# check that there are as many men as women
if np.abs(np.sum(women_margins) - n_couples) > n_couples * tol:
print_stars(
f"{ipfp_homo_nosingles_solver}: there should be as many men as women")
if Phi.shape != (ncat_men, ncat_women):
print_stars(
f"ipfp_hetero_solver: the shape of Phi should be ({ncat_men}, {ncat_women}")
sys.exit(1)
ephi2 = npexp(Phi / 2.0)
ephi2T = ephi2.T
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
    # starting with a reasonable initial point for tx and ty: tx = ty = bigc
    # it is important that it fits the number of individuals
#############################################################################
bigc = sqrt(n_couples / np.sum(ephi2))
txi = np.full(ncat_men, bigc)
tyi = np.full(ncat_women, bigc)
err_diff = bigc
tol_diff = tol * err_diff
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = men_margins / sx
sy = ephi2T @ tx
ty = women_margins / sy
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi, tyi = tx, ty
niter += 1
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = np.sum(muxy, 1) - men_margins
marg_err_y = np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return muxy, marg_err_x, marg_err_y
else:
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = ncat_men + ncat_women
n_prod_categories = ncat_men * ncat_women
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:ncat_men, :ncat_men] = np.diag(sxi)
lhs[:ncat_men, ncat_men:] = ephi2 * txi.reshape((-1, 1))
lhs[ncat_men:, ncat_men:] = np.diag(syi)
lhs[ncat_men:, :ncat_men] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 = der_npexp(Phi / 2.0) / \
(2.0 * ephi2) # 1/2 with safeguards
ivar = 0
for iman in range(ncat_men):
rhs[iman, ivar:(ivar + ncat_women)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += ncat_women
ivar1 = ncat_men
ivar2 = 0
for iwoman in range(ncat_women):
rhs[ivar1, ivar2:n_cols_rhs:ncat_women] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:ncat_men, :]
dT = dt_dT[ncat_men:, :]
# now construct the derivatives of muxy
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(ncat_men):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + ncat_women),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += ncat_women
for iwoman in range(ncat_women):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:ncat_women,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy += np.diag(muxy_vec2)
return muxy, marg_err_x, marg_err_y, dmuxy
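# Illustrative call (not part of the original module): a tiny 2x2 market with
# equal numbers of men and women, which is what the no-singles solver assumes.
def _example_ipfp_homo_nosingles():
    Phi = np.array([[1.0, 0.2], [0.2, 1.0]])
    men_margins = np.array([50.0, 50.0])
    women_margins = np.array([60.0, 40.0])
    muxy, marg_err_x, marg_err_y = ipfp_homo_nosingles_solver(
        Phi, men_margins, women_margins, verbose=True)
    # muxy should reproduce the margins up to the solver tolerance
    print(muxy, marg_err_x, marg_err_y)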
def ipfp_homo_solver(Phi, men_margins, women_margins, tol=1e-9,
gr=False, verbose=False, maxiter=1000):
"""
solve for equilibrium in a Choo and Siow market
given systematic surplus and margins
:param np.array Phi: matrix of systematic surplus, shape (ncat_men, ncat_women)
:param np.array men_margins: vector of men margins, shape (ncat_men)
:param np.array women_margins: vector of women margins, shape (ncat_women)
:param float tol: tolerance on change in solution
:param boolean gr: if True, also evaluate derivatives of muxy wrt Phi
:param boolean verbose: prints stuff
:param int maxiter: maximum number of iterations
:return:
* (muxy, mux0, mu0y) the matching patterns
* marg_err_x, marg_err_y the errors on the margins
* and the gradients of (muxy, mux0, mu0y) wrt (men_margins, women_margins, Phi) if gr=True
"""
ncat_men = men_margins.size
ncat_women = women_margins.size
if Phi.shape != (ncat_men, ncat_women):
print_stars(
f"ipfp_homo_solver: the shape of Phi should be ({ncat_men}, {ncat_women}")
sys.exit(1)
ephi2 = npexp(Phi / 2.0)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# where mux0=tx**2 and mu0y=ty**2
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
ephi2T = ephi2.T
    nindivs = np.sum(men_margins)
from __future__ import absolute_import, division, print_function
import sys
import os.path
import warnings
import datetime, time
import numpy as np
import astropy.table
# See pixsim.py
import astropy.time
from astropy.io import fits
import fitsio
import desitarget
import desitarget.targetmask
from desitarget.targets import main_cmx_or_sv
import desispec.io
import desispec.io.util
import desimodel.io
from desimodel.focalplane import fiber_area_arcsec2
import desiutil.depend
from desiutil.iers import freeze_iers
import desispec.interpolation
import desisim.io
import desisim.specsim
#- Reference observing conditions for each of dark, gray, bright
reference_conditions = dict(DARK=dict(), GRAY=dict(), BRIGHT=dict())
reference_conditions['DARK']['SEEING'] = 1.1
reference_conditions['DARK']['EXPTIME'] = 1000
reference_conditions['DARK']['AIRMASS'] = 1.0
reference_conditions['DARK']['MOONFRAC'] = 0.0
reference_conditions['DARK']['MOONALT'] = -60
reference_conditions['DARK']['MOONSEP'] = 180
reference_conditions['GRAY']['SEEING'] = 1.1
reference_conditions['GRAY']['EXPTIME'] = 1000
reference_conditions['GRAY']['AIRMASS'] = 1.0
reference_conditions['GRAY']['MOONFRAC'] = 0.1
reference_conditions['GRAY']['MOONALT'] = 10
reference_conditions['GRAY']['MOONSEP'] = 60
reference_conditions['BRIGHT']['SEEING'] = 1.1
reference_conditions['BRIGHT']['EXPTIME'] = 300
reference_conditions['BRIGHT']['AIRMASS'] = 1.0
reference_conditions['BRIGHT']['MOONFRAC'] = 0.7
reference_conditions['BRIGHT']['MOONALT'] = 60
reference_conditions['BRIGHT']['MOONSEP'] = 50
for objtype in ('LRG', 'QSO', 'ELG'):
reference_conditions[objtype] = reference_conditions['DARK']
for objtype in ('MWS', 'BGS'):
reference_conditions[objtype] = reference_conditions['BRIGHT']
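# For example, reference_conditions['ELG'] is just an alias of the DARK entry,
# so reference_conditions['ELG']['EXPTIME'] == 1000.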
def simarc(arcdata, nspec=5000, nonuniform=False, testslit=False):
'''
Simulates an arc lamp exposure
Args:
arcdata (Table): Table with columns VACUUM_WAVE and ELECTRONS
nspec (int, optional) number of spectra to simulate
nonuniform (bool, optional): include calibration screen non-uniformity
testslit (bool, optional): this argument is undocumented.
Returns: (wave, phot, fibermap)
wave: 1D[nwave] wavelengths in Angstroms
phot: 2D[nspec,nwave] photons observed by CCD (i.e. electrons)
fibermap: fibermap Table
Note: this bypasses specsim since we don't have an arclamp model in
surface brightness units; we only have electrons on the CCD. But it
does include the effect of varying fiber sizes.
TODO:
* add exptime support
* update inputs to surface brightness and DESI lamp lines (DESI-2674)
* add psfconvolve option
'''
wave = arcdata['VACUUM_WAVE']
phot = arcdata['ELECTRONS']
if testslit:
fibermap = astropy.table.Table(testslit_fibermap()[0:nspec])
else:
fibermap = astropy.table.Table(desispec.io.empty_fibermap(nspec))
fibermap.meta['FLAVOR'] = 'arc'
fibermap['OBJTYPE'] = 'ARC'
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
r = np.sqrt(x**2 + y**2)
#-----
#- Determine ratio of fiber sizes relative to larges fiber
fiber_area = fiber_area_arcsec2(x, y)
size_ratio = fiber_area / np.max(fiber_area)
#- Correct photons for fiber size
phot = np.tile(phot, nspec).reshape(nspec, len(wave))
phot = (phot.T * size_ratio).T
#- Apply calibration screen non-uniformity
if nonuniform:
ratio = _calib_screen_uniformity(radius=r)
assert np.all(ratio <= 1) and np.all(ratio > 0.99)
phot = (phot.T * ratio).T
return wave, phot, fibermap
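# Minimal illustrative call (not from the original module); the line list is
# made up and desimodel data files must be available for the fiber areas.
def _example_simarc():
    arcdata = astropy.table.Table()
    arcdata['VACUUM_WAVE'] = np.array([4000.0, 5500.0, 7000.0])
    arcdata['ELECTRONS'] = np.array([100.0, 250.0, 80.0])
    wave, phot, fibermap = simarc(arcdata, nspec=10)
    assert phot.shape == (10, len(wave))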
def simflat(flatfile, nspec=5000, nonuniform=False, exptime=10, testslit=False,
psfconvolve=True, specsim_config_file="desi"):
'''
Simulates a flat lamp calibration exposure
Args:
flatfile (str): filename with flat lamp spectrum data
nspec (int, optional): number of spectra to simulate
nonuniform (bool, optional): include calibration screen non-uniformity
exptime (float, optional): exposure time in seconds
psfconvolve (bool, optional): passed to simspec.simulator.Simulator camera_output.
if True, convolve with PSF and include per-camera outputs
specsim_config_file (str, optional): path to DESI instrument config file.
default is desi config in specsim package.
Returns: (sim, fibermap)
sim: specsim Simulator object
fibermap: fibermap Table
'''
import astropy.units as u
import specsim.simulator
from desiutil.log import get_logger
log = get_logger()
freeze_iers()
log.info('Reading flat lamp spectrum from {}'.format(flatfile))
sbflux, hdr = fits.getdata(flatfile, header=True)
wave = desispec.io.util.header2wave(hdr)
assert len(wave) == len(sbflux)
#- Trim to DESI wavelength ranges
#- TODO: is there an easier way to get these parameters?
try:
params = desimodel.io.load_desiparams()
wavemin = params['ccd']['b']['wavemin']
wavemax = params['ccd']['z']['wavemax']
except KeyError:
wavemin = desimodel.io.load_throughput('b').wavemin
wavemax = desimodel.io.load_throughput('z').wavemax
ii = (wavemin <= wave) & (wave <= wavemax)
wave = wave[ii]
sbflux = sbflux[ii]
#- Downsample to 0.2A grid to not blow up memory
ww = np.arange(wave[0], wave[-1]+0.1, 0.2)
sbflux = desispec.interpolation.resample_flux(ww, wave, sbflux)
wave = ww
if testslit:
fibermap = astropy.table.Table(testslit_fibermap()[0:nspec])
else:
fibermap = astropy.table.Table(desispec.io.empty_fibermap(nspec))
fibermap.meta['FLAVOR'] = 'flat'
fibermap['OBJTYPE'] = 'FLT'
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
    r = np.sqrt(x**2 + y**2)
#=============================================================================
# Author: <NAME>
#-----------------------------------------------------------------------------
# OHT Analysis Geometry
#-----------------------------------------------------------------------------
# Creating OHT geometry using gmsh api. The first layer is created using gmsh
# api. The subsequent plies and cohesive layers are created using python loops.
# The cohesive elements are zero thickness elements with 8 nodes. 4-nodes from
# bottom and top are shared with plies respectively
#=============================================================================
# Geometry and Mesh controls|
#----------------------------
# <numMainbodyXElem> <numMainbodyXElem>
# <numClampXElem> ←---------→ ←---------→
# ----------------------------------------------------------------------
# ↑ | | |\ /| | |
# | | | | \ / | | |
# | | | | \____/--|-→ <numCSElem> |
# | | | | / \ | | |
# Ly | | | \____/ | | |
# | | | | / | \ | | |
# | | | | / ↓ \ | | |
# | | | |<numCurveElem> | |
# ↓ ----------------------------------------------------------------------
# ←-------clp-------→ ←-------clp-------→
# ←----------------------------- Lx ----------------------------------→
#
#=============================================================================
def create_geo(Rx=0.0024,file_dst="D:\\",filename='sample_temp',
onlypreview=0, save_mesh=0,meshname='mymesh',
preview_geom_mesh=0,log_flag=0, numCurveElem=25,
numClampXElem =4, numMainbodyXElem= 10, numCSElem = 15):
"""
Args:
Rx (float, optional): Longitudinal radius of elliptical hole.
Defaults to 0.0024.
file_dst (str, optional): Destination path of generated geometry file.
Defaults to "D:\".
filename (str, optional): Filename of generated geometry file.
Defaults to 'sample_temp'.
onlypreview (int, optional): Only previews the geometry in GMSH
application no saving. Defaults to 0.
save_mesh (int, optional): Flag to save the mesh (different from the
LS-Dyna geometry and mesh). Defaults to 0.
meshname (str, optional): Mesh name. Defaults to 'mymesh'.
        preview_geom_mesh (int, optional): Previews the geometry (and mesh) then
saves it. User needs to close preview window manually to proceed.
Defaults to 0.
        log_flag (int, optional): Whether to generate the GMSH log or not.
Defaults to 0.
numCurveElem (int, optional): Mesh control- Number of elements on the
hole circumference (in each of the 4 sectors). Defaults to 25.
        numClampXElem (int, optional): Mesh control- Number of elements on the
            clamping area in x-direction. Defaults to 4.
        numMainbodyXElem (int, optional): Mesh control- Number of elements on the
            mainbody area in x-direction. Defaults to 10.
numCSElem (int, optional): Mesh control- Number of elements in cross
sectional part of notch mesh. Defaults to 15.
"""
# Importing utilities
import gmsh # install the api using => "pip install gmsh"
import numpy as np
np.set_printoptions(precision=20)
from time import process_time
try:from mt_x_mainKeywordString import mainFile
except:from utilities.mt_x_mainKeywordString import mainFile
import subprocess
import os
import time
start = process_time()
# Model geometry details
Lx = 0.240 # Length of geometry in x-direction
Ly = 0.0288 # Width of geometry in y-direction
Lz = 0.0019 # Thickness of geometry in z-direction
    clp = 0.05 # Clamping distance
numPly=12 # Number of composite material layers(plies)
LzPly = Lz/numPly # Thickness of each ply
numCohElem = numPly-1 # Number of cohesive element layers (numPly-1)
Rx=Rx # Radius of elliptical notch in x-direction
Ry=0.0024 # Radius of elliptical notch in y-direction
    MeshSizeFactor = 1 # Resultant = MeshSize * meshSizeFactor
os.makedirs(file_dst, exist_ok=True)
# lsprepost application location (if not in path)
lspp_loc = "C:\Program Files\LSTC\LS-PrePost 4.8\lsprepost4.8_x64.exe"
# starting gmsh
gmsh.initialize()
# logging gmsh geometry and mesh creation process
if log_flag:
gmsh.logger.start()
gmsh.model.add("mymodel")
gmsh.option.setNumber('Geometry.CopyMeshingMethod',1);
#=====================================================================
# Geometry creation
#=====================================================================
# Finding intersection point of a line passing through ellipse center and
# ellipse
a = Rx
b = Ry
if Rx>Ry:
m = Ry/Rx*(0.8) # slope of the line
if Rx==Ry:
m = 1
else:
# m = 1
m = Ry/Rx*2 # slope of the line
c = Ly/2 - (m * Lx/2)
h = Lx/2
k = Ly/2
phi = c - k
e1x1 = (((b*b*h)-a*a*m*phi+
a*b*np.sqrt((b*b)+(a*a*m*m)-(2*m*phi*h)-(phi*phi)-(m*m*h*h)))/
(a*a*m*m + b*b))
e1y1 = m*e1x1 + c
e1x2 = (((b*b*h) - a*a*m*phi -
a*b*np.sqrt(b*b + a*a*m*m - 2*m*phi*h - phi*phi - m*m*h*h))/
(a*a*m*m + b*b))
e1y2 = m*e1x2 + c
e2x1 = 0 + (Lx/2-Ly/2)
e2y1 = 0
e2x2 = Ly+(Lx/2-Ly/2)
e2y2 = Ly
# Creating points in the space
gmsh.model.occ.addPoint(0, 0, 0, 1.0) #1
gmsh.model.occ.addPoint(clp, 0, 0, 1.0) #2
gmsh.model.occ.addPoint(Lx-clp, 0, 0, 1.0) #3
gmsh.model.occ.addPoint(Lx, 0, 0, 1.0) #4
gmsh.model.occ.addPoint(0, Ly, 0, 1.0) #5
gmsh.model.occ.addPoint(clp, Ly, 0, 1.0) #6
gmsh.model.occ.addPoint(Lx-clp, Ly, 0, 1.0) #7
gmsh.model.occ.addPoint(Lx, Ly, 0, 1.0) #8
gmsh.model.occ.addPoint(e1x2, e1y2, 0, 1.0) #9
gmsh.model.occ.addPoint(e1x1, e1y2, 0, 1.0) #10
gmsh.model.occ.addPoint(e1x1, e1y1, 0, 1.0) #11
gmsh.model.occ.addPoint(e1x2, e1y1, 0, 1.0) #12
gmsh.model.occ.addPoint(e2x2, e2y2, 0, 1.0) #13
gmsh.model.occ.addPoint(e2x1, e2y2, 0, 1.0) #14
gmsh.model.occ.addPoint(e2x1, e2y1, 0, 1.0) #15
gmsh.model.occ.addPoint(e2x2, e2y1, 0, 1.0) #16
if Rx>=Ry:
gmsh.model.occ.addPoint(Lx/2, Ly, 0, 1.0) #17
else:
gmsh.model.occ.addPoint(0, Ly/2, 0, 1.0) #18
gmsh.model.occ.addPoint(clp, Ly/2, 0, 1.0) #19
gmsh.model.occ.addPoint(e2x1, Ly/2, 0, 1.0) #20
ClampXElem = []
MainbodyXElem = []
CSElem = []
CurveElem = []
Curve2 = []
# Creating lines by joining points
if Rx>=Ry:
CurveElem.append(gmsh.model.occ.addLine(1,5))
CurveElem.append(gmsh.model.occ.addLine(2,6))
CurveElem.append(gmsh.model.occ.addLine(3,7))
CurveElem.append(gmsh.model.occ.addLine(4,8))
CurveElem.append(gmsh.model.occ.addLine(14,15))
CurveElem.append(gmsh.model.occ.addLine(13,16))
CurveElem.append(gmsh.model.occ.addLine(15,16))
ClampXElem.append(gmsh.model.occ.addLine(1,2))
ClampXElem.append(gmsh.model.occ.addLine(3,4))
ClampXElem.append(gmsh.model.occ.addLine(5,6))
ClampXElem.append(gmsh.model.occ.addLine(7,8))
MainbodyXElem.append(gmsh.model.occ.addLine(2,15))
MainbodyXElem.append(gmsh.model.occ.addLine(16,3))
MainbodyXElem.append(gmsh.model.occ.addLine(6,14))
MainbodyXElem.append(gmsh.model.occ.addLine(13,7))
CSElem.append(gmsh.model.occ.addLine(12,14))
CSElem.append(gmsh.model.occ.addLine(9,15))
CSElem.append(gmsh.model.occ.addLine(10,16))
CSElem.append(gmsh.model.occ.addLine(11,13))
Curve2.append(gmsh.model.occ.addLine(14,17))
Curve2.append(gmsh.model.occ.addLine(17,13))
if Rx<Ry:
ClampXElem.append(gmsh.model.occ.addLine(1,2))
ClampXElem.append(gmsh.model.occ.addLine(3,4))
ClampXElem.append(gmsh.model.occ.addLine(5,6))
ClampXElem.append(gmsh.model.occ.addLine(7,8))
MainbodyXElem.append(gmsh.model.occ.addLine(2,15))
MainbodyXElem.append(gmsh.model.occ.addLine(16,3))
MainbodyXElem.append(gmsh.model.occ.addLine(6,14))
MainbodyXElem.append(gmsh.model.occ.addLine(13,7))
CSElem.append(gmsh.model.occ.addLine(12,14))
CSElem.append(gmsh.model.occ.addLine(9,15))
CSElem.append(gmsh.model.occ.addLine(10,16))
CSElem.append(gmsh.model.occ.addLine(11,13))
CurveElem.append(gmsh.model.occ.addLine(3,7))
CurveElem.append(gmsh.model.occ.addLine(4,8))
CurveElem.append(gmsh.model.occ.addLine(13,16))
CurveElem.append(gmsh.model.occ.addLine(13,14))
CurveElem.append(gmsh.model.occ.addLine(15,16))
Curve2.append(gmsh.model.occ.addLine(1,17))
Curve2.append(gmsh.model.occ.addLine(17,5))
Curve2.append(gmsh.model.occ.addLine(2,18))
Curve2.append(gmsh.model.occ.addLine(18,6))
Curve2.append(gmsh.model.occ.addLine(15,19))
Curve2.append(gmsh.model.occ.addLine(19,14))
gmsh.model.occ.synchronize()
# Creating ellipse
mellipse = np.pi/2
if Rx>=Ry:
ellipse = gmsh.model.occ.addEllipse(Lx/2,Ly/2,0,Rx,Ry,angle1=mellipse,
angle2=2*np.pi+mellipse)
else:
ellipse = gmsh.model.occ.addEllipse(Lx/2,Ly/2,0,Ry,Rx,angle1=mellipse,
angle2=2*np.pi+mellipse)
gmsh.model.occ.rotate([(1,ellipse)], Lx/2, Ly/2, 0, 0, 0, 1, np.pi/2)
gmsh.model.occ.synchronize()
# Splitting ellipse using lines across ellipse
cutOut = gmsh.model.occ.cut([(1,ellipse)],
[(1,CSElem[0]),(1,CSElem[1]),(1,CSElem[2]),
(1,CSElem[3])],removeTool=(False))
for i in range(1,len(cutOut[0])-1):
CurveElem.append(cutOut[0][i][1])
Curve2.append(cutOut[0][0][1])
Curve2.append(cutOut[0][-1][1])
gmsh.model.occ.synchronize()
# Surface groups : Grouping different lines to form closed surface
# boundaries.
sTag_list = []
if Rx>=Ry:
linegroup = [
[1,8,2,10],[14,2,12,5],[6,13,3,15],[3,9,4,11],
[17,7,18,24],[25,18,6,19],[16,5,17,23]
]
else:
linegroup = [
[6,13,8,15],[13,2,14,4],
[17,11,25,10],[11,15,12,26],[12,16,9,27]
]
for surface in linegroup:
lTag = gmsh.model.occ.add_wire(surface)
sTag = gmsh.model.occ.add_plane_surface([lTag])
sTag_list.append(sTag)
gmsh.model.occ.synchronize()
# Setting transfinite curves for structured mesh
for i in ClampXElem:
gmsh.model.mesh.setTransfiniteCurve(i,numClampXElem)
gmsh.model.occ.synchronize()
for i in MainbodyXElem:
gmsh.model.mesh.setTransfiniteCurve(i,numMainbodyXElem)
gmsh.model.occ.synchronize()
for i in CSElem:
gmsh.model.mesh.setTransfiniteCurve(i,numCSElem)
gmsh.model.occ.synchronize()
for i in CurveElem:
gmsh.model.mesh.setTransfiniteCurve(i,numCurveElem)
gmsh.model.occ.synchronize()
for i in Curve2:
gmsh.model.mesh.setTransfiniteCurve(i,int(numCurveElem/2))
gmsh.model.occ.synchronize()
# Setting tranfinite surfaces for structured mesh
for i in sTag_list:
gmsh.model.mesh.setTransfiniteSurface(i)
gmsh.model.occ.synchronize()
# surface groups : Grouping different lines to form closed surface
# boundaries (with more than 4 points/lines)
if Rx>=Ry:
lTag = gmsh.model.occ.add_wire([19,21,20,16,22,26])
gmsh.model.occ.synchronize()
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[13,14,12,11])
gmsh.model.occ.synchronize()
elif Rx<Ry:
lTag = gmsh.model.occ.add_wire([1,20,21,3,19,18])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[1,2,6,5])
gmsh.model.occ.synchronize()
lTag = gmsh.model.occ.add_wire([5,22,23,7,21,20])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[2,15,14,6])
gmsh.model.occ.synchronize()
lTag = gmsh.model.occ.add_wire([9,23,22,10,24,28])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[15,9,12,14])
gmsh.model.occ.synchronize()
gmsh.model.mesh.recombine()
# Extrude: Adding thickness to create a singly ply.
# Number of elements in thickness direction
numElemThickness = 3
model_ = gmsh.model.getEntities(2)
gmsh.model.occ.synchronize()
gmsh.model.occ.extrude(model_,0,0,LzPly,numElements=[numElemThickness],
heights=[1],recombine=True)
gmsh.model.occ.synchronize()
#=====================================================================
# Meshing
#=====================================================================
# Mesh options
gmsh.option.setNumber("Mesh.Smoothing", 100)
# 2D mesh algorithm (1: MeshAdapt, 2: Automatic, 3: Initial mesh only,
# 5: Delaunay, 6: Frontal-Delaunay, 7: BAMG, 8: Frontal-Delaunay for Quads,
# 9: Packing of Parallelograms)
meshalgo2d = 8
gmsh.option.setNumber("Mesh.Algorithm",meshalgo2d)
# Recombine all triangular meshes? (yes:1, no:0)
RecombineTriMesh = 1
gmsh.option.setNumber("Mesh.RecombineAll",RecombineTriMesh)
gmsh.option.setNumber("Mesh.MeshSizeFactor",MeshSizeFactor)
# Generating mesh
gmsh.model.mesh.clear()
# Meshing 2D
gmsh.model.mesh.generate(2)
# Meshing 3D
gmsh.model.mesh.generate(3)
# gmsh mesh name without extension # save mesh (yes-1,no-0)
if save_mesh:
gmsh.write(f"{meshname}.msh")
if onlypreview==0:
# Get nodes and their coordinates
nodeTags, nodeCoords, _ = gmsh.model.mesh.getNodes()
# Type,number of elements,
elementTypes, elementTags, elementNodeTags = gmsh.model.mesh.getElements(3)
elementTypes = elementTypes[0]
elementTags = elementTags[0]
elementNodeTags = elementNodeTags[0]
    # Launch the GUI to preview the geometry and mesh using the gmsh application.
    # Script pauses until the gmsh application is closed manually
if preview_geom_mesh:
gmsh.fltk.run()
if log_flag:
log = gmsh.logger.get()
gmsh.logger.stop()
# close gmsh
gmsh.finalize()
if onlypreview == 0:
#=====================================================================
# Data Extraction: processing data collected from gmsh
#=====================================================================
numElem = len(elementTags)
if meshalgo2d==8 or RecombineTriMesh==1: # processing based on shape of solid elements in mesh
elemtonodes = np.reshape(elementNodeTags,(-1,8)) # quad/hex elements -> 8 unique coordinates
else:
elemtonodes = np.reshape(elementNodeTags,(-1,4)) # tetra hedral elements -> 4 unique coordinates
last4nodes = np.transpose(np.asarray([elemtonodes[:,3]])) # when represented in terms of hexahedral coordinates, the last node is repeated 4 times -> resulting in 8 nodes
last4nodes = np.repeat(last4nodes,4,axis=1)
elemtonodes = np.concatenate([elemtonodes,last4nodes],axis=1)
node_coord = np.round(np.transpose(np.vstack([nodeCoords[0::3],nodeCoords[1::3],nodeCoords[2::3]])),16)
numNode = len(node_coord)
assert(len(node_coord) == len(nodeTags))
assert(len(elemtonodes) == numElem)
print('Total number of elements in 1st ply:\t',numElem)
print('Total number of nodes in 1st ply:\t',numNode)
print('Mesh generation of 1st ply complete...')
#=====================================================================
# Building full model
#=====================================================================
# Add 1st layer mesh data to the database dictionary 'mesh_data'
#---------------------------------------------------------------------
mesh_data = {}
mesh_data[1]={}
mesh_data[1]['nodeCoord'] = np.copy(node_coord)
mesh_data[1]['elemNode'] = np.copy(elemtonodes)
mesh_data[1]['elemidx'] = np.array(list(range(1,numElem+1))).astype('int64')
mesh_data[1]['nodeidx'] = np.copy(nodeTags.astype('int64'))
# To assert that there no duplicate elements sharing same nodes
assert(len(mesh_data[1]['elemNode']) ==
len(np.unique(mesh_data[1]['elemNode'],axis=0)))
# Adding other layer mesh data to the database dictionary 'mesh_data'
#---------------------------------------------------------------------
for ply in range(2,numPly+1):
mesh_data[ply]={}
# Adding thickness to all z coordinates
new_z = mesh_data[ply-1]['nodeCoord'][:,2] + LzPly
mesh_data[ply]['nodeCoord'] = np.transpose(np.vstack([mesh_data[ply-1]['nodeCoord'][:,0],mesh_data[ply-1]['nodeCoord'][:,1],new_z]))
mesh_data[ply]['nodeCoord'] = np.round(mesh_data[ply]['nodeCoord'],16)
mesh_data[ply]['elemidx'] = np.arange( max(mesh_data[ply-1]['elemidx'])+1, max(mesh_data[ply-1]['elemidx'])+1+numElem )
mesh_data[ply]['nodeidx'] = np.arange( max(mesh_data[ply-1]['nodeidx'])+1, max(mesh_data[ply-1]['nodeidx'])+1+numNode )
            mesh_data[ply]['elemNode'] = np.copy(mesh_data[ply-1]['elemNode'] + numNode)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
# if the Wine dataset is temporarily unavailable from the
# UCI machine learning repository, un-comment the following line
# of code to load the dataset from a local path:
# df_wine = pd.read_csv('wine.data', header=None)
# df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
# 'Alcalinity of ash', 'Magnesium', 'Total phenols',
# 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
# 'Color intensity', 'Hue',
# 'OD280/OD315 of diluted wines', 'Proline']
# print(df_wine.head())
#
# print(df_wine.tail())
# X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=0)
#
# sc = StandardScaler()
# X_train_std = sc.fit_transform(X_train)
# X_test_std = sc.transform(X_test)
print(df_wine.shape)
from sklearn.preprocessing import LabelEncoder
X = df_wine.loc[:, 1:].values
y = df_wine.loc[:, 0].values
le = LabelEncoder()
y = le.fit_transform(y)
print(le.classes_)
# 1) Split dataset into ratio 7:3 for training and test sets, respectively.
# Then use pipeline with StandardScaler(), PCA (n=3), and SVM with RBF kernel to fit the
# training set and predict the test set. Report the accuracy score.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=0)
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
# pipe_lr = make_pipeline(StandardScaler(),PCA(n_components=2), LogisticRegression(random_state=1))
# pipe_lr.fit(X_train, y_train)
# y_pred = pipe_lr.predict(X_test)
# print('Test Accuracy: %.3f' % pipe_lr.score(X_test, y_test))
from sklearn.svm import SVC
pipe_lr = make_pipeline(StandardScaler(),PCA(n_components=3), SVC(random_state=1))
pipe_lr.fit(X_train, y_train)
y_pred = pipe_lr.predict(X_test)
print('Test Accuracy: %.3f' % pipe_lr.score(X_test, y_test))
# 2) Use StratifiedKFold cross-validation to report the accuracy score (mean with std).
# What are differences between standard k-fold and StratifiedKFold?
# What are differences between StratifiedKFold and cross_val_score (in [12] and [13] of this notebook)?
# KFold will divide your data set into prespecified number of folds, and every sample must be in one and only one fold.
# A fold is a subset of your dataset.
#
# ShuffleSplit will randomly sample your entire dataset during each iteration to generate a training set and a test set.
# The test_size and train_size parameters control how large the test and training test set should be for each iteration.
# Since you are sampling from the entire dataset during each iteration, values selected during one iteration, could be selected again during another iteration.
#
# Summary: ShuffleSplit works iteratively, KFold just divides the dataset into k folds.
#
# Difference when doing validation
#
# In KFold, during each round you will use one fold as the test set and all the remaining folds as your training set.
# However, in ShuffleSplit, during each round n you should only use the training and test set from iteration n.
# As your data set grows, cross validation time increases, making shufflesplits a more attractive alternate.
# If you can train your algorithm, with a certain percentage of your data as opposed to using all k-1 folds, ShuffleSplit is an attractive option.
from sklearn.model_selection import KFold
from sklearn import svm
svc = svm.SVC(C=1, kernel='linear')
kf = KFold(n_splits=10)
kf.get_n_splits(X)
# print(kf)
KF_scores = list()
for train_index, test_index in kf.split(X):
# print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
KF_scores.append(svc.fit(X_train, y_train).score(X_test, y_test))
print('\nKF_scores: %s' % KF_scores)
print('KF accuracy: %.3f +/- %.3f' % (np.mean(KF_scores), np.std(KF_scores)))
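# Sketch for the StratifiedKFold part of question 2 (not in the original
# notebook): StratifiedKFold keeps the class proportions of y in every fold,
# whereas plain KFold just slices the data in order.
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=10)
SKF_scores = [svc.fit(X[train_idx], y[train_idx]).score(X[test_idx], y[test_idx])
              for train_idx, test_idx in skf.split(X, y)]
print('StratifiedKF accuracy: %.3f +/- %.3f' % (np.mean(SKF_scores), np.std(SKF_scores)))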
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import multilabel_confusion_matrix
import json
import os
import numpy as np
import pandas as pd
import wandb
from wandb.keras import WandbCallback
wandb.login()
# Read in image and label arrays
X = np.load('x_punk.npy')
Y = np.load('y_punk.npy', allow_pickle=True)
# Split into train validation and test.
# No need to shuffle as Punks are randomly generated.
x_train = X[:8000]
y_train = Y[:8000].astype('float32')
x_val = X[8000:9000]
y_val = Y[8000:9000].astype('float32')
x_test = X[9000:]
y_test = Y[9000:].astype('float32')
# Form a baseline prediction for prediction punk type & accessory
baseline_prediction = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,0] for i in range(1000)]).astype('float32')
accuracy = 0
for i in range(1000):
    # per-sample accuracy: fraction of the 92 attribute slots predicted correctly
    accuracy += (92 - abs(y_test[i] - baseline_prediction[i]).sum()) / 92
baseline_acc = accuracy / 1000
# Define train and test steps
def train_step(x, y, model, optimizer, loss_fn, train_acc_metric):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
def test_step(x, y, model, loss_fn, val_acc_metric):
val_logits = model(x, training=False)
loss_value = loss_fn(y, val_logits)
val_acc_metric.update_state(y, val_logits)
return loss_value
def train(train_dataset,
val_dataset,
model,
optimizer,
loss_fn,
train_acc_metric,
val_acc_metric,
epochs=10,
log_step=200,
val_log_step=50):
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
train_loss = []
val_loss = []
# Iterate over the batches of the dataset
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train,
model, optimizer,
loss_fn, train_acc_metric)
train_loss.append(float(loss_value))
# Run a validation loop at the end of each epoch
for step, (x_batch_val, y_batch_val) in enumerate(val_dataset):
val_loss_value = test_step(x_batch_val, y_batch_val,
model, loss_fn,
val_acc_metric)
val_loss.append(float(val_loss_value))
# Display metrics at the end of each epoch
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
val_acc = val_acc_metric.result()
print("Validation acc: %.4f" % (float(val_acc),))
# Reset metrics at the end of each epoch
train_acc_metric.reset_states()
val_acc_metric.reset_states()
# Log metrics using wandb.log
wandb.log({'epochs': epoch,
'loss': np.mean(train_loss),
'acc': float(train_acc),
                   'val_loss': np.mean(val_loss),
                   'val_acc': float(val_acc)})
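# Sketch of how the pieces above could be wired together (not from the
# original script): the model architecture, batch size and optimizer settings
# are placeholders, and wandb.init(...) must be called before train().
def build_example_setup(batch_size=64):
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
    val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size)
    model = keras.Sequential([
        layers.Flatten(input_shape=x_train.shape[1:]),
        layers.Dense(128, activation='relu'),
        layers.Dense(92),  # one logit per punk type/accessory slot
    ])
    loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
    optimizer = keras.optimizers.Adam()
    train_acc_metric = keras.metrics.BinaryAccuracy()
    val_acc_metric = keras.metrics.BinaryAccuracy()
    return (train_dataset, val_dataset, model, optimizer, loss_fn,
            train_acc_metric, val_acc_metric)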
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 07:03, 18/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import exp, sign, ones, mean, multiply
from numpy.random import uniform, randint, normal, random, choice
from copy import deepcopy
from mealpy.root import Root
class BaseEO(Root):
"""
The original version of: Equilibrium Optimizer (EO)
(Equilibrium Optimizer: A Novel Optimization Algorithm)
Link:
https://doi.org/10.1016/j.knosys.2019.105190
https://www.mathworks.com/matlabcentral/fileexchange/73352-equilibrium-optimizer-eo
"""
def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)
self.epoch = epoch
self.pop_size = pop_size
self.V = 1
self.a1 = 2
self.a2 = 1
self.GP = 0.5
def train(self):
#c_eq1 = [None, float("inf")] # it is global best position
c_eq2 = [None, float("inf")]
c_eq3 = [None, float("inf")]
c_eq4 = [None, float("inf")]
# ---------------- Memory saving-------------------
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
c_eq1 = deepcopy(g_best)
for epoch in range(0, self.epoch):
for i in range(0, self.pop_size):
if pop[i][self.ID_FIT] < c_eq1[self.ID_FIT]:
c_eq1 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq2[self.ID_FIT]:
c_eq2 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq3[self.ID_FIT]:
c_eq3 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq3[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq4[self.ID_FIT]:
c_eq4 = deepcopy(pop[i])
# make equilibrium pool
c_eq_ave = (c_eq1[self.ID_POS] + c_eq2[self.ID_POS] + c_eq3[self.ID_POS] + c_eq4[self.ID_POS]) / 4
fit_ave = self.get_fitness_position(c_eq_ave)
c_pool = [c_eq1, c_eq2, c_eq3, c_eq4, [c_eq_ave, fit_ave]]
# Eq. 9
t = (1 - epoch/self.epoch) ** (self.a2 * epoch / self.epoch)
for i in range(0, self.pop_size):
lamda = uniform(0, 1, self.problem_size) # lambda in Eq. 11
r = uniform(0, 1, self.problem_size) # r in Eq. 11
c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS] # random selection 1 of candidate from the pool
f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0) # Eq. 11
r1 = uniform()
r2 = uniform() # r1, r2 in Eq. 15
gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP) # Eq. 15
g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS]) # Eq. 14
g = g0 * f # Eq. 13
temp = c_eq + (pop[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f) # Eq. 16
fit = self.get_fitness_position(temp)
pop[i] = [temp, fit]
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
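# Quick usage sketch (not part of mealpy): minimise a simple sphere function
# with the base EO solver; the hyper-parameters below are arbitrary.
def _demo_base_eo():
    def sphere(solution):
        return (solution ** 2).sum()
    model = BaseEO(obj_func=sphere, lb=[-10] * 20, ub=[10] * 20, problem_size=20,
                   verbose=False, epoch=100, pop_size=50)
    best_pos, best_fit, loss_history = model.train()
    return best_fit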
class ModifiedEO(BaseEO):
"""
Original version of: Modified Equilibrium Optimizer (MEO)
(An efficient equilibrium optimizer with mutation strategy for numerical optimization)
Link:
https://doi.org/10.1016/j.asoc.2020.106542
"""
def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
BaseEO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
def _make_equilibrium_pool__(self, list_equilibrium=None):
pos_list = [item[self.ID_POS] for item in list_equilibrium]
pos_mean = mean(pos_list, axis=0)
fit = self.get_fitness_position(pos_mean)
list_equilibrium.append([pos_mean, fit])
return list_equilibrium
def train(self):
# Initialization
pop_len = int(self.pop_size/3)
pop = [self.create_solution() for _ in range(self.pop_size)]
# ---------------- Memory saving-------------------
# make equilibrium pool
pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT])
c_eq_list = deepcopy(pop_sorted[:4])
g_best = deepcopy(c_eq_list[0])
c_pool = self._make_equilibrium_pool__(c_eq_list)
for epoch in range(0, self.epoch):
# Eq. 5
t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)
for i in range(0, self.pop_size):
                lamda = uniform(0, 1, self.problem_size)
'''
Created on Dec 13, 2013
@author: benni
'''
import numpy as np
import constants
# import modleon as md
import tsaiwu as md
from error_functions import OuterNewtonError, OuterNewtonConvergenceError
import umat_driver
import time
# mixed stress/strain driven
def strain_stress_update(sigma_old, delta_sigma,
epsilon_old, delta_epsilon,
delta_sigma_def, delta_epsilon_def,
state_vars_old,
umat_aux_vars, rot_angles):
# performs outer newton iteration
# i.e. computes strain vector at the end of a load/stress increment
#
# input values:
# ------------
# sigma_old stress vector at the beginning of the load increment
# delta_sigma stress increment
# epsilon_old strain vector at the beginning of the load increment
# delta_epsilon strain increment
# delta_sigma_def boolean list with active components of stress increment vector (active/defined means True, inactive/undefined means False)
# delta_epsilon_def boolean list with active components of strain increment vector (active/defined means True, inactive/undefined means False)
# state_vars_old state variables array at the beginning of the step
# umat_aux_vars dictionary with auxiliary unimportant variables needed by umat
# rot_angles tuple with rotation angles which define the directions of the material axes of the rock within the global coordinate system
# alpha: rotation angle, around z-axis, eastwards positive (zyz convention)
# beta: rotation angle, around rotated y-axis, downwards positive (zyz convention)
# gamma: third rotation angle, around rotated z-axis, eastwards positive (zyz convention)
#
# return values:
# --------------
# epsilon strain vector at the end of the load increment
# state_vars state variables array at the end of the load increment
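    # Illustrative example (not part of the original driver): for a
    # stress-driven increment in component 0 with the remaining strain
    # increments prescribed, the complementary flag vectors could look like
    #     delta_sigma_def   = np.array([True, False, False, False, False, False])
    #     delta_epsilon_def = np.array([False, True, True, True, True, True])
    # i.e. every component must be defined in exactly one of the two vectors.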
# -----------------------------------------------------
# DATA CHECK
# -----------------------------------------------------
# list for number of inner iterations performed within each outer iteration
no_inner_it = [-1 for i in range(constants.max_it)]
# unpacking of rotation angles
alpha, beta, gamma = rot_angles
# rotate old stress and old strain vector into material direction
sigma_old_rot = md.stress_routines.rotate_sigma(sigma_old, alpha, beta, gamma) # rotated old stress vector of last step
epsilon_old_rot = md.stress_routines.rotate_epsilon(epsilon_old, alpha, beta, gamma) # rotated old strain vector of last step
# check delta_sigma_def and delta_epsilon_def
for i in range(6):
# check if ith entry of delta_sigma_def equals ith entry of delta_epsilon_def
if delta_sigma_def[i] == delta_epsilon_def[i]:
# entries are the same, raise exception
raise OuterNewtonError
# -----------------------------------------------------
# DATA PREPARATION
# -----------------------------------------------------
# number of unknown stress components
n_sigma_undef = sum(delta_sigma_def == False )
#print "number of unknown stress components: ", n_sigma_undef
# number of unknown strain components
#n_epsilon_undef = 6-n_sigma_undef
#print "number of unknown strain components: ", n_epsilon_undef, "\n"
# create list with indices of known delta sigma components
# and unknown delta sigma components
ind_dsig_def = [i for i in range(6) if delta_sigma_def[i]==True]
ind_dsig_undef = [i for i in range(6) if delta_sigma_def[i]==False]
#print "indices of defined stress components: ", ind_dsig_def
#print "indices of undefined stress components: ", ind_dsig_undef, "\n"
# create list with indices of known delta epsilon components
# and unknown delta epsilon components
ind_deps_def = ind_dsig_undef
ind_deps_undef = ind_dsig_def
#print "indices of defined strain components: ", ind_deps_def
#print "indices of undefined strain components: ", ind_deps_undef, "\n"
# -----------------------------------------------------
# INITIAL COMPUTATIONS
# -----------------------------------------------------
# define vector of unknown quantities and initialize it with converged values from the last step
y = np.empty((6,))
y[ind_dsig_undef] = sigma_old[ind_dsig_undef]
y[ind_deps_undef] = epsilon_old[ind_deps_undef]
# define vector for increment of unknown quantities (are computed in newton iteration)
delta_y = np.zeros_like(y)
# define first approximation of updated strain vector
# undefined components are set to converged values of last step
# defined components are set to defined value
eps_tmp = np.array(epsilon_old)
eps_tmp[ind_deps_def] = eps_tmp[ind_deps_def] + delta_epsilon[ind_deps_def]
#print "first approximation of updated strain vector: ", eps_tmp, "\n"
# define first approximation of updated stess vector
# undefined components are set to converged values of last step
# defined components are set to defined value
sigma_tmp = np.array(sigma_old)
sigma_tmp[ind_dsig_def] = sigma_tmp[ind_dsig_def] + delta_sigma[ind_dsig_def]
#print "first approximation of updated stress vector: ", sigma_tmp, "\n"
# initialize temporary elastoplastic tangent as empty array
C_ep_tmp = np.empty((6,6), order='F')
# initialize jacobian as empty array
jac = np.empty_like(C_ep_tmp)
# -----------------------------------------------------
# BEGIN NEWTON ITERATION
# -----------------------------------------------------
# define temporary variables
sig_umat =np.empty_like(sigma_old) # temporary stress vector from umat
statev_tmp = np.empty_like(state_vars_old) # temporary state variables from umat
delta_eps_tmp = np.empty_like(epsilon_old) # temporary strain increment vector as input to umat
    sig_umat_rot = np.empty_like(sig_umat)
'''
------------------------------------------------------------------------
Functions for generating demographic objects necessary for the OG-USA
model
This module defines the following function(s):
get_fert()
get_mort()
pop_rebin()
get_imm_resid()
immsolve()
get_pop_objs()
------------------------------------------------------------------------
'''
import os
import pickle
import numpy as np
import pandas as pd
import scipy.optimize as opt
import scipy.interpolate as si
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import parameter_plots as pp
# create output directory for figures
CUR_PATH = os.path.split(os.path.abspath(__file__))[0]
OUTPUT_DIR = os.path.join(CUR_PATH, 'OUTPUT', 'Demographics')
if os.access(OUTPUT_DIR, os.F_OK) is False:
os.makedirs(OUTPUT_DIR)
'''
------------------------------------------------------------------------
Define functions
------------------------------------------------------------------------
'''
def get_true_demog_data(min_age, max_age):
'''
Return the true demographic data for a country.
Args:
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
Returns:
fert_data (Numpy array): fertility rates for each model period of life
mort_data (Numpy array): mortality rates for each model period of life
imm_data (Numpy array): immigration rates for each model period of life
pop_data (Numpy array): population for each model period of life
'''
# Filepaths
fert_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'fert.p')
mort_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'mort.p')
pop_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'pop.p')
imm_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'imm.p')
# Load data
fert_data = pickle.load(open(fert_filepath, 'rb'))
mort_data = pickle.load(open(mort_filepath, 'rb'))
pop_data = pickle.load(open(pop_filepath, 'rb'))
imm_data = pickle.load(open(imm_filepath, 'rb'))
# Take most recent population
pop_2014 = pop_data[2014][max(min_age, 0): min(max_age + 1, len(pop_data[2014]))]
pop_2015 = pop_data[2015][max(min_age, 0): min(max_age + 1, len(pop_data[2015]))]
# Take immigration as average over last 3 years
drop_immyears = sorted(imm_data.columns)[:- 3]
imm_data = imm_data.drop(drop_immyears, axis=1)
imm_data = imm_data.mean(axis=1)
return pop_2014, pop_2015, fert_data[2014], mort_data[2014], imm_data
def select_fert_data(fert, set_zeroes=False):
new_fert = fert[fert['AgeDef'] == 'ARDY']
new_fert = new_fert[new_fert['Collection'] == 'HFD']
new_fert = new_fert[(new_fert['RefCode'] == 'JPN_11')]
new_fert.drop(['AgeDef', 'Collection', 'RefCode'], axis=1, inplace=True)
new_fert.columns = ['Year', 'Age', 'Values']
if set_zeroes:
new_fert['Values'][new_fert['Age'] == 14] = 0
new_fert['Values'][new_fert['Age'] == 15] = 0
new_fert['Values'][new_fert['Age'] == 49] = 0
new_fert['Values'][new_fert['Age'] == 50] = 0
return new_fert.astype(float)
# a = get_fert(100, 0, 99, 'jpn', 'dynamic_partial')
def get_fert(totpers, min_age, max_age, graph=False, demog_files=[False, False, False]):
'''
Generate a vector of fertility rates by model period
age that corresponds to the fertility rate data by age in years.
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
demog_files (Pandas dataframe): alternate demographic forecasts
Returns:
fert_rates (Numpy array): fertility rates for each model period of life
'''
fert_all, mort_all, imm_all = demog_files
# Get data
curr_pop, _, curr_fert, _, _ = get_true_demog_data(min_age, max_age)
# Birth ages
birth_ages = np.arange(14, 51)
# Population Distribution
curr_pop_pct = curr_pop / curr_pop.sum()
if (min_age == 1) and (max_age == 100) and (totpers == 100) and (not graph):
fert_rates = np.zeros(totpers)
# Births from 14-50, but age start at 0
fert_rates[15:52] = curr_fert
return fert_rates
### VARIABLE PREPARATION
num_bins = max_age - min_age + 1
binsize = num_bins / totpers
num_sub_bins = float(10000)
len_subbins = (np.float64(num_bins * num_sub_bins)) / totpers
### POPULATION CREATION
ages = np.linspace(max(min_age, 0), min(max_age, 99), curr_pop_pct.shape[0])
pop_func = si.splrep(ages, curr_pop_pct)
new_bins = np.linspace(max(min_age, 0), min(max_age, 99), int(num_sub_bins * (num_bins - 1)), dtype=float)
curr_pop_sub = si.splev(new_bins, pop_func)
curr_pop_sub = curr_pop_sub / curr_pop_sub.sum()
#### AGE BIN CREATION
# Calculate implied fertility rates in sub-bins of curr_fert
fert_func = si.splrep(birth_ages, curr_fert)
fert_rates_sub = np.zeros(curr_pop_sub.shape)
age_sub = (np.linspace(np.float64(binsize) / num_sub_bins + np.float64(min_age), np.float64(max_age), int(num_sub_bins * (num_bins - 1))) - 0.5 * np.float64(binsize) / num_sub_bins)
# Fill in fertility rates
pred_ind = (age_sub >= birth_ages[0]) * (age_sub <= birth_ages[-1]) # Makes sure it is inside valid range
age_pred = age_sub[pred_ind] # Gets age_sub in the valid range by applying pred_ind
fert_rates_sub[pred_ind] = np.float64(si.splev(age_pred, fert_func))
fert_rates_sub[fert_rates_sub < 0] = 0
fert_rates = np.zeros(totpers)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
if i == totpers - 1:
end_sub_bin += 1
fert_rates[i] = ((
curr_pop_sub[beg_sub_bin:end_sub_bin] *
fert_rates_sub[beg_sub_bin:end_sub_bin]).sum() /
curr_pop_sub[beg_sub_bin:end_sub_bin].sum())
fert_rates = np.nan_to_num(fert_rates)
if graph:
pp.plot_fert_rates(fert_func, birth_ages, totpers, min_age, max_age, curr_fert, fert_rates, output_dir=OUTPUT_DIR)
if (min_age == 1) and (max_age == 100) and (totpers == 100):
fert_rates = np.zeros(totpers)
# Births from 14-50, but age start at 0
fert_rates[15:52] = curr_fert
return fert_rates
def get_mort(totpers, min_age, max_age, graph=False, mort_file=False):
'''
This function generates a vector of mortality rates by model period
age.
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
demog_files (Pandas dataframe): alternate demographic forecasts
Returns:
mort_rates (Numpy array): mortality rates that correspond to each
period of life
infmort_rate (scalar): infant mortality rate
'''
# Get data
_, _, _, curr_mort, _ = get_true_demog_data(min_age, max_age)
# Mortality ages
mort_ages = np.linspace(0, 99, 100).astype(int)
# Infant Mortality Rate
infmort_rate = curr_mort[0]
if (min_age == 1) and (max_age == 100) and (totpers == 100) and (not graph):
return curr_mort, 0 # infmort_rate
### VARIABLE PREPARATION
num_bins = max_age - min_age + 1
binsize = num_bins / totpers
num_sub_bins = int(100)
len_subbins = ((np.float64(num_bins * num_sub_bins)) / totpers)
#### AGE BIN CREATION
# Calculate implied mortality rates in sub-bins of curr_mort
mort_func = si.splrep(mort_ages, curr_mort)
mort_sub = (np.linspace(np.float64(binsize) / num_sub_bins + np.float64(min_age), np.float64(max_age), int(num_sub_bins * (num_bins - 1))) - 0.5 * np.float64(binsize) / num_sub_bins) # CORRECT TO NUM_BINS NOT -1
# Fill in mortality rates
mort_rates_sub_orig = 1 - si.splev(mort_sub, mort_func)
mort_rates_sub_orig[mort_rates_sub_orig > 1] = 1
mort_rates_sub_orig[mort_rates_sub_orig < 0] = 0
mort_rates_sub = np.zeros(mort_rates_sub_orig.shape, dtype=float)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * num_sub_bins))
end_sub_bin = int(np.rint((i + 1) * num_sub_bins))
if i == totpers - 1:
end_sub_bin += 1
tot_period_surv = (np.log(mort_rates_sub_orig[beg_sub_bin:end_sub_bin]) ).sum()
end_surv = np.log(1 - curr_mort[min_age:][i])
if tot_period_surv != 0:
power = end_surv / tot_period_surv
else:
power = 0
mort_rates_sub[beg_sub_bin:end_sub_bin] = mort_rates_sub_orig[beg_sub_bin:end_sub_bin] ** power
mort_rates = np.zeros(totpers)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
if i == totpers - 1:
end_sub_bin += 1
mort_rates[i] = 1 - mort_rates_sub[beg_sub_bin:end_sub_bin].prod()
mort_rates[-1] = 1 # Mortality rate in last period is set to 1
if graph:
pp.plot_mort_rates_data(totpers, min_age, max_age, mort_ages[max(min_age, 0):min(max_age + 1, 100)],
curr_mort[max(min_age, 0):min(max_age + 1, 100)], infmort_rate,
mort_rates, output_dir=OUTPUT_DIR)
if (min_age == 1) and (max_age == 100) and (totpers == 100):
mort_rates = curr_mort
return mort_rates, 0 # infmort_rate
def pop_rebin(curr_pop_dist, totpers_new):
'''
For cases in which totpers (E+S) is less than the number of periods
in the population distribution data, this function calculates a new
population distribution vector with totpers (E+S) elements.
Args:
curr_pop_dist (Numpy array): population distribution over N
periods
totpers_new (int): number of periods to which we are
transforming the population distribution, >= 3
Returns:
curr_pop_new (Numpy array): new population distribution over
totpers (E+S) periods that approximates curr_pop_dist
'''
# Number of periods in original data
assert totpers_new >= 3
totpers_orig = len(curr_pop_dist)
if int(totpers_new) == totpers_orig:
curr_pop_new = curr_pop_dist
elif int(totpers_new) < totpers_orig:
num_sub_bins = float(10000)
ages = np.linspace(0, totpers_orig - 1, totpers_orig)
pop_func = si.splrep(ages, curr_pop_dist)
new_bins = np.linspace(0, totpers_orig - 1,\
int(num_sub_bins * totpers_orig))
pop_ests = si.splev(new_bins, pop_func)
len_subbins = ((np.float64(totpers_orig * num_sub_bins)) /
totpers_new)
curr_pop_new = np.zeros(totpers_new, dtype=np.float64)
for i in range(totpers_new):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
curr_pop_new[i] = \
np.average(pop_ests[beg_sub_bin:end_sub_bin])
# Return curr_pop_new to single precision float (float32)
# datatype
curr_pop_new = np.float32(curr_pop_new) * np.sum(curr_pop_dist) / np.sum(curr_pop_new) # Adjust sum
return curr_pop_new
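
def _pop_rebin_example():
    '''
    Illustrative sketch (not part of the original module): rebin a synthetic
    100-period population vector down to 80 model periods. The total
    population is preserved up to float32 rounding because pop_rebin rescales
    its output by np.sum(curr_pop_dist) / np.sum(curr_pop_new).
    '''
    example_pop = np.linspace(1.0, 2.0, 100)
    rebinned = pop_rebin(example_pop, 80)
    return rebinned, example_pop.sum(), rebinned.sum()
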
def predict_population(fert_rate, mort_rate, imm_rate, pop_data):
'''
Predict population as pop_{s+1,t+1} = pop_{s,t}(1 - mort_{t-1})) + pop_{s+1,t}(imm_{t-1}),
and setting pop_{0,t+1} = pop_t * fert_t
'''
# First, calculate births
if len(fert_rate) == 100:
births = fert_rate * pop_data / 2
else:
# Births from 14-50, but age start at 0
births = fert_rate * pop_data[15:52] / 2
births = np.sum(births)
# Second, calculate survivors
survivors = (1 - mort_rate) * pop_data
survivors = np.roll(survivors, 1)
# Third, correct births
survivors[0] = births
    # Fourth, calculate immigration
imm = imm_rate * pop_data
    # Fifth, calculate predicted population
pred_pop = survivors + imm
return pred_pop
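
# Worked reading of predict_population (derived from the code above, not part
# of the original module):
#     pred_pop[0] = sum(fert_rate * pop_data / 2) + imm_rate[0] * pop_data[0]
#     pred_pop[s] = (1 - mort_rate[s - 1]) * pop_data[s - 1]
#                   + imm_rate[s] * pop_data[s]            for s >= 1
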
def immsolve(imm_rates, *args):
'''
This function generates a vector of errors representing the
difference in two consecutive periods stationary population
distributions. This vector of differences is the zero-function
objective used to solve for the immigration rates vector, similar to
the original immigration rates vector from util.calc_imm_resid(), that
sets the steady-state population distribution by age equal to the
population distribution in period int(1.5*S)
Args:
imm_rates (Numpy array):immigration rates that correspond to
each period of life, length E+S
args (tuple): (fert_rates, mort_rates, infmort_rate, omega_cur,
g_n_SS)
Returns:
omega_errs (Numpy array): difference between omega_new and
omega_cur_pct, length E+S
'''
fert_rates, mort_rates, infmort_rate, omega_cur_lev, g_n_SS = args
omega_cur_pct = omega_cur_lev / omega_cur_lev.sum()
new_pop = predict_population(fert_rates, mort_rates, imm_rates, omega_cur_lev)
omega_new = new_pop / new_pop.sum()
omega_errs = omega_new - omega_cur_pct
return omega_errs
def calc_imm_resid(fert_t_minus_1, mort_t_minus_1, pop_t_minus_1, pop_t):
'''
Calculate immigration rate in year t
as (pop_t - pop_{t-1}(1 - mort_{t-1})) / (pop_{t-1}),
and setting pop_t_0 = pop_{t-1} * fert_{t-1}
'''
# First, calculate births
if len(fert_t_minus_1) == 100:
births = fert_t_minus_1 * pop_t_minus_1 / 2
else:
# Births from 14-50, but age start at 0
births = fert_t_minus_1 * pop_t_minus_1[15:52] / 2
births = np.sum(births)
# Second, calculate deaths
deaths = mort_t_minus_1 * pop_t_minus_1
deaths = np.roll(deaths, 1)
# Third, calculate predicted population
pred_pop = np.roll(pop_t_minus_1, 1) - deaths
pred_pop[0] = births
# Fourth, calculate immigration rate
imm = (pop_t - pred_pop) / (pop_t_minus_1)
return imm
def get_pop_objs(E, S, T, min_age, max_age, curr_year, GraphDiag=True):
'''
This function produces the demographics objects to be used in the
OG-USA model package.
Args:
E (int): number of model periods in which agent is not
economically active, >= 1
S (int): number of model periods in which agent is economically
active, >= 3
T (int): number of periods to be simulated in TPI, > 2*S
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
curr_year (int): current year for which analysis will begin,
>= 2020
GraphDiag (bool): =True if want graphical output and printed
diagnostics
Returns:
omega_path_S (Numpy array), time path of the population
distribution from the current state to the steady-state,
size T+S x S
g_n_SS (scalar): steady-state population growth rate
omega_SS (Numpy array): normalized steady-state population
distribution, length S
surv_rates (Numpy array): survival rates that correspond to
            each model period of life, length S
mort_rates (Numpy array): mortality rates that correspond to
each model period of life, length S
g_n_path (Numpy array): population growth rates over the time
path, length T + S
'''
# Prepare demographics for data_year
data_year = 2015
pop_prev_data, pop_yr_data, _, _, imm_rates_orig = get_true_demog_data(min_age, max_age)
pop_prev_rebin = pop_rebin(pop_prev_data, E + S)
pop_yr_rebin = pop_rebin(pop_yr_data, E + S)
fert_rates = get_fert(E + S, min_age, max_age, graph=False)
mort_rates, infmort_rate = get_mort(E + S, min_age, max_age, graph=False)
mort_rates_S = mort_rates[-S:]
mort_rates_S = np.expand_dims(mort_rates_S, 1)
if not ((min_age == 1) and (max_age == 100) and (E + S == 100)):
imm_rates_orig = calc_imm_resid(fert_rates, mort_rates, pop_prev_rebin, pop_yr_rebin)
ages = np.arange(min_age, max_age + 1)
pop_yr_pct = pop_yr_rebin / np.sum(pop_yr_rebin)
OMEGA_orig = np.zeros((E + S, E + S))
OMEGA_orig[0, :] = fert_rates / 2
OMEGA_orig[1:, :-1] += np.diag(1 - mort_rates[:-1])
OMEGA_orig += np.diag(imm_rates_orig)
# Solve for steady-state population growth rate and steady-state
# population distribution by age using eigenvalue and eigenvector
# decomposition
eigvalues, eigvectors = np.linalg.eig(OMEGA_orig)
g_n_SS = (eigvalues[np.isreal(eigvalues)].real).max() - 1
eigvec_raw = eigvectors[:, (eigvalues[np.isreal(eigvalues)].real).argmax()].real
omega_SS_orig = eigvec_raw / eigvec_raw.sum()
# Age most recent population data to the current year of analysis
pop_past = pop_yr_rebin
pop_curr = np.dot(OMEGA_orig, pop_yr_rebin)
# Age the data to the current year
for per in range(curr_year - data_year - 1):
pop_past = pop_curr.copy()
pop_curr = np.dot(OMEGA_orig, pop_curr)
g_n_curr = ((pop_curr[-S:].sum() - pop_past[-S:].sum()) / pop_past[-S:].sum())
omega_S_preTP = pop_curr[-S:] / pop_curr[-S:].sum()
omega_S_preTP = np.expand_dims(omega_S_preTP, 1)
# Generate time path of nonstationary population
omega_path_lev = np.zeros((E + S, T + S))
omega_path_lev[:, 0] = pop_curr.copy()
for per in range(1, T + S):
pop_curr = np.dot(OMEGA_orig, pop_curr)
omega_path_lev[:, per] = pop_curr.copy()
# Force the population distribution after 1.5*S periods to be the
# steady-state distribution by adjusting immigration rates, holding
# constant mortality, fertility, and SS growth rates
imm_tol = 1e-14
fixper = int(1.5 * S)
omega_SSfx = (omega_path_lev[:, fixper] / omega_path_lev[:, fixper].sum())
imm_objs = (fert_rates, mort_rates, infmort_rate, omega_path_lev[:, fixper], g_n_SS)
imm_fulloutput = opt.fsolve(immsolve, imm_rates_orig, args=(imm_objs), full_output=True, xtol=imm_tol)
imm_rates_adj = imm_fulloutput[0]
imm_diagdict = imm_fulloutput[1]
omega_path_S = (omega_path_lev[-S:, :] / np.tile(omega_path_lev[-S:, :].sum(axis=0), (S, 1)))
omega_path_S[:, fixper:] = np.tile(omega_path_S[:, fixper].reshape((S, 1)), (1, T + S - fixper))
# Population growth rate
g_n_path = np.zeros(T + S)
g_n_path[0] = g_n_curr.copy()
g_n_path[1:] = ((omega_path_lev[-S:, 1:].sum(axis=0) - omega_path_lev[-S:, :-1].sum(axis=0)) / omega_path_lev[-S:, :-1].sum(axis=0))
g_n_path[fixper + 1:] = g_n_SS
# Generate time path of immigration rates
imm_rates_mat = np.zeros((S, T + S))
for per in range(T + S):
if per <= fixper:
imm_rates_mat[:, per] = imm_rates_orig[E:].copy()
else:
imm_rates_mat[:, per] = imm_rates_adj[E:].copy()
# imm_rates_mat = np.hstack((np.tile(np.reshape(np.array(imm_rates_orig)[E:], (S, 1)), (1, fixper)), np.tile(np.reshape(imm_rates_adj[E:], (S, 1)), (1, T + S - fixper))))
# Generate time path of mortality rates
rho_path_lev = np.zeros((S, T + S + S))
for per in range(T + S + S):
rho_path_lev[:, per] = mort_rates[-S:].copy()
if GraphDiag:
# Check whether original SS population distribution is close to
# the period-T population distribution
omegaSSmaxdif = np.absolute(omega_SS_orig - (omega_path_lev[:, T] / omega_path_lev[:, T].sum())).max()
if omegaSSmaxdif > 0.0003:
print('POP. WARNING: Max. abs. dist. between original SS ' +
"pop. dist'n and period-T pop. dist'n is greater than" +
' 0.0003. It is ' + str(omegaSSmaxdif) + '.')
else:
print('POP. SUCCESS: orig. SS pop. dist is very close to ' +
"period-T pop. dist'n. The maximum absolute " +
'difference is ' + str(omegaSSmaxdif) + '.')
# Plot the adjusted steady-state population distribution versus
# the original population distribution. The difference should be
# small
omegaSSvTmaxdiff = np.absolute(omega_SS_orig - omega_SSfx).max()
if omegaSSvTmaxdiff > 0.0003:
            print('POP. WARNING: The maximum absolute difference ' +
                  'between any two corresponding points in the ' +
                  'original and adjusted steady-state population ' +
                  'distributions is ' + str(omegaSSvTmaxdiff) + ', ' +
                  'which is greater than 0.0003.')
else:
print('POP. SUCCESS: The maximum absolute difference ' +
'between any two corresponding points in the ' +
'original and adjusted steady-state population ' +
'distributions is ' + str(omegaSSvTmaxdiff))
# Print whether or not the adjusted immigration rates solved the
# zero condition
immtol_solved = np.absolute(imm_diagdict['fvec'].max()) < imm_tol
if immtol_solved:
print('POP. SUCCESS: Adjusted immigration rates solved ' +
'with maximum absolute error of ' +
str(np.absolute(imm_diagdict['fvec'].max())) +
', which is less than the tolerance of ' +
str(imm_tol))
else:
print('POP. WARNING: Adjusted immigration rates did not ' +
'solve. Maximum absolute error of ' +
str(np.absolute(imm_diagdict['fvec'].max())) +
' is greater than the tolerance of ' + str(imm_tol))
# Test whether the steady-state growth rates implied by the
# adjusted OMEGA matrix equals the steady-state growth rate of
# the original OMEGA matrix
OMEGA2 = np.zeros((E + S, E + S))
OMEGA2[0, :] = fert_rates / 2
OMEGA2[1:, :-1] += np.diag(1 - mort_rates[:-1])
OMEGA2 += np.diag(imm_rates_adj)
eigvalues2, eigvectors2 = np.linalg.eig(OMEGA2)
        g_n_SS_adj = (eigvalues2[np.isreal(eigvalues2)].real).max() - 1
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code was heavily based on https://github.com/open-mmlab/mmediting
from paddle.io import Dataset
import numpy as np
import cv2
import os
from .preprocess import build_preprocess
from .builder import DATASETS
class MaskSynther(object):
def __init__(self, mask_mode="brush_stroke_mask", **mask_config):
self.mask_mode = mask_mode
self.mask_config = mask_config
preprocess = self.mask_config.get("preprocess", [{
"name": "Transforms",
"input_keys": ["mask"],
"pipeline": {"name": "Transpose"}
}])
self.preprocess = build_preprocess(preprocess)
if self.mask_mode == "file_mask":
file_root = mask_config.get("mask_root", None)
assert file_root is not None, "Please set mask_root for file_mode"
mask_list_file = mask_config.get("mask_list_file", None)
assert mask_list_file is not None, "Please set mask_list_file for file_mode"
with open(mask_list_file, "r") as f:
label_list = f.read().split("\n")[:-1]
self.mask_list = [label.split("\t")[1] for label in label_list]
def __getitem__(self, args):
index, img = args
return getattr(self, self.mask_mode)(index, img)
def file_mask(self, index, img):
mask_file = self.mask_list[index]
mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
c, h, w = img.shape
mask = cv2.resize(mask, (w, h), cv2.INTER_NEAREST)
if self.preprocess is not None:
mask = self.preprocess({"mask": mask})["mask"]
return mask
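    # Hypothetical configuration sketch for file_mask mode (key names follow
    # the ones read in __init__ above; the paths are placeholders only):
    #     synther = MaskSynther(mask_mode="file_mask",
    #                           mask_root="/path/to/masks",
    #                           mask_list_file="/path/to/mask_list.txt")
    #     mask = synther[(index, img)]   # img is a CHW numpy array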
def brush_stroke_mask(self, index, img):
c, h, w = img.shape
mask = np.zeros((h, w))
vert_num_range = self.mask_config.get("num_vertexes", (4, 12))
assert isinstance(vert_num_range, tuple), \
"The type of vert_num_range should be tuple, but got {}".format(type(vert_num_range))
vert_num = np.random.randint(vert_num_range[0], vert_num_range[1])
brush_width_range = self.mask_config.get("brush_width_range", (12, 40))
assert isinstance(brush_width_range, tuple), \
"The type of brush_width_range should be tuple, but got {}".format(type(brush_width_range))
direction_num_range = self.mask_config.get("direction_num_range", (1, 6))
assert isinstance(direction_num_range, tuple), \
"The type of direction_num_range should be tuple, but got {}".format(type(direction_num_range))
angle_mean = self.mask_config.get('angle_mean', np.pi * 2 / 5)
assert isinstance(angle_mean, float), \
"The type of angle_mean should be float, but got {}".format(type(angle_mean))
length_mean_ratio = self.mask_config.get('length_mean_ratio', 1 / 8)
        assert isinstance(length_mean_ratio, float) and length_mean_ratio < 1, \
            "Length_mean_ratio should be <1, and its type should be float, " \
            "but got {} with type {}".format(length_mean_ratio, type(length_mean_ratio))
length_bias_ratio = self.mask_config.get('length_bias_ratio', 1 / 16)
        assert isinstance(length_bias_ratio, float) and length_bias_ratio < 1, \
            "Length_bias_ratio should be <1, and its type should be float, " \
            "but got {} with type {}".format(length_bias_ratio, type(length_bias_ratio))
angle_max_bias = self.mask_config.get('angle_max_bias', np.pi * 2 / 15)
        assert isinstance(angle_max_bias, float), \
            "The type of angle_max_bias should be float, but got {}".format(type(angle_max_bias))
for vert_i in range(vert_num):
start_x = np.random.randint(w)
            start_y = np.random.randint(h)
import numpy as np
import matplotlib.pyplot as plt
import urllib.request
import os
import time
def download(root_path,filename):
if not os.path.exists(root_path):
os.mkdir(root_path)
if not os.path.exists(os.path.join(root_path,filename)):
url = "http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/"+filename
urllib.request.urlretrieve(url,os.path.join(root_path,filename))
print("The data set: %s downloaded!"%os.path.join(root_path,filename))
else:
print("The data set: %s already has downloaded!"%os.path.join(root_path,filename))
def get_data(filename):
data_list = []
with open(filename,mode="r") as f:
flag = False
while True:
line = f.readline()
if "EOF" in line:
break
elif "NODE_COORD_SECTION" in line:
flag = True
elif flag:
tmp = line.strip().split(" ")
data_list.append([float(item) for item in tmp])
return np.array(data_list)
class ACO:
def __init__(self,ant_num,alpha,beta,rho,Q,epoches):
self.ant_num = ant_num
self.alpha = alpha
self.beta = beta
self.rho = rho
self.Q = Q
self.epoches = epoches
self.citys_mat = None
self.E_best = None
self.sol_best = None
self.length_list = None
self.name = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
def solve(self,citys_mat):
self.citys_mat = citys_mat
citys_num = citys_mat.shape[0]
        # Build the pairwise distance (adjacency) matrix between cities
citys_x = citys_mat[:, 0].reshape(citys_num, 1).dot(np.ones((1, citys_num)))
citys_y = citys_mat[:, 1].reshape(citys_num, 1).dot(np.ones((1, citys_num)))
citys_distance = np.sqrt(np.square(citys_x - citys_x.T) + np.square(citys_y - citys_y.T))
        # Initialize the heuristic function (inverse distances)
Heu_f = 1.0/(citys_distance + np.diag([np.inf] * citys_num))
        # Pheromone matrix
Tau_table = np.ones((citys_num,citys_num))
        # Route record table for every ant in each iteration
Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)
        # Best route found in each iteration
Route_best = np.zeros((self.epoches,citys_num),dtype=np.int)
        # Record of the best route length in each iteration
Length_best = np.zeros(self.epoches)
        # Average route length of the ants in each iteration
Length_average = np.zeros(self.epoches)
        # Current route length of each ant in the ongoing iteration
Length_current = np.zeros(self.ant_num)
iter = 0
while iter <self.epoches:
            # Generate the city set table
            # Randomly assign a starting city to every ant
Route_table[:,0]= self.randseed(citys_num)
            # Update the pheromone
Delta_tau = np.zeros((citys_num, citys_num))
for k in range(self.ant_num):
                # Records the set of cities the ant may visit next
                # Cities the ant has already visited
tabu = [Route_table[k,0]]
allow_set = list(set(range(citys_num))-set(tabu))
city_index = Route_table[k,0]
for i in range(1,citys_num):
                    # Initialize the transition probabilities between cities
P_table = np.zeros(len(allow_set))
                    # Compute the transition probabilities between cities
for j in range(len(allow_set)):
P_table[j] = np.power(Tau_table[city_index,allow_set[j]],self.alpha)*\
np.power(Heu_f[city_index,allow_set[j]],self.beta)
P_table = P_table/np.sum(P_table)
                    # Roulette-wheel selection of the next city to visit
#out_prob = np.cumsum(P_table)
while True:
r = np.random.rand()
index_need = np.where(P_table > r)[0]
if len(index_need) >0:
city_index2 = allow_set[index_need[0]]
break
Route_table[k,i] = city_index2
tabu.append(city_index2)
allow_set = list(set(range(0,citys_num))-set(tabu))
city_index = city_index2
tabu.append(tabu[0])
                # Compute the length of this ant's route
for j in range(citys_num):
Length_current[k] = Length_current[k] + citys_distance[tabu[j],tabu[j+1]]
for j in range(citys_num):
Delta_tau[tabu[j],tabu[j+1]] = Delta_tau[tabu[j],tabu[j+1]] + self.Q / Length_current[k]
            # Record the shortest route, its length, and the average route length
Length_best[iter] = np.min(Length_current)
index = np.where(Length_current == np.min(Length_current))[0][0]
Route_best[iter] = Route_table[index]
Length_average[iter] = np.mean(Length_current)
            # Update the pheromone
Tau_table = (1-self.rho)*Tau_table + Delta_tau
#Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)
Length_current = np.zeros(self.ant_num)
print("epoches:%d,best value every epoches%.4f"%(iter, Length_best[iter]))
iter = iter + 1
        self.E_best = np.min(Length_best)
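
# Illustrative usage sketch (not part of the original script; the data file
# name and parameter values are examples only):
#     download("./data", "berlin52.tsp")
#     citys = get_data("./data/berlin52.tsp")[:, 1:]   # keep only x, y columns
#     aco = ACO(ant_num=50, alpha=1, beta=5, rho=0.1, Q=1, epoches=100)
#     aco.solve(citys)
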
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
gridsearch_tuner.py including:
class GridSearchTuner
"""
import copy
import logging
import numpy as np
import nni
from nni.tuner import Tuner
from nni.utils import convert_dict2tuple
TYPE = '_type'
CHOICE = 'choice'
VALUE = '_value'
logger = logging.getLogger('grid_search_AutoML')
class GridSearchTuner(Tuner):
"""
GridSearchTuner will search all the possible configures that the user define in the searchSpace.
The only acceptable types of search space are ``choice``, ``quniform``, ``randint``
Type ``choice`` will select one of the options. Note that it can also be nested.
Type ``quniform`` will receive three values [``low``, ``high``, ``q``],
where [``low``, ``high``] specifies a range and ``q`` specifies the interval.
It will be sampled in a way that the first sampled value is ``low``,
and each of the following values is 'interval' larger than the value in front of it.
Type ``randint`` gives all possible intergers in range[``low``, ``high``). Note that ``high`` is not included.
"""
def __init__(self):
self.count = -1
self.expanded_search_space = []
self.supplement_data = dict()
def _json2parameter(self, ss_spec):
"""
Generate all possible configs for hyperparameters from hyperparameter space.
Parameters
----------
ss_spec : dict or list
Hyperparameter space or the ``_value`` of a hyperparameter
Returns
-------
list or dict
All the candidate choices of hyperparameters. for a hyperparameter, chosen_params
is a list. for multiple hyperparameters (e.g., search space), chosen_params is a dict.
"""
if isinstance(ss_spec, dict):
if '_type' in ss_spec.keys():
_type = ss_spec['_type']
_value = ss_spec['_value']
chosen_params = list()
if _type == 'choice':
for value in _value:
choice = self._json2parameter(value)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
elif _type == 'quniform':
chosen_params = self._parse_quniform(_value)
elif _type == 'randint':
chosen_params = self._parse_randint(_value)
else:
raise RuntimeError("Not supported type: %s" % _type)
else:
chosen_params = dict()
for key in ss_spec.keys():
chosen_params[key] = self._json2parameter(ss_spec[key])
return self._expand_parameters(chosen_params)
elif isinstance(ss_spec, list):
chosen_params = list()
for subspec in ss_spec[1:]:
choice = self._json2parameter(subspec)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
chosen_params = list(map(lambda v: {ss_spec[0]: v}, chosen_params))
else:
chosen_params = copy.deepcopy(ss_spec)
return chosen_params
def _parse_quniform(self, param_value):
"""
Parse type of quniform parameter and return a list
"""
low, high, q = param_value[0], param_value[1], param_value[2]
        return np.clip(np.arange(np.round(low / q), np.round(high / q) + 1) * q, low, high)
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensornetwork as tn
import pytest
import numpy as np
from tensornetwork.backends.abstract_backend import AbstractBackend
import tensornetwork.linalg
import tensornetwork.linalg.node_linalg
def test_replicate_nodes(backend):
a = tn.Node(np.random.rand(10, 10), backend=backend)
b = tn.Node(np.random.rand(10, 10), backend=backend)
c = tn.Node(np.random.rand(10, 10), backend=backend)
tn.connect(a[1], b[0])
tn.connect(b[1], c[0])
[a_copy, b_copy] = tn.replicate_nodes([a, b])
assert b_copy in tn.reachable([a_copy])
assert not set([a_copy, b_copy]).issubset(tn.reachable([c]))
assert len(b_copy.get_all_dangling()) == 1
def test_split_node_full_svd_names(backend):
a = tn.Node(np.random.rand(10, 10), backend=backend)
e1 = a[0]
e2 = a[1]
left, s, right, _, = tn.split_node_full_svd(
a, [e1], [e2],
left_name='left',
middle_name='center',
right_name='right',
left_edge_name='left_edge',
right_edge_name='right_edge')
assert left.name == 'left'
assert s.name == 'center'
assert right.name == 'right'
assert left.edges[-1].name == 'left_edge'
assert s[0].name == 'left_edge'
assert s[1].name == 'right_edge'
assert right.edges[0].name == 'right_edge'
def test_split_node_relative_tolerance(backend):
absolute = tn.Node(np.diag([2.0, 1.0, 0.2, 0.1]), backend=backend)
relative = tn.Node(np.diag([2.0, 1.0, 0.2, 0.1]), backend=backend)
max_truncation_err = 0.2
_, _, trunc_sv_absolute, = tn.split_node(
node=absolute,
left_edges=[absolute[0]],
right_edges=[absolute[1]],
max_truncation_err=max_truncation_err,
relative=False)
_, _, trunc_sv_relative, = tn.split_node(
node=relative,
left_edges=[relative[0]],
right_edges=[relative[1]],
max_truncation_err=max_truncation_err,
relative=True)
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])
def test_split_node_full_svd_relative_tolerance(backend):
absolute = tn.Node(np.diag([2.0, 1.0, 0.2, 0.1]), backend=backend)
relative = tn.Node(np.diag([2.0, 1.0, 0.2, 0.1]), backend=backend)
max_truncation_err = 0.2
_, _, _, trunc_sv_absolute, = tn.split_node_full_svd(
node=absolute,
left_edges=[absolute[0]],
right_edges=[absolute[1]],
max_truncation_err=max_truncation_err,
relative=False)
_, _, _, trunc_sv_relative, = tn.split_node_full_svd(
node=relative,
left_edges=[relative[0]],
right_edges=[relative[1]],
max_truncation_err=max_truncation_err,
relative=True)
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])
def test_split_node_rq_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_rq(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_qr_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_qr(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right, _ = tn.split_node(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_rq_unitarity_complex(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
if backend == "jax":
pytest.skip("Complex QR crashes jax")
a = tn.Node(np.random.rand(3, 3) + 1j * np.random.rand(3, 3), backend=backend)
r, q = tn.split_node_rq(a, [a[0]], [a[1]])
r[1] | q[0]
qbar = tn.linalg.node_linalg.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_rq_unitarity_float(backend):
a = tn.Node(np.random.rand(3, 3), backend=backend)
r, q = tn.split_node_rq(a, [a[0]], [a[1]])
r[1] | q[0]
qbar = tn.linalg.node_linalg.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_rq(backend):
a = tn.Node(np.random.rand(2, 3, 4, 5, 6), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_rq(a, left_edges, right_edges)
tn.check_correct([left, right])
np.testing.assert_allclose(a.tensor, tn.contract(left[3]).tensor)
def test_split_node_qr_unitarity_complex(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
if backend == "jax":
pytest.skip("Complex QR crashes jax")
a = tn.Node(np.random.rand(3, 3) + 1j * np.random.rand(3, 3), backend=backend)
q, r = tn.split_node_qr(a, [a[0]], [a[1]])
q[1] | r[0]
qbar = tn.linalg.node_linalg.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_qr_unitarity_float(backend):
a = tn.Node(np.random.rand(3, 3), backend=backend)
q, r = tn.split_node_qr(a, [a[0]], [a[1]])
q[1] | r[0]
qbar = tn.linalg.node_linalg.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_qr(backend):
a = tn.Node(np.random.rand(2, 3, 4, 5, 6), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_qr(a, left_edges, right_edges)
tn.check_correct([left, right])
np.testing.assert_allclose(a.tensor, tn.contract(left[3]).tensor)
def test_reachable(backend):
nodes = [tn.Node(np.random.rand(2, 2, 2), backend=backend) for _ in range(10)]
_ = [nodes[n][0] ^ nodes[n + 1][1] for n in range(len(nodes) - 1)]
assert set(nodes) == tn.reachable(nodes[0])
def test_reachable_2(backend):
a = tn.Node(np.zeros((3, 5)), backend=backend)
b = tn.Node(np.zeros((3, 4, 5)), backend=backend)
e1 = tn.connect(a[0], b[0])
e2 = tn.connect(a[1], b[2])
nodes = [a, b]
edges = [e1, e2]
assert set(nodes) == tn.reachable(edges[0])
assert set(nodes) == tn.reachable(edges)
def test_reachable_disconnected_1(backend):
nodes = [tn.Node(np.random.rand(2, 2, 2), backend=backend) for _ in range(4)]
nodes[0][1] ^ nodes[1][0]
nodes[2][1] ^ nodes[3][0]
assert set(tn.reachable([nodes[0], nodes[2]])) == set(nodes)
assert set(tn.reachable([nodes[0]])) == {nodes[0], nodes[1]}
assert set(tn.reachable([nodes[1]])) == {nodes[0], nodes[1]}
assert set(tn.reachable([nodes[0], nodes[1]])) == {nodes[0], nodes[1]}
assert set(tn.reachable([nodes[2]])) == {nodes[2], nodes[3]}
assert set(tn.reachable([nodes[3]])) == {nodes[2], nodes[3]}
assert set(tn.reachable([nodes[2], nodes[3]])) == {nodes[2], nodes[3]}
assert set(tn.reachable([nodes[0], nodes[1], nodes[2]])) == set(nodes)
assert set(tn.reachable([nodes[0], nodes[1], nodes[3]])) == set(nodes)
assert set(tn.reachable([nodes[0], nodes[2], nodes[3]])) == set(nodes)
assert set(tn.reachable([nodes[1], nodes[2], nodes[3]])) == set(nodes)
def test_reachable_disconnected_2(backend):
nodes = [tn.Node(np.random.rand(2, 2, 2), backend=backend) for _ in range(4)]
nodes[1][1] ^ nodes[2][0] # connect 2nd and third node
assert set(tn.reachable([nodes[0],
nodes[1]])) == {nodes[0], nodes[1], nodes[2]}
nodes[2][1] ^ nodes[3][0] # connect third and fourth node
assert set(tn.reachable([nodes[0], nodes[1]])) == set(nodes)
def test_reachable_raises(backend):
nodes = [tn.Node(np.random.rand(2, 2, 2), backend=backend), 5]
with pytest.raises(TypeError):
tn.reachable(nodes)
def test_subgraph_sanity(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
a[0] ^ b[0]
edges = tn.get_subgraph_dangling({a})
assert edges == {a[0], a[1]}
def test_subgraph_disconnected_nodes(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
c = tn.Node(np.eye(2), backend=backend)
a[0] ^ b[0]
b[1] ^ c[1]
edges = tn.get_subgraph_dangling({a, c})
assert edges == {a[0], a[1], c[0], c[1]}
def test_full_graph_subgraph_dangling(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
c = tn.Node(np.eye(2), backend=backend)
a[0] ^ b[0]
b[1] ^ c[1]
edges = tn.get_subgraph_dangling({a, b, c})
assert edges == {a[1], c[0]}
def test_reduced_density(backend):
a = tn.Node(np.random.rand(3, 3, 3), name="A", backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name="B", backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name="C", backend=backend)
edges = tn.get_all_edges({a, b, c})
node_dict, edge_dict = tn.reduced_density([a[0], b[1], c[2]])
assert not a[0].is_dangling()
assert not b[1].is_dangling()
assert not c[2].is_dangling()
assert a[1].is_dangling() & a[2].is_dangling()
assert b[0].is_dangling() & b[2].is_dangling()
assert c[0].is_dangling() & c[1].is_dangling()
for node in {a, b, c}:
assert node_dict[node].name == node.name
for edge in edges:
assert edge_dict[edge].name == edge.name
def test_reduced_density_nondangling(backend):
a = tn.Node(np.random.rand(3, 3, 3), name="A", backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name="B", backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name="C", backend=backend)
a[0] ^ b[1]
b[2] ^ c[1]
err_msg = "traced_out_edges must only include dangling edges!"
with pytest.raises(ValueError, match=err_msg):
tn.reduced_density([a[0], b[1], c[1]])
def test_reduced_density_contraction(backend):
if backend == "pytorch":
pytest.skip("pytorch doesn't support complex numbers")
a = tn.Node(
np.array([[0.0, 1.0j], [-1.0j, 0.0]], dtype=np.complex64),
backend=backend)
tn.reduced_density([a[0]])
result = tn.contractors.greedy(tn.reachable(a), ignore_edge_order=True)
np.testing.assert_allclose(result.tensor, np.eye(2))
def test_switch_backend(backend):
a = tn.Node(np.random.rand(3, 3, 3), name="A", backend="numpy")
b = tn.Node(np.random.rand(3, 3, 3), name="B", backend="numpy")
c = tn.Node(np.random.rand(3, 3, 3), name="C", backend="numpy")
nodes = [a, b, c]
tn.switch_backend(nodes, backend)
assert nodes[0].backend.name == backend
def test_split_node_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
tn.split_node(node, left_edges=[], right_edges=[])
def test_split_node_qr_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
tn.split_node_qr(node, left_edges=[], right_edges=[])
def test_split_node_rq_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
tn.split_node_rq(node, left_edges=[], right_edges=[])
def test_split_node_full_svd_of_node_without_backend_raises_error():
node = np.random.rand(3, 3, 3)
with pytest.raises(AttributeError):
tn.split_node_full_svd(node, left_edges=[], right_edges=[])
def test_reachable_raises_value_error():
with pytest.raises(ValueError):
tn.reachable({})
def test_check_correct_raises_value_error_1(backend):
a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
edge = a.edges[0]
edge.node1 = b
edge.node2 = b
with pytest.raises(ValueError):
tn.check_correct({a, b})
def test_check_correct_raises_value_error_2(backend):
a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
edge = a.edges[0]
edge.axis1 = -1
with pytest.raises(ValueError):
tn.check_correct({a, b})
def test_get_all_nodes(backend):
a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
edge = tn.connect(a[0], b[0])
assert tn.get_all_nodes({edge}) == {a, b}
def test_switch_backend_raises_error(backend):
a = tn.Node(np.random.rand(3, 3, 3))
a.backend = AbstractBackend()
with pytest.raises(NotImplementedError):
tn.switch_backend({a}, backend)
def test_split_node_orig_shape(backend):
n1 = tn.Node(np.random.rand(3, 4, 5), backend=backend)
tn.split_node(n1, [n1[0], n1[2]], [n1[1]])
np.testing.assert_allclose(n1.shape, (3, 4, 5))
def test_split_node_full_svd_orig_shape(backend):
n1 = tn.Node(np.random.rand(3, 4, 5), backend=backend)
tn.split_node_full_svd(n1, [n1[0], n1[2]], [n1[1]])
np.testing.assert_allclose(n1.shape, (3, 4, 5))
def test_split_node_rq_orig_shape(backend):
n1 = tn.Node(np.random.rand(3, 4, 5), backend=backend)
tn.split_node_rq(n1, [n1[0], n1[2]], [n1[1]])
np.testing.assert_allclose(n1.shape, (3, 4, 5))
def test_split_node_qr_orig_shape(backend):
n1 = tn.Node(np.random.rand(3, 4, 5), backend=backend)
tn.split_node_qr(n1, [n1[0], n1[2]], [n1[1]])
np.testing.assert_allclose(n1.shape, (3, 4, 5))
def test_get_neighbors(backend):
with tn.DefaultBackend(backend):
a = tn.Node(np.ones((2, 2)))
b = tn.Node(np.ones((2, 2, 2, 2)))
c = tn.Node(np.ones((2, 2, 2)))
d = tn.Node(np.ones((2, 2)))
b[0] ^ a[1]
b[3] ^ c[2]
a[0] ^ d[1]
b[1] ^ b[2]
result = tn.get_neighbors(b)
assert result == [a, c]
def test_get_neighbors_no_duplicates(backend):
with tn.DefaultBackend(backend):
a = tn.Node(np.ones((2, 2, 2)))
b = tn.Node(np.ones((2, 2, 2, 2, 2)))
c = tn.Node(np.ones((2, 2, 2)))
d = tn.Node(np.ones((2, 2)))
b[0] ^ a[0]
b[1] ^ a[1]
b[2] ^ c[0]
a[2] ^ d[1]
b[3] ^ b[4]
result = tn.get_neighbors(b)
assert result == [a, c]
def test_redirect(backend):
n1 = tn.Node(np.random.rand(2, 2, 2), backend=backend)
  n2 = tn.Node(np.random.rand(2, 2, 2), backend=backend)
#!/usr/bin/python
#################################################
# Basic Classifier #
# Constructing a simple data set #
# Linear classifier #
# Nearest neighbors classification #
# Sk. <NAME> #
#################################################
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
# from prob_1 import KDTree
# Global data set for problem 2, 3, 4
N = 5000
mean_0 = [3, 2]
covariance_0 = [[5, 1], [1, 1]]
mean_1 = [8, 5]
covariance_1 = [[5, 0], [0, 2]]
X = np.concatenate((np.random.multivariate_normal(mean_0, covariance_0, N),
np.random.multivariate_normal(mean_1, covariance_1, N)), axis=0)
y = np.concatenate((np.zeros((N, 1), 'int64'),
np.ones((N, 1), 'int64')), axis=0)
mask = np.random.random(2*N) < 0.8
X_training = X[mask]
y_training = y[mask]
mask = np.logical_not(mask)
X_test = X[mask]
y_test = y[mask]
def prob_2():
plt.figure(figsize=(16, 12))
plt.plot(X[:N, 0], X[:N, 1], 'o', markerfacecolor='none', color='#75bbfd', label="class 0")
plt.plot(X[N:, 0], X[N:, 1], 'o', markerfacecolor='none', color='#f97306', label="class 1")
plt.xlabel('x1', fontsize=22)
plt.ylabel('x2', fontsize=22)
plt.suptitle("10000 random data points from a multivariate normal/Gaussian distributions.", fontsize=24)
plt.legend(fontsize='22')
plt.savefig('10000 random data points from a multivariate Gaussian distributions.png')
plt.show()
def prob_3():
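    # Linear classifier fit by ordinary least squares (normal equations):
    # beta = (X^T X)^(-1) X^T y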
beta = np.linalg.inv(X_training.T.dot(X_training)).dot(X_training.T).dot(y_training)
y_hat = X_test.dot(beta)
mask = X_test.dot(beta) < 0.5
y_hat[mask] = 0
mask = np.logical_not(mask)
y_hat[mask] = 1
c = np.count_nonzero(y_hat == y_test) # count the number of true elements in Boolean array
print('The classification accuracy of the algorithm is:', float(c / len(y_test))*100., '%')
y_training_new = y_training.reshape(-1) # To form an 1D array
y_test_new = y_test.reshape(-1)
y_hat_new = y_hat.reshape(-1)
training0 = X_training[y_training_new == 0]
training1 = X_training[y_training_new == 1]
correct0 = X_test[np.logical_and(y_test_new == 0, y_hat_new == 0)]
correct1 = X_test[np.logical_and(y_test_new == 1, y_hat_new == 1)]
incorrect0 = X_test[np.logical_and(y_test_new == 0, y_hat_new == 1)]
incorrect1 = X_test[np.logical_and(y_test_new == 1, y_hat_new == 0)]
plt.figure(figsize=(16, 12))
plt.plot(training0[:, 0], training0[:, 1], 's', markerfacecolor='none', color='#75bbfd', label='Training set elements from class 0')
plt.plot(training1[:, 0], training1[:, 1], 'x', color='#f97306', label='Training set elements from class 1')
plt.plot(correct0[:, 0], correct0[:, 1], 'o', markerfacecolor='none', color='#00FF00', label='Correctly classified test set elements from class 0')
plt.plot(correct1[:, 0], correct1[:, 1], '.', color='#800080', label='Correctly classified test set elements from class 1')
plt.plot(incorrect0[:, 0], incorrect0[:, 1], '*', color='#EE82EE', label='Incorrectly classified test set elements from class 0')
plt.plot(incorrect1[:, 0], incorrect1[:, 1], '+', color='k', label='Incorrectly classified test set elements from class 1')
plt.xlabel('x1', fontsize=22)
plt.ylabel('x2', fontsize=22)
plt.suptitle("Linear Classifier performance map", fontsize=24)
plt.legend()
plt.savefig('Linear Classifier performance map.png')
plt.show()
def prob_4():
KDT = cKDTree(X_training).query(X_test, k=1)
# KDT1 = KDTree(X_training).find_nearest(X_test)
y_hat = y_training[KDT [1]] # Ignoring the location of the neighbors (the second output array)
# y_hat1 = y_training[KDT1 [1]]
c = np.count_nonzero(y_hat == y_test) # count the number of true elements in Boolean array
# c1 = np.count_nonzero(y_hat1 == y_test)
print('The classification accuracy of the KD tree classifier is:', float(c / len(y_test))*100., '%')
# print('The classification accuracy of my own KD tree classifier is:', float(c1 / len(y_test))*100., '%')
y_training_new = y_training.reshape(-1)
y_test_new = y_test.reshape(-1)
y_hat_new = y_hat.reshape(-1)
training0 = X_training[y_training_new == 0]
training1 = X_training[y_training_new == 1]
correct0 = X_test[np.logical_and(y_test_new == 0, y_hat_new == 0)]
correct1 = X_test[np.logical_and(y_test_new == 1, y_hat_new == 1)]
incorrect0 = X_test[np.logical_and(y_test_new == 0, y_hat_new == 1)]
    incorrect1 = X_test[np.logical_and(y_test_new == 1, y_hat_new == 0)]
#!/usr/bin/env python
import os
import subprocess
import shutil
import unittest
import logging
import numpy as np
from test.test_helper import TestHelper, Poller, MessageInFileProbe,\
ExecutionProbe
from plico.utils.configuration import Configuration
from plico.rpc.zmq_remote_procedure_call import ZmqRemoteProcedureCall
from plico.utils.logger import Logger
from plico.rpc.sockets import Sockets
from plico.rpc.zmq_ports import ZmqPorts
from tipico.client.abstract_instrument_client import SnapshotEntry
from tipico_server.utils.constants import Constants
from tipico_server.utils.starter_script_creator import StarterScriptCreator
from tipico_server.utils.process_startup_helper import ProcessStartUpHelper
from tipico_server.instrument_controller.runner import Runner
from tipico_server.process_monitor.runner import Runner as ProcessMonitorRunner
from tipico.client.instrument_client import \
InstrumentClient
import sys
class IntegrationTest(unittest.TestCase):
TEST_DIR= os.path.join(os.path.abspath(os.path.dirname(__file__)),
"./tmp/")
LOG_DIR= os.path.join(TEST_DIR, "log")
CONF_FILE= 'test/integration/conffiles/tipico_server.conf'
CALIB_FOLDER= 'test/integration/calib'
CONF_SECTION= Constants.PROCESS_MONITOR_CONFIG_SECTION
SERVER_LOG_PATH= os.path.join(LOG_DIR, "%s.log" % CONF_SECTION)
BIN_DIR= os.path.join(TEST_DIR, "apps", "bin")
SOURCE_DIR= os.path.join(os.path.abspath(os.path.dirname(__file__)),
"../..")
def setUp(self):
self._setUpBasicLogging()
self.server= None
self._wasSuccessful= False
self._removeTestFolderIfItExists()
self._makeTestDir()
self.configuration= Configuration()
self.configuration.load(self.CONF_FILE)
self.rpc= ZmqRemoteProcedureCall()
calibrationRootDir= self.configuration.calibrationRootDir()
self._setUpCalibrationTempFolder(calibrationRootDir)
def _setUpBasicLogging(self):
logging.basicConfig(level=logging.DEBUG)
self._logger= Logger.of('Integration Test')
def _makeTestDir(self):
os.makedirs(self.TEST_DIR)
os.makedirs(self.LOG_DIR)
os.makedirs(self.BIN_DIR)
def _setUpCalibrationTempFolder(self, calibTempFolder):
shutil.copytree(self.CALIB_FOLDER,
calibTempFolder)
def _removeTestFolderIfItExists(self):
if os.path.exists(self.TEST_DIR):
shutil.rmtree(self.TEST_DIR)
def tearDown(self):
TestHelper.dumpFileToStdout(self.SERVER_LOG_PATH)
if self.server is not None:
TestHelper.terminateSubprocess(self.server)
if self._wasSuccessful:
self._removeTestFolderIfItExists()
def _createStarterScripts(self):
ssc= StarterScriptCreator()
ssc.setInstallationBinDir(self.BIN_DIR)
ssc.setPythonPath(self.SOURCE_DIR)
ssc.setConfigFileDestination(self.CONF_FILE)
ssc.installExecutables()
def _startProcesses(self):
psh= ProcessStartUpHelper()
serverLog= open(os.path.join(self.LOG_DIR, "server.out"), "wb")
self.server= subprocess.Popen(
[sys.executable,
psh.processProcessMonitorStartUpScriptPath(),
self.CONF_FILE,
self.CONF_SECTION],
stdout=serverLog, stderr=serverLog)
Poller(5).check(MessageInFileProbe(
ProcessMonitorRunner.RUNNING_MESSAGE, self.SERVER_LOG_PATH))
def _testProcessesActuallyStarted(self):
controllerLogFile= os.path.join(
self.LOG_DIR,
'%s.log' % Constants.SERVER_1_CONFIG_SECTION)
Poller(5).check(MessageInFileProbe(
Runner.RUNNING_MESSAGE, controllerLogFile))
controller2LogFile= os.path.join(
self.LOG_DIR,
'%s.log' % Constants.SERVER_2_CONFIG_SECTION)
Poller(5).check(MessageInFileProbe(
Runner.RUNNING_MESSAGE, controller2LogFile))
def _buildClients(self):
ports1= ZmqPorts.fromConfiguration(
self.configuration, Constants.SERVER_1_CONFIG_SECTION)
self.instrumentClient1= InstrumentClient(
self.rpc, Sockets(ports1, self.rpc))
ports2= ZmqPorts.fromConfiguration(
self.configuration, Constants.SERVER_2_CONFIG_SECTION)
self.instrumentClient2= InstrumentClient(
self.rpc, Sockets(ports2, self.rpc))
def _testMoveTo(self):
actuatorPosition= np.arange(10)
self.instrumentClient1.moveTo(actuatorPosition)
Poller(3).check(ExecutionProbe(
lambda: self.assertTrue(
np.allclose(
actuatorPosition,
self.instrumentClient1.getPosition()))))
def _checkBackdoor(self):
self.instrumentClient1.execute(
"import numpy as np; "
"self._myarray= np.array([1, 2])")
self.assertEqual(
repr(np.array([1, 2])),
self.instrumentClient1.eval("self._myarray"))
self.instrumentClient1.execute("self._foobar= 42")
self.assertEqual(
"42",
self.instrumentClient1.eval("self._foobar"))
def _testGetStatus(self):
status= self.instrumentClient1.getStatus()
cmdCounter= status.commandCounter()
self.instrumentClient1.moveTo( | np.arange(4) | numpy.arange |
import pandas as pd
import numpy as np
import xgboost as xgb
from tqdm import tqdm
import keras
from sklearn.svm import SVC
from sklearn import preprocessing, decomposition, metrics, pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from nltk import word_tokenize
from nltk.corpus import stopwords
import warnings
from lib import BaseTrain
from lib import Net1, Net2, Net3, Net4
warnings.filterwarnings('ignore')
def multiclass_logloss(actual, predicted, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
:param actual: Array containing the actual target classes
:param predicted: Matrix with class predictions, one probability per class
"""
# Convert 'actual' to a binary array if it's not already:
if len(actual.shape) == 1:
actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
for i, val in enumerate(actual):
actual2[i, val] = 1
actual = actual2
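    # With N samples and M classes, the quantity computed below is
    #   logloss = -(1/N) * sum_i sum_j y_ij * log(p_ij)
    # where y_ij is 1 iff sample i belongs to class j and p_ij is the predicted
    # probability, clipped away from 0 and 1 for numerical stability.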
clip = | np.clip(predicted, eps, 1 - eps) | numpy.clip |
from pathlib import Path
import numpy as np
import gym
from gym import spaces, logger
from gym.utils import seeding
import matplotlib.pyplot as plt
from pycel import ExcelCompiler
class Parameters:
# (Avoid sampling random variables here: they would not be resampled upon reset())
# problem-specific parameters
techs = 3 # number of technologies (Offshore wind power, blue hydrogen, green hydrogen)
# fmt: off
reward_types = 6 # capital expenditure (capex), operating expenditure (opex), revenue, carbon emissions, total jobs supported, total economic impact
steps_per_episode = 20 # number of years in the planning horizon (2031 -> 2050 = 20)
# fmt: on
# This 'Pathways to Net Zero' environment manipulates a spreadsheet loaded in memory. The following 20 columns correspond to years 2031 to 2050 in tabs named 'Outputs' and 'CCUS':
# fmt: off
pathways2Net0ColumnInds = np.array(['P','Q','R','S','T','U','V','W','X','Y','Z','AA','AB','AC','AD','AE','AF','AG','AH','AI'])
# fmt: on
# The following 20 rows correspond to years 2031 to 2050 in tabs named 'BREEZE', 'GALE', and 'STORM':
pathways2Net0RowInds = np.arange(36, 36 + steps_per_episode)
# pathways2Net0ColumnInds[state.step_count] and pathways2Net0RowInds[state.step_count] will locate the current year's column / row respectively
# Multiplicative noise is applied to all costs. The parameters of this randomisation are:
noise_mu = 1.0
noise_sigma = 0.1
noise_clipping = 0.5 # (i.e., costs are reduced by 50% at the most)
noise_sigma_factor = np.sqrt(0.1) # this factor is applied to make CCUS capex & opex less volatile than other costs
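    # A minimal sketch of how such multiplicative noise could be applied to a
    # single cost (an assumption for illustration only; the actual sampling
    # happens in the environment's step logic, which is not shown here):
    #   factor = max(np.random.normal(noise_mu, noise_sigma), noise_clipping)
    #   randomized_cost = original_cost * factor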
# The costs in the Carbon capture utilisation and storage (CCUS) tab to be randomised are capex, opex, and carbon price, with these row numbers:
pathways2Net0RandomRowInds_CCUS = np.array([23, 24, 26])
# The costs in the 'Outputs' tab to be randomised are Offshore wind - Devex, Capex, and Opex, Green Hydrogen - Capex, Fixed Opex, and Variable Opex, Blue Hydrogen - price, Gas feedstock price, Capex, Fixed opex, Variable opex, and Natural gas cost, with these row numbers:
# fmt: off
pathways2Net0RandomRowInds_Outputs = np.array([148, 149, 150, 153, 154, 155, 158, 159, 163, 164, 165, 166])
# fmt: on
# multiplicative noise's mu and sigma, and clipping point:
noise_mu = 1.0
noise_sigma = 0.1 # or try 0.1, 0.0, np.sqrt(0.001), 0.02, np.sqrt(0.0003), 0.015, 0.01, np.sqrt(0.00001), 0.001
noise_clipping = 0.5 # or try 0.001, 0.1, 0.5 (i.e., original costs are reduced by 50% at the most)
noise_sigma_factor = np.sqrt(0.1) # as in https://github.com/rangl-labs/netzerotc/issues/36, CCUS capex & opex (CCUS row 23 and 24) should have smaller standard deviations
stochastic_sigma = False # set to False to use one single noise_sigma; set to True to randomly switch between two different std:
# noise_sigma_low = 0.001
# noise_sigma_high = np.sqrt(0.00001)
# OR, sample a sigma from a uniform distribution centered at noise_sigma with total 2-side range of noise_sigma_range:
noise_sigma_range = 0.002
noise_observability = False # set to True to make the observation_space contain randomized costs/prices; set to False to restrict the observation_space to contain only the state.step_count
class State:
def __init__(self, seed=None, param=Parameters()):
np.random.seed(seed=seed)
self.initialise_state(param)
def initialise_state(self, param):
# create local copy of spreadsheet model to be manipulated
self.pathways2Net0 = param.pathways2Net0
# create an array of costs for the current year and populate with 2030 costs (column 'O' in 'CCUS' and 'Outputs' tabs):
self.randomized_costs = np.ones(
len(param.pathways2Net0RandomRowInds_CCUS)
+ len(param.pathways2Net0RandomRowInds_Outputs)
)
for costRowID in np.arange(len(param.pathways2Net0RandomRowInds_CCUS)):
self.randomized_costs[costRowID] = param.pathways2Net0.evaluate(
"CCUS!O" + str(param.pathways2Net0RandomRowInds_CCUS[costRowID])
)
for costRowID in np.arange(len(param.pathways2Net0RandomRowInds_Outputs)):
self.randomized_costs[
len(param.pathways2Net0RandomRowInds_CCUS) + costRowID
] = param.pathways2Net0.evaluate(
"Outputs!O" + str(param.pathways2Net0RandomRowInds_Outputs[costRowID])
)
self.noise_observability = param.noise_observability
# time variables
# NOTE: our convention is to update step_count at the beginning of the gym step() function
self.step_count = -1
self.steps_per_episode = param.steps_per_episode
# initial jobs supported in 2030
self.jobs = np.float32(
110484
)
# variable to record jobs created each year
self.jobs_increment = | np.zeros(1, dtype=np.float32) | numpy.zeros |
import numpy as np
import gym
import torch
from utils.others import NormalizedActions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Q_estimator:
def __init__(self, config, agent, best_uncertain):
self.config = config
self.agent = agent
self.best_uncertain = torch.FloatTensor(best_uncertain.reshape(1, -1)).to(device)
self.eval_env = gym.make(self.config['env'])
self.eval_env._max_episode_steps = 20000
self.eval_env = NormalizedActions(self.eval_env)
self.eval_env.seed(self.config['seed'] + 100)
self.eval_env.action_space.seed(self.config['seed'])
self.all_action = []
num = int(100 / self.config['state_action_pairs'])
for x in range(num, 100 + num, num):
action_list = self.get_action_list(x, self.eval_env)
self.all_action.append(action_list)
def get_action_list(self, random_step, eval_env):
action_list = []
eval_env.seed(self.config['seed'] + 100)
state, done = eval_env.reset(), False
for x in range(random_step):
action = self.agent.select_action(np.array(state),
uncertain=self.best_uncertain)
state, reward, done, _ = eval_env.step(action)
action_list.append(action)
return action_list
def cal_Q_bias(self, action_list, MC_samples, max_mc_steps, eval_env):
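        # Estimates the critic's bias at one state-action pair: replay a fixed
        # action prefix, read off the learned Q estimate there, then roll out
        # MC_samples on-policy episodes whose discounted, entropy-augmented
        # returns form a Monte Carlo target; bias = Q_estimate - mean(MC return).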
Q_mc = []
for x in range(MC_samples):
eval_env.seed(self.config['seed'] + 100)
state, done = eval_env.reset(), False
for action in action_list:
last_state = state
last_state_action = action
state, reward, done, _ = eval_env.step(action)
Q_mean, _ = self.agent.get_mean_std(torch.FloatTensor(last_state.reshape(1, -1)).to(device),
torch.FloatTensor(last_state_action.reshape(1, -1)).to(device),
self.best_uncertain)
total_reward = reward
for y in range(max_mc_steps):
state_ = torch.FloatTensor(state.reshape(1, -1)).to(device)
action, log_prob, _, _, _ = self.agent.get_action_log_prob(state_, self.best_uncertain)
state, reward, done, _ = eval_env.step(action.cpu().data.numpy().flatten())
logprob = float(log_prob)
temperature = float(self.agent.temperature(self.best_uncertain))
total_reward += (reward - logprob * temperature) * self.agent.discount ** (y + 1)
if done:
break
Q_mc.append(total_reward)
bias = float(Q_mean) - float( | np.mean(Q_mc) | numpy.mean |
import os
import time
import h5py
import numpy as np
import pytest
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as trsf
from continuum.scenarios import ContinualScenario, ClassIncremental, Permutations
from continuum.datasets import H5Dataset, CIFAR100, MNIST
from continuum.tasks.h5_task_set import H5TaskSet
from continuum.tasks import split_train_val
from continuum.scenarios import create_subscenario
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
@pytest.fixture
def data():
x_ = np.random.randint(0, 255, size=(20, 32, 32, 3))
y_ = []
for i in range(10):
y_.append(np.ones(2) * i)
y_ = np.concatenate(y_)
t_ = np.copy(y_) // 5
return x_, y_.astype(int), t_.astype(int)
# yapf: disable
def test_creation_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
x_0, y_0, t_0 = h5dataset.get_data()
assert isinstance(x_0, str) # x is only the path to the file
assert len(y_0) == len(y_)
assert len(t_0) == len(t_)
def test_concatenate_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
h5dataset.add_data(x_, y_, t_)
assert len(h5dataset.get_class_vector()) == 2 * len(y_)
def test_create_subscenario_h5dataset(data, tmpdir):
from continuum.scenarios import create_subscenario
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len(np.unique(t_))
scenario = ContinualScenario(h5dataset)
sub_scenario = create_subscenario(scenario, np.arange(nb_task - 1))
for task_set in sub_scenario:
loader = DataLoader(task_set)
for _ in loader:
pass
assert sub_scenario.nb_tasks == nb_task - 1
def test_create_subscenario_suffle_h5dataset(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len(np.unique(t_))
scenario = ContinualScenario(h5dataset)
task_order = np.arange(nb_task)
np.random.shuffle(task_order)
sub_scenario = create_subscenario(scenario, task_order)
for task_set in sub_scenario:
loader = DataLoader(task_set)
for _ in loader:
pass
assert sub_scenario.nb_tasks == nb_task
def test_h5dataset_ContinualScenario(data, tmpdir):
filename_h5 = os.path.join(tmpdir, "test_h5.hdf5")
x_, y_, t_ = data
h5dataset = H5Dataset(x_, y_, t_, data_path=filename_h5)
nb_task = len( | np.unique(t_) | numpy.unique |
# coding: utf-8
import numpy
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping,Callback
from time import time
from sklearn import metrics
def Feature_Selection(X_Data, Y_Data):
print("Feature Selection")
model = ExtraTreesClassifier(n_estimators=500, random_state=0, n_jobs=1, max_depth=25)
model.fit(X_Data, Y_Data.values.ravel())
importances = model.feature_importances_
std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
Best = []
important = []
for f in range(X_Data.shape[1]):
if (importances[indices[f]] > 0.000001):
Best.append(indices[f])
print("%d. Feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
important.append(importances[indices[f]])
print(len(Best), Best)
X_Data_Best = pd.DataFrame(columns=Best)
print(X_Data_Best.shape)
X_Data_Best_copy = pd.DataFrame(columns=Best[:22])
print(X_Data_Best_copy.shape)
for i in Best:
X_Data_Best[i] = X_Data[i]
print(X_Data_Best.shape)
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_Data_Best_copy.shape[1]), important[:22],
color="r", yerr=std[Best[:22]], align="center")
plt.xticks(range(X_Data_Best_copy.shape[1]), Best[:22])
plt.xlim([-1, X_Data_Best_copy.shape[1]])
plt.ylabel('Percent')
plt.xlabel('Feature index')
plt.show()
return X_Data_Best
def main():
print("python main function")
    SIZE_IMG = 64  # 8*8: each feature vector is padded to 64 values and reshaped into an 8x8 image
Train = pd.read_csv("Layer2.csv",header=None)
#Test = pd.read_csv("Testing20percent.csv",header=None)
Y_Data = Train.iloc[:, -1]
#Y_Test = Test.iloc[:, -1:]
X_Data = Train.iloc[:, :-1]
#X_Test = Test.iloc[:, :-1]
#X_Data = pd.concat([X_Train, X_Test], axis=0)
#Y_Data = pd.concat([Y_Train, Y_Test], axis=0)
print("Printing Y-data")
print("Printing X-data")
X_Data_Best = Feature_Selection(X_Data, Y_Data)
X_Data_Final = Image_Creation(X_Data_Best, SIZE_IMG)
Y_Data_Final = Label_Encoding(Y_Data)
MyModel(X_Data_Final, Y_Data_Final)
def Image_Creation(X_Data_Best, SIZE_IMG):
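    # Pad each selected-feature vector with zeros up to SIZE_IMG values and
    # reshape it into an 8x8 single-channel "image" so that it can be fed to
    # the convolutional network below.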
print("Image Creation")
i = 0
X_Data_Final = []
while i < len(X_Data_Best):
X = X_Data_Best.iloc[i]
X = np.concatenate([X, np.zeros(SIZE_IMG - len(X))])
X = X.reshape(8, 8).astype(np.uint8)
X_Data_Final.append(X)
i = i + 1
return X_Data_Final
def Label_Encoding(Y_Data):
print("Label Encoding")
Y_Data_Final = Y_Data
le = LabelEncoder()
le.fit(Y_Data_Final)
list(le.classes_)
Y_Data_Final = le.transform(Y_Data_Final)
Y_Data_Final = np.reshape(Y_Data_Final, (-1, 1))
return Y_Data_Final
def MyModel(X_Data_Final, Y_Data_Final):
print("Model")
start = time()
epochs = 1500
optimizer = 'adam'
numpy.random.seed(9)
X_tr, X_te, Y_tr, Y_te = train_test_split(X_Data_Final, Y_Data_Final, test_size=0.20, random_state=42)
X_tr = np.array(X_tr).reshape(-1, 8, 8, 1)
X_te = np.array(X_te).reshape(-1, 8, 8, 1)
early_stopping_monitor = EarlyStopping(patience=3)
print(X_tr.shape, X_te.shape,Y_tr.shape,Y_te.shape)
model_2 = Sequential()
model_2.add(Conv2D(32, (3, 3), activation='relu', input_shape=X_tr.shape[1:]))
#model_2.add(MaxPooling2D(pool_size=(2, 2)))
#model_2.add(Dropout(0.015))
model_2.add(Conv2D(64, (3, 3), activation='relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
#model_2.add(Dropout(0.015))
#model_2.add(Conv2D(128, (2, 2), activation='relu'))
#model_2.add(MaxPooling2D(pool_size=(2, 2)))
#model_2.add(Dropout(0.005))
model_2.add(Flatten())
model_2.add(Dense(256, activation='relu'))
#Please replace the number of neurons in dense layer below with number of labels in the dataset
model_2.add(Dense(12, activation='softmax'))
#model_2.add(Dropout(0.015))
#model_2.summary()
model_2.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
history = model_2.fit(X_tr, Y_tr, validation_data=(X_te, Y_te), epochs=epochs, callbacks=[early_stopping_monitor],
batch_size=32, verbose=0)
print(time()-start)
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
    Y_Predict = model_2.predict(X_te)  # predict() returns the softmax class probabilities
######Predicting Category
i = 0
y_pred = []
while i < len(Y_Predict):
maxi = Y_Predict[i].argmax()
y_pred.append(maxi)
i = i + 1
#######Predicting Category over
print(metrics.classification_report(Y_te, y_pred, digits=2))
#Please replace the classes in line below with labels in the dataset
plot_confusion_matrix(Y_te, y_pred, classes = ['Adware','Backdoor','FileInfector','PUA','Ransomware','Riskware','Scareware','Trojan','Trojan_Banker','Trojan_Dropper','Trojan_SMS','Trojan_Spy'],
title='Confusion matrix, without normalization')
#plot_confusion_matrix(Y_te, y_pred, classes = ['BENIGN','DNS','LDAP','MSSQL','NTP','NetBIOS','SNMP','SSDP','Syn','TFTP','UDP','UDPLag'],
# normalize=True, title='Normalized Confusion matrix')
plt.show()
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data; the label names themselves
    # come from the 'classes' argument supplied by the caller
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
fig.set_size_inches(14, 14)
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks= | np.arange(cm.shape[1]) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 11:52:51 2019
@author: sdenaro
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
#import scipy.stats as st
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
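# Outline of the steps below: (1) compute annual heating/cooling degree days
# (HDD/CDD) from daily temperatures, (2) regress annual streamflow totals at
# each gage on those annual HDD/CDD values, and (3) pair the simulated annual
# totals with temperature-driven daily flow fractions.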
# Import historical tmeperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
df_temp.columns=['Time','SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
his_temp_matrix = df_temp.values
###############################
# Synthetic HDD CDD calculation
# Simulation data
#sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
sim_temperature=df_temp
sim_temperature=sim_temperature.drop(['Time'], axis=1)
sim_temperature=sim_temperature.values
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
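# HDD = max(0, 65 - T) and CDD = max(0, T - 65), where T is the daily
# temperature in degrees Fahrenheit and 65 F is the conventional balance-point
# temperature.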
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_sums = np.reshape(np.sum(BPA,axis= 0).values,(1,num_BPA))
CA_sums = np.reshape(np.sum(CA,axis=0).values,(1,num_CA))
WB_sums = np.reshape(np.sum(WB,axis=0).values,(1,num_Will))
HO_sums = np.reshape(np.sum(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = np.column_stack((BPA_sums,CA_sums,WB_sums,HO_sums))
if y_index < 1:
hist_totals = joined
else:
hist_totals = np.vstack((hist_totals,joined))
BPA_headers = np.reshape(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = np.reshape(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = np.reshape(list(Willamette_streamflow.loc[:,'Albany':]),(1,num_Will))
HO_headers = np.reshape(['Hoover'],(1,1))
headers = np.column_stack((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
added_value=abs(np.min((df_hist_totals)))+5
log_hist_total=np.log(df_hist_totals+abs(added_value))
#########################################
# annual flow regression - predicts annual flows at each site as a function
# of total annual HDD and CDD across every weather station
#train on historical data
M = | np.column_stack((annual_CDD,annual_HDD)) | numpy.column_stack |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch as to
from init_args_serializer import Serializable
import pyrado
from pyrado.environments.quanser import MAX_ACT_QQ
from pyrado.environments.quanser.base import QuanserReal
from pyrado.policies.special.environment_specific import QQubeGoToLimCtrl, QQubePDCtrl
from pyrado.spaces.box import BoxSpace
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import RadiallySymmDesStateTask
from pyrado.tasks.reward_functions import ExpQuadrErrRewFcn
from pyrado.utils.input_output import completion_context, print_cbt
class QQubeSwingUpReal(QuanserReal, Serializable):
"""Class for the real Quanser Qube a.k.a. Furuta pendulum"""
name: str = "qq-su"
def __init__(
self,
dt: float = 1 / 500.0,
max_steps: int = pyrado.inf,
task_args: Optional[dict] = None,
ip: str = "192.168.2.17",
):
"""
Constructor
        :param dt: time step size on the Quanser device [s] (the inverse of the control frequency)
:param max_steps: maximum number of steps executed on the device [-]
:param task_args: arguments for the task construction
:param ip: IP address of the Qube platform
"""
Serializable._init(self, locals())
# Initialize spaces, dt, max_step, and communication
super().__init__(ip, rcv_dim=4, snd_dim=1, dt=dt, max_steps=max_steps, task_args=task_args)
self._curr_act = np.zeros(self.act_space.shape) # just for usage in render function
self._sens_offset = np.zeros(4) # last two entries are never calibrated but useful for broadcasting
def _create_task(self, task_args: dict) -> Task:
# Define the task including the reward function
state_des = task_args.get("state_des", np.array([0.0, np.pi, 0.0, 0.0]))
Q = task_args.get("Q", np.diag([3e-1, 1.0, 2e-2, 5e-3]))
R = task_args.get("R", np.diag([4e-3]))
return RadiallySymmDesStateTask(self.spec, state_des, ExpQuadrErrRewFcn(Q, R), idcs=[1])
def _create_spaces(self):
# Define the spaces
max_state = np.array([120.0 / 180 * np.pi, 4 * np.pi, 20 * np.pi, 20 * np.pi]) # [rad, rad, rad/s, rad/s]
max_obs = np.array([1.0, 1.0, 1.0, 1.0, pyrado.inf, pyrado.inf]) # [-, -, -, -, rad/s, rad/s]
self._state_space = BoxSpace(-max_state, max_state, labels=["theta", "alpha", "theta_dot", "alpha_dot"])
self._obs_space = BoxSpace(
-max_obs, max_obs, labels=["sin_theta", "cos_theta", "sin_alpha", "cos_alpha", "theta_dot", "alpha_dot"]
)
self._act_space = BoxSpace(-MAX_ACT_QQ, MAX_ACT_QQ, labels=["V"])
@property
def task(self) -> Task:
return self._task
def observe(self, state) -> np.ndarray:
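        # Angles are exposed as (sin, cos) pairs so the observation stays
        # continuous across the +/- pi wrap-around; angular velocities are
        # passed through unchanged.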
return np.array([np.sin(state[0]), np.cos(state[0]), np.sin(state[1]), np.cos(state[1]), state[2], state[3]])
def reset(self, *args, **kwargs) -> np.ndarray:
# Reset socket and task
super().reset()
# Run calibration routine to start in the center
self.calibrate()
# Start with a zero action and get the first sensor measurements
meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))
# Correct for offset, and construct the state from the measurements
self.state = self._correct_sensor_offset(meas)
# Reset time counter
self._curr_step = 0
return self.observe(self.state)
def _correct_sensor_offset(self, meas: np.ndarray) -> np.ndarray:
return meas - self._sens_offset
def _wait_for_pole_at_rest(self, thold_ang_vel: float = 0.1 / 180.0 * np.pi):
"""Wait until the Qube's rotating pole is at rest"""
cnt = 0
while cnt < 1.5 / self._dt:
# Get next measurement
meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))
if np.abs(meas[2]) < thold_ang_vel and np.abs(meas[3]) < thold_ang_vel:
cnt += 1
else:
cnt = 0
def calibrate(self):
"""Calibration routine to move to the init position and determine the sensor offset"""
with completion_context("Estimating sensor offset", color="c", bright=True):
# Reset calibration
self._sens_offset = np.zeros(4) # last two entries are never calibrated but useful for broadcasting
self._wait_for_pole_at_rest()
# Create parts of the calibration controller
go_right = QQubeGoToLimCtrl(positive=True, cnt_done=int(1.5 / self._dt))
go_left = QQubeGoToLimCtrl(positive=False, cnt_done=int(1.5 / self._dt))
go_center = QQubePDCtrl(self.spec)
# Estimate alpha offset. Go to both limits for theta calibration.
meas = self._qsoc.snd_rcv( | np.zeros(self.act_space.shape) | numpy.zeros |
import numpy as np
import math
from basic_import import *
Sparse_degree = [2, 3, 5, 7, 10, 20, 30, 50, 100, 300, 1000]
# OMP algorithm representation
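# Greedy sparse coding: at each iteration the dictionary atom most correlated
# with the current residual is added to the support, the coefficients on that
# support are re-fit by least squares, and the residual is updated, until L
# atoms have been selected or the residual stops shrinking.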
def omp(diction_with_error, b):
residual = b
index_matrix = []
index_matrix_whole = []
index_set = []
last_residual = 0
L = math.floor(Sparse_degree[6])
# L = math.floor(diction_with_error[0].shape[0]*3/4)
# iterate the omp process
cnt = 0
cnt_repre = 0
for i in range(L):
        c_k = np.fabs(np.dot(diction_with_error.T, residual)) # correlation of every atom with the residual; the largest picks the kth index
# print(c_k)
k = np.where(c_k == np.max(c_k))[0][0] # position of the largest projection
while k in index_set:
c_k[k] = 0
k = np.where(c_k == np.max(c_k))[0][0]
index_set.append(k) # update index set
index_matrix.append(diction_with_error.T[k].tolist()) # update index_matrix set
# index_matrix_whole.append(diction_with_error.T[k])
A_k = np.array(index_matrix).T # transform the index_matrix to numpy form
x_k = np.linalg.pinv(A_k.T.dot(A_k)).dot(A_k.T).dot(b) #least squares method
residual = b - A_k.dot(x_k) # compute the residual
if abs(np.linalg.norm(residual)-np.linalg.norm(last_residual)) < 1e-8:
cnt += 1
if cnt >= 10:
break
# print(np.linalg.norm(residual), " ", i, "/", L)# show the residual
last_residual = residual
if i+1 >= diction_with_error[0].shape[0]:
break
A_k = np.array(index_matrix).T # final support-dictionary matrix
x_k = np.linalg.pinv(A_k.T.dot(A_k)).dot(A_k.T).dot(b) # final support-presentation vector(include x and error)
# A_whole_k = np.array(index_matrix_whole).T
# x_whole_k = np.linalg.inv(A_whole_k.T.dot(A_whole_k)).dot(A_whole_k.T).dot(b_whole)
x_hat = [] # final representation vector
for t in range(diction_with_error[0].shape[0]):
x_hat.append(0)
for t in range(len(x_k)):
x_hat[index_set[t]] = x_k[t] # construct complete
x = np.array(x_hat)
return x
def x_select(x, diction_with_error, b, gender):
#########################
# Method 1 #
#########################
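    # Sparse-representation classification: keep only the coefficients that
    # belong to one class at a time (14 atoms per class, 50 classes),
    # reconstruct b from each class-restricted vector, and pick the class
    # whose reconstruction residual is smallest.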
delta_x = [] # delta_x[i] means the vector only
for i in range(50): # contains the parameters of the ith class
delta_x_i = []
for j in range(700):
if i*14 <= j <= i*14+13:
delta_x_i.append(x[j])
else:
delta_x_i.append(0)
delta_x.append(np.array(delta_x_i))
delta_x = np.array(delta_x)
r_set = [] # calculate the residual of every delta_x[i]
for delta_x_i in delta_x: # select the vector with least residual
r = b - diction_with_error.dot(delta_x_i)
r_set.append(np.linalg.norm(r))
r_set = np.array(r_set)
k = np.where(r_set == | np.min(r_set) | numpy.min |
import sys
import json
import os
import numpy as np
import pickle
import cv2
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D
from tabulate import tabulate
from torchvision.transforms import ToTensor
import torchvision.transforms.functional as F
from copy import deepcopy
from scipy.interpolate import interp1d
import imgaug.augmenters as iaa
from imgaug.augmenters import Resize
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from db.detection import DETECTION
from config import system_configs
from db.tools import eval_3D_lane
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
DARK_GREEN = (115, 181, 34)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
PINK = (180, 105, 255)
CYAN = (255, 128, 0)
CHOCOLATE = (30, 105, 210)
PEACHPUFF = (185, 218, 255)
STATEGRAY = (255, 226, 198)
GT_COLOR = [PINK, CYAN, ORANGE, YELLOW, BLUE]
PRED_COLOR = [RED, GREEN, DARK_GREEN, PURPLE, CHOCOLATE, PEACHPUFF, STATEGRAY]
PRED_HIT_COLOR = GREEN
PRED_MISS_COLOR = RED
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])
class APOLLOSIM(DETECTION):
def __init__(self, db_config, split, is_eval=False, is_resample=True, is_predcam=False):
super(APOLLOSIM, self).__init__(db_config)
data_dir = system_configs.data_dir
# result_dir = system_configs.result_dir
cache_dir = system_configs.cache_dir
max_lanes = system_configs.max_lanes
self.metric = 'default'
self.is_resample = is_resample
print('is_resample: {}'.format(is_resample))
print('is_predcam: {}'.format(is_predcam))
inp_h, inp_w = db_config['input_size']
# define image pre-processor
# self.totensor = transforms.ToTensor()
# self.normalize = transforms.Normalize(args.vgg_mean, args.vgg_std)
# self.data_aug = data_aug # False
# dataset parameters
# dataset_name = 'standard' # illus_chg/rare_subset/standard
self.dataset_name = system_configs.dataset_name # illus_chg
self.no_3d = False
self.no_centerline = True
self.h_org = 1080
self.w_org = 1920
self.org_h = 1080
self.org_w = 1920
self.h_crop = 0
self.crop_y = 0
# parameters related to service network
self.h_net = inp_h
self.w_net = inp_w
self.resize_h = inp_h
self.resize_w = inp_w
self.ipm_h = 208
self.ipm_w = 128
self.top_view_region = np.array([[-10, 103], [10, 103], [-10, 3], [10, 3]])
self.K = np.array([[2015., 0., 960.], [0., 2015., 540.], [0., 0., 1.]])
self.H_crop_ipm = self.homography_crop_resize([self.h_org, self.w_org], self.h_crop, [self.h_net, self.w_net])
self.H_crop_im = self.homography_crop_resize([self.h_org, self.w_org], self.h_crop, [self.h_org, self.w_org])
# org2resized+cropped
self.H_ipm2g = cv2.getPerspectiveTransform(
np.float32([[0, 0], [self.ipm_w - 1, 0], [0, self.ipm_h - 1], [self.ipm_w - 1, self.ipm_h - 1]]),
np.float32(self.top_view_region))
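        # H_ipm2g maps the four corners of the IPM (bird's-eye-view) image to
        # the ground-plane rectangle given by top_view_region, i.e. roughly
        # x in [-10, 10] m and y in [3, 103] m ahead of the camera.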
self.fix_cam = False
x_min = self.top_view_region[0, 0] # -10
x_max = self.top_view_region[1, 0] # 10
self.x_min = x_min # -10
self.x_max = x_max # 10
self.anchor_y_steps = [ 5, 10, 15, 20, 30, 40, 50, 60, 80, 100]
self.y_min = self.top_view_region[2, 1]
self.y_max = self.top_view_region[0, 1]
if self.is_resample:
self.gflatYnorm = self.anchor_y_steps[-1]
self.gflatZnorm = 10
self.gflatXnorm = 30
else:
self.gflatYnorm = 200
self.gflatZnorm = 16
self.gflatXnorm = 30
self.pitch = 3 # pitch angle of camera to ground in centi degree
self.cam_height = 1.55 # height of camera in meters
self.batch_size = system_configs.batch_size
if self.no_centerline: # False
self.num_types = 1
else:
self.num_types = 3
if self.is_resample:
self.sample_hz = 1
else:
self.sample_hz = 4
self._split = split
self._dataset = {
"train": ['train'],
"test": ['test'],
"sub_train": ['sub_train'],
"validation": ['validation'],
}[self._split]
self.root = os.path.join(data_dir, 'Apollo_Sim_3D_Lane_Release')
data_dir = os.path.join(self.root, 'data_splits', self.dataset_name)
if self.root is None:
raise Exception('Please specify the root directory')
        self.img_w, self.img_h = self.w_org, self.h_org  # apollo sim original image resolution (1920 x 1080)
self.max_2dlanes = 0
self.max_gflatlanes = 0
self.max_3dlanes = 0
self.max_2dpoints = 0
self.max_gflatpoints = 0
self.max_3dpoints = 0
self.X3d, self.Y3d, self.Z3d = [0, 0], [0, 0], [0, 0]
self.Xgflat, self.Ygflat = [0, 0], [0, 0]
self.normalize = True
self.to_tensor = ToTensor()
self.aug_chance = 0.9090909090909091
self._image_file = []
self.augmentations = [{'name': 'Affine', 'parameters': {'rotate': (-10, 10)}},
{'name': 'HorizontalFlip', 'parameters': {'p': 0.5}},
{'name': 'CropToFixedSize', 'parameters': {'height': 972, 'width': 1728}}]
# Force max_lanes, used when evaluating testing with models trained on other datasets
# if max_lanes is not None:
# self.max_lanes = max_lanes
self.anno_files = [os.path.join(data_dir, path + '.json') for path in self._dataset]
self._data = "apollosim_t"
self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
self._std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self._cat_ids = [
0
] # 0 car
self._classes = {
ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._cache_file = os.path.join(cache_dir, "apollosim_{}.pkl".format(self._dataset))
if self.augmentations is not None:
augmentations = [getattr(iaa, aug['name'])(**aug['parameters'])
for aug in self.augmentations] # add augmentation
transformations = iaa.Sequential([Resize({'height': inp_h, 'width': inp_w})])
self.transform = iaa.Sequential([iaa.Sometimes(then_list=augmentations, p=self.aug_chance), transformations])
if self.dataset_name == 'standard':
# Balanced: Below camera poses are got with geometry constraints learning
result_path = "../3DLSCPTRZoos/Pv_standard/results/Pv-stage_standard/500000/testing/apollosim_standard_test_predictions_apollosim.json"
elif self.dataset_name == 'rare_subset':
# Rare observed: Below camera poses are got with geometry constraints learning
result_path = "../3DLSCPTRZoos/Pv_standard/results/Pv-stage_rare_subset/500000/testing/apollosim_rare_subset_test_predictions_apollosim.json"
elif self.dataset_name == 'illus_chg':
# Illus change: Below camera poses are got with geometry constraints learning
result_path = "../3DLSCPTRZoos/Pv_illus_chg/results/Pv-stage_illus_chg/500000/testing/apollosim_illus_chg_test_predictions_apollosim.json"
else:
raise ValueError('invalid dataset_name: {}'.format(self.dataset_name))
if is_eval:
if is_predcam:
self._load_predcam_data(result_path=result_path)
else:
self._load_eval_data()
else:
self._load_data()
self._db_inds = np.arange(len(self._image_ids))
def _load_data(self, debug_lane=False):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
self._transform_annotations()
if debug_lane:
pass
else:
with open(self._cache_file, "wb") as f:
pickle.dump([self._annotations,
self._image_ids,
self._image_file,
self.max_2dlanes, self.max_3dlanes, self.max_gflatlanes,
self.max_2dpoints, self.max_3dpoints, self.max_gflatpoints,
self.X3d, self.Y3d, self.Z3d,
self.Xgflat, self.Ygflat], f)
else:
with open(self._cache_file, "rb") as f:
(self._annotations,
self._image_ids,
self._image_file,
self.max_2dlanes, self.max_3dlanes, self.max_gflatlanes,
self.max_2dpoints, self.max_3dpoints, self.max_gflatpoints,
self.X3d, self.Y3d, self.Z3d,
self.Xgflat, self.Ygflat) = pickle.load(f)
assert self.max_2dlanes == self.max_3dlanes
assert self.max_3dlanes == self.max_gflatlanes
assert self.max_2dpoints == self.max_3dpoints
assert self.max_3dpoints == self.max_gflatpoints
print('{}.max_2dlanes: {}\n'
'{}.max_3dlanes: {}\n'
'{}.max_gflatlanes: {}\n'
'{}.max_2dpoints: {}\n'
'{}.max_3dpoints: {}\n'
'{}.max_gflatpoints: {}\n'
'{}.X3d: {}\n'
'{}.Y3d: {}\n'
'{}.Z3d: {}\n'
'{}.Xgflat: {}\n'
'{}.Ygflat: {}'.format(self.dataset_name, self.max_2dlanes,
self.dataset_name, self.max_3dlanes,
self.dataset_name, self.max_gflatlanes,
self.dataset_name, self.max_2dpoints,
self.dataset_name, self.max_3dpoints,
self.dataset_name, self.max_gflatpoints,
self.dataset_name, self.X3d,
self.dataset_name, self.Y3d,
self.dataset_name, self.Z3d,
self.dataset_name, self.Xgflat,
self.dataset_name, self.Ygflat))
def _extract_data(self):
image_id = 0
max_2dlanes, max_3dlanes, max_gflatlanes = 0, 0, 0
self._old_annotations = {}
for anno_file in self.anno_files:
with open(anno_file, 'r') as anno_obj:
for line in anno_obj:
info_dict = json.loads(line)
# dict_keys(['raw_file', 'cam_height', 'cam_pitch',
# 'centerLines', 'laneLines', 'centerLines_visibility', 'laneLines_visibility'])
gt_lane_pts = info_dict['laneLines']
if len(gt_lane_pts) < 1:
continue
gt_lane_visibility = info_dict['laneLines_visibility']
image_path = os.path.join(self.root, info_dict['raw_file'])
assert os.path.exists(image_path), '{:s} not exist'.format(image_path)
# if not self.fix_cam:
gt_cam_height = info_dict['cam_height']
gt_cam_pitch = info_dict['cam_pitch']
P_g2im = self.projection_g2im(gt_cam_pitch, gt_cam_height, self.K) # used for x=PX (3D to 2D)
H_g2im = self.homograpthy_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_im2g = np.linalg.inv(H_g2im)
P_g2gflat = np.matmul(H_im2g, P_g2im)
                    aug_mat = np.identity(3, dtype=float)
gt_lanes = []
# org_gt_lanes = []
for i, lane in enumerate(gt_lane_pts):
# A GT lane can be either 2D or 3D
# if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
closest_point = lane[0]
remotest_point = lane[-1]
sampled_points = lane[1:-1:self.sample_hz]
sampled_points.insert(0, closest_point)
sampled_points.append(remotest_point)
lane = np.array(sampled_points)
# lane = np.array(lane[::self.sample_hz])
closest_viz = gt_lane_visibility[i][0]
remotest_viz = gt_lane_visibility[i][-1]
sampled_viz = gt_lane_visibility[i][1:-1:self.sample_hz]
sampled_viz.insert(0, closest_viz)
sampled_viz.append(remotest_viz)
lane_visibility = np.array(sampled_viz)
# lane_visibility = np.array(gt_lane_visibility[i][::self.sample_hz])
# prune gt lanes by visibility labels
pruned_lane = self.prune_3d_lane_by_visibility(lane, lane_visibility)
                        # pruning out-of-range points is necessary before the transformation (x in -30 ~ 30 m)
pruned_lane = self.prune_3d_lane_by_range(pruned_lane, 3*self.x_min, 3*self.x_max)
# Resample
if self.is_resample:
if pruned_lane.shape[0] < 2:
continue
                            # resample the 3D lane points at the fixed y anchor positions below
# print(pruned_lane.shape)
pruned_lane = self.make_lane_y_mono_inc(pruned_lane)
# print(pruned_lane.shape)
if pruned_lane.shape[0] < 2:
continue
x_values, z_values, visibility_vec = self.resample_laneline_in_y(pruned_lane,
self.anchor_y_steps,
out_vis=True)
x_values = x_values[visibility_vec]
z_values = z_values[visibility_vec]
y_values = np.array(self.anchor_y_steps)[visibility_vec]
pruned_lane = np.stack([x_values, y_values, z_values], axis=-1)
# print(pruned_lane.shape);exit()
if pruned_lane.shape[0] > 1:
gt_lanes.append(pruned_lane)
# save the gt 3d lanes
gt_3dlanes = deepcopy(gt_lanes)
# convert 3d lanes to flat ground space x_bar y_bar Z (meter i think)
self.convert_lanes_3d_to_gflat(gt_lanes, P_g2gflat)
gflatlanes = []
real_gt_3dlanes = []
for i in range(len(gt_lanes)):
gflatlane = gt_lanes[i]
gt_3dlane = gt_3dlanes[i]
valid_indices = np.logical_and(np.logical_and(gflatlane[:, 1] > 0, gflatlane[:, 1] < 200),
np.logical_and(gflatlane[:, 0] > 3 * self.x_min, gflatlane[:, 0] < 3 * self.x_max))
gflatlane = gflatlane[valid_indices, ...]
gt_3dlane = gt_3dlane[valid_indices, ...]
if gflatlane.shape[0] < 2 or np.sum(np.logical_and(gflatlane[:, 0] > self.x_min, gflatlane[:, 0] < self.x_max)) < 2:
continue
gflatlanes.append(gflatlane)
real_gt_3dlanes.append(gt_3dlane)
P_gt = np.matmul(self.H_crop_im, H_g2im)
P_gt = np.matmul(aug_mat, P_gt)
lanes = []
for i in range(len(gflatlanes)):
gflatlane = gflatlanes[i]
x_2d, y_2d = self.homographic_transformation(P_gt, gflatlane[:, 0], gflatlane[:, 1])
assert gflatlane.shape[0] == x_2d.shape[0]
assert x_2d.shape[0] == y_2d.shape[0]
# lanes.append([(x, y) for (x, y) in zip(x_2d, y_2d) if x >= 0])
lanes.append([(x, y) for (x, y) in zip(x_2d, y_2d)])
lanes = [lane for lane in lanes if len(lane) > 0]
if not len(lanes):
continue
self._image_file.append(image_path)
self._image_ids.append(image_id)
max_2dlanes = max(max_2dlanes, len(lanes))
self.max_2dlanes = max_2dlanes
max_gflatlanes = max(max_gflatlanes, len(gflatlanes))
self.max_gflatlanes = max_gflatlanes
max_3dlanes = max(max_3dlanes, len(real_gt_3dlanes))
self.max_3dlanes = max_3dlanes
self.max_2dpoints = max(self.max_2dpoints, max([len(l) for l in lanes]))
self.max_gflatpoints = max(self.max_gflatpoints, max([len(l) for l in gflatlanes]))
self.max_3dpoints = max(self.max_3dpoints, max([len(l) for l in real_gt_3dlanes]))
self.X3d[1] = max(self.X3d[1], max([np.max(l[:, 0]) for l in real_gt_3dlanes]))
self.X3d[0] = min(self.X3d[0], min([np.min(l[:, 0]) for l in real_gt_3dlanes]))
self.Y3d[1] = max(self.Y3d[1], max([np.max(l[:, 1]) for l in real_gt_3dlanes]))
self.Y3d[0] = min(self.Y3d[0], min([np.min(l[:, 1]) for l in real_gt_3dlanes]))
self.Z3d[1] = max(self.Z3d[1], max([np.max(l[:, 2]) for l in real_gt_3dlanes]))
self.Z3d[0] = min(self.Z3d[0], min([np.min(l[:, 2]) for l in real_gt_3dlanes]))
self.Xgflat[1] = max(self.Xgflat[1], max([np.max(l[:, 0]) for l in gflatlanes]))
self.Xgflat[0] = min(self.Xgflat[0], min([np.min(l[:, 0]) for l in gflatlanes]))
self.Ygflat[1] = max(self.Ygflat[1], max([ | np.max(l[:, 1]) | numpy.max |
"""Test root level functions in metarl."""
import csv
import math
import tempfile
import akro
import dowel
from dowel import logger, tabular
import numpy as np
import pytest
import tensorflow as tf
import torch
from metarl import _Default, make_optimizer
from metarl import log_multitask_performance, log_performance, TrajectoryBatch
from metarl.envs import EnvSpec
from tests.fixtures import TfGraphTestCase
@pytest.mark.serial
def test_log_performance():
lengths = np.array([10, 5, 1, 1])
batch = TrajectoryBatch(
EnvSpec(akro.Box(np.array([0., 0., 0.]), np.array([1., 1., 1.])),
akro.Box(np.array([-1., -1.]), np.array([0., 0.]))),
observations=np.ones((sum(lengths), 3), dtype=np.float32),
last_observations=np.ones((len(lengths), 3), dtype=np.float32),
actions=np.zeros((sum(lengths), 2), dtype=np.float32),
rewards=np.array([
0.34026529, 0.58263177, 0.84307509, 0.97651095, 0.81723901,
0.22631398, 0.03421301, 0.97515046, 0.64311832, 0.65068933,
0.17657714, 0.04783857, 0.73904013, 0.41364329, 0.52235551,
0.24203526, 0.43328910
]),
terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
dtype=bool),
env_infos={
'success':
np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
dtype=bool)
},
agent_infos={},
lengths=lengths)
log_file = tempfile.NamedTemporaryFile()
csv_output = dowel.CsvOutput(log_file.name)
logger.add_output(csv_output)
log_performance(7, batch, 0.8, prefix='test_log_performance')
logger.log(tabular)
logger.dump_output_type(dowel.CsvOutput)
with open(log_file.name, 'r') as file:
rows = list(csv.DictReader(file))
res = {k: float(r) for (k, r) in rows[0].items()}
assert res['test_log_performance/Iteration'] == 7
assert res['test_log_performance/NumTrajs'] == 4
assert math.isclose(res['test_log_performance/SuccessRate'], 0.75)
assert math.isclose(res['test_log_performance/CompletionRate'], 0.5)
assert math.isclose(res['test_log_performance/AverageDiscountedReturn'],
1.1131040640673113)
assert math.isclose(res['test_log_performance/AverageReturn'],
2.1659965525)
assert math.isclose(res['test_log_performance/StdReturn'],
2.354067152038576)
@pytest.mark.serial
def test_log_multitask_performance_task_name():
lengths = np.array([10, 5, 1, 1])
batch = TrajectoryBatch(
EnvSpec(akro.Box( | np.array([0., 0., 0.]) | numpy.array |
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from oracle.models import rf_model
from utils import *
from metrics.abcd import abcd
from mklaren.kernel.kinterface import Kinterface
from mklaren.kernel.kernel import *
from mklaren.projection.icd import ICD
from pdb import set_trace
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas
from tabulate import tabulate
from datasets.handler import get_all_datasets
def get_kernel_matrix(dframe, n_dim=15):
"""
This returns a Kernel Transformation Matrix $\Theta$
It uses kernel approximation offered by the MKlaren package
For the sake of completeness (and for my peace of mind, I use the best possible approx.)
:param dframe: input data as a pandas dataframe.
:param n_dim: Number of dimensions for the kernel matrix (default=15)
:return: $\Theta$ matrix
"""
ker = Kinterface(data=dframe.values, kernel=linear_kernel)
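    # Incomplete Cholesky decomposition yields a low-rank factor G (n x n_dim)
    # with K approximately equal to G.dot(G.T); G itself is then used as the
    # transformed feature representation.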
model = ICD(rank=n_dim)
model.fit(ker)
g_nystrom = model.G
return g_nystrom
def map_transform(src, tgt, n_components=2):
"""
Run a map and transform x and y onto a new space using TCA
:param src: IID samples
:param tgt: IID samples
:return: Mapped x and y
"""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
    col_name = ["Col_" + str(i) for i in range(n_components)]
x0 = pd.DataFrame(get_kernel_matrix(S, n_components), columns=col_name)
y0 = pd.DataFrame(get_kernel_matrix(T, n_components), columns=col_name)
x0.loc[:, src.columns[-1]] = pd.Series(src[src.columns[-1]], index=x0.index)
y0.loc[:, tgt.columns[-1]] = pd.Series(tgt[tgt.columns[-1]], index=y0.index)
return x0, y0
def get_dcv(src, tgt):
"""Get dataset characteristic vector."""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
def self_dist_mtx(arr):
dist_arr = pdist(arr)
return squareform(dist_arr)
dist_src = self_dist_mtx(S.values)
dist_tgt = self_dist_mtx(T.values)
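    # The characteristic vector of each dataset summarises its pairwise-distance
    # distribution: [mean, median, min, max, std, number of instances].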
dcv_src = [np.mean(dist_src), np.median(dist_src), np.min(dist_src), np.max(dist_src), np.std(dist_src),
len(S.values)]
dcv_tgt = [np.mean(dist_tgt), np.median(dist_tgt), np.min(dist_tgt), np.max(dist_tgt), | np.std(dist_tgt) | numpy.std |
import random
from abc import ABC, abstractmethod
import numpy as np
from tqdm import tqdm
from cellular_algorithm import Grid
class Evolution(ABC):
def __init__(
self,
crossover,
mutation,
selection,
succession,
boundaries,
function,
maximize=True,
mutation_probability=1,
iterations=100,
parents_num=2,
population_shape=(1, 100),
population=None,
):
"""
Arguments:
crossover: crossover that will be used to create new individuals
mutation: type of mutation that will be used to modify new individuals
selection: type of mutation
succession: type of succession
boundaries: describes range of possible solutions
eg. ((0, 10), (100, 200), (3, 15)) =>
0 < x < 10, 100 < y < 200, 3 < z < 15
function: function that will be optimized
maximize: if function should be maximized (if not, it will be minimized)
mutation_probability: probability of mutation
iterations: number of iterations
parents_num: number of parents that will be used to create new individual
population_shape: shape of the grid
- (1, population_num) - for classic evolution
- (n_1, ..., n_x) - for cellular evolution
population - population
"""
self.crossover = crossover
self.selection = selection
self.succession = succession
self.mutation = mutation
self.boundaries = boundaries
self.function = function
self.maximize = maximize
self.mutation_probability = mutation_probability
self.iterations = iterations
self.parents_num = parents_num
if not population and not population_shape:
raise ValueError("You need to specify `grid` or `shape` to create it.")
if not population:
population = Grid(population_shape)
population.generate_individuals(self.boundaries, self.function)
self.population = population
self.population_shape = self.population.grid.shape
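        # A shape of (1, N) reduces the grid to a single panmictic population
        # (classic EA); any other shape gives a cellular EA in which parents
        # are drawn only from a cell's neighbourhood
        # (see CellularEvolutionaryAlgorithm.select_parents).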
# Create tmp grid that will be used in each iteration
self.offsprings = Grid(self.population_shape)
self.best_solution = None
self.best_solution_position = None
def get_best(self, individuals):
if self.maximize:
return max(*individuals, key=lambda x: x.fitness)
else:
return min(*individuals, key=lambda x: x.fitness)
def update_best_solution(self, individual, position):
if (
not self.best_solution
or self.get_best([self.best_solution, individual]) is individual
):
self.best_solution = individual
self.best_solution_position = position
def select_parents(self, individuals):
"""Selection.
Arguments:
individuals: population or part of the population that we want to select
parents from.
"""
return self.selection.select(individuals, self.maximize, num=self.parents_num)
def recombine(self, parents):
"""Crossover.
Recombine selected parents to create new individual.
Arguments:
parents: list of parents to recombine. There should be exactly 2 parents
Return:
newly created individual
"""
return self.crossover.recombine(*parents)
def normalize_coordinates(self, individual):
"""Make sure that individual's coordinates meet boundaries."""
result = []
for boundary, coordinate in zip(self.boundaries, individual.coordinates):
low, high = boundary
coordinate = coordinate if coordinate >= low else float(low)
coordinate = coordinate if coordinate <= high else float(high)
result.append(coordinate)
individual.coordinates = np.array(result)
return individual
def mutate(self, new_individual):
"""Mutation."""
return self.mutation.mutate(new_individual)
def get_population_coordinates(self):
return [
(*individual.coordinates, individual.fitness)
for _, individual in self.population.iterate_individuals()
]
@abstractmethod
def choose_next_population(self):
"""Succession."""
...
@abstractmethod
def run_single_iteration(self):
...
def run(self, save_trace=False):
"""Run evolution."""
population_trace = None
if save_trace:
population_trace = [self.get_population_coordinates()]
for iteration in tqdm(range(self.iterations)):
self.run_single_iteration()
if save_trace:
population_trace.append(self.get_population_coordinates())
return population_trace
class EvolutionaryAlgorithm(Evolution):
def __init__(self, crossover_probability=1, *args, **kwargs):
"""
Arguments:
crossover_probability: probability of the crossover
"""
super(EvolutionaryAlgorithm, self).__init__(*args, **kwargs)
self.crossover_probability = crossover_probability
def choose_next_population(self):
"""Succession."""
current_population = self.population.get_all_individuals()
next_population = self.succession.select(
current_population,
self.offsprings.get_all_individuals(),
self.maximize,
len(current_population),
)
self.population.grid = np.reshape(next_population, self.population_shape)
def run_single_iteration(self):
for grid_position, individual in self.population.iterate_individuals():
# Selection and crossover
if random.uniform(0, 1) < self.crossover_probability:
parents = self.select_parents(self.population.get_all_individuals())
new_individual = self.recombine(parents)
else:
new_individual = self.population.get_random_individual()
# Mutation
if random.uniform(0, 1) < self.mutation_probability:
new_individual = self.mutate(new_individual)
# Normalization and fitness computation
new_individual = self.normalize_coordinates(new_individual)
new_individual.fitness = self.function(new_individual.coordinates)
self.offsprings.set_individual(new_individual, grid_position)
self.update_best_solution(new_individual, grid_position)
# Succession
self.choose_next_population()
class CellularEvolutionaryAlgorithm(Evolution):
def __init__(self, neighbourhood, *args, **kwargs):
"""
Arguments:
neighbourhood: describes type of neighbourhood
"""
super(CellularEvolutionaryAlgorithm, self).__init__(*args, **kwargs)
self.neighbourhood = neighbourhood
def select_parents(self, grid_position):
"""Selection.
Select individual's neighbours that will be used to create new individual.
Arguments:
grid_position: individual's position on the grid.
Return:
List of neighbours that will be used to create new individual.
"""
positions = self.neighbourhood.get_neighbours(
self.population_shape, grid_position
)
neighbours = self.population.get_individuals(positions)
return super().select_parents(neighbours)
def choose_next_population(self):
"""Succession.
For each cell choose better individual.
Arguments:
offsprings: grid with newly create individuals
"""
for individual_info, offspring_info in zip(
self.population.iterate_individuals(),
self.offsprings.iterate_individuals(),
):
position, individual = individual_info
_, offspring = offspring_info
result = self.succession.select(
np.array([individual]), | np.array([offspring]) | numpy.array |
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import pytest
import os
import copy
import numpy as np
from numpy.testing import assert_allclose
from hyperion.utils import Utt2Info
from hyperion.io import H5DataWriter
from hyperion.generators.sequence_batch_generator_v1 import SequenceBatchGeneratorV1 as SBG
output_dir = './tests/data_out/generators'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
h5_file = output_dir + '/seqbg.h5'
key_file = output_dir + '/seqbg.scp'
num_seqs = 10
dim = 2
min_seq_length = 100
delta = 10
max_seq_length = min_seq_length + (num_seqs-1)*delta
seq_lengths = np.arange(100, max_seq_length+1, delta)
def create_dataset():
file_path = [str(k) for k in xrange(num_seqs)]
key=[]
i = 0
j = 0
while i < num_seqs:
key_i = (j+1)*str(j)
        i += (j+1)
j += 1
key += key_i
key = key[:num_seqs]
u2c = Utt2Info.create(file_path, key)
if os.path.exists(h5_file):
return u2c
u2c.save(key_file, sep=' ')
h = H5DataWriter(h5_file)
rng = np.random.RandomState(seed=0)
for i in xrange(num_seqs):
x_i = rng.randn(seq_lengths[i], dim)
h.write(file_path[i], x_i)
return u2c
def test_num_seqs():
create_dataset()
sr = SBG(h5_file, key_file)
assert sr.num_seqs == num_seqs
def test_seq_lengths():
create_dataset()
sr = SBG(h5_file, key_file, shuffle_seqs=False)
assert np.all(sr.seq_lengths == seq_lengths)
assert sr.total_length==np.sum(seq_lengths)
assert sr.min_seq_length == min_seq_length
assert sr.max_seq_length == max_seq_length
def test_num_total_subseqs():
create_dataset()
sr = SBG(h5_file, key_file, gen_method='full_seqs', batch_size=5)
    assert sr.num_total_subseqs == num_seqs
def test_prune_min_length():
create_dataset()
sr = SBG(h5_file, key_file, batch_size=5, shuffle_seqs=False,
prune_min_length=min_seq_length+5)
assert sr.num_seqs==num_seqs - 1
assert np.all(sr.seq_lengths==seq_lengths[1:])
    assert sr.total_length==np.sum(seq_lengths[1:])
import numpy as np
from scipy.linalg import block_diag
from scipy import optimize
import cvxopt
# Actions:
# 0: insert tail wing in body
# 1: screw tail wing to body
# 2: insert main wing in body
# 3: screw main wing to body
# 4: insert wing tip in main wing
# 5: screw propeller to base
# 6: screw propeller cap to base
# 7: screw base to body
# 8: attach bombs to wingtip
act = [0, 1, 2, 3, 4, 5, 6, 7, 8]
# Feature matrices (rewards):
phi_p = np.array([[1.0, 1.0, 1.0, 1.0, 0.9, 0.0, 0.0, 1.0, 0.9],
[1.0, 1.0, 1.0, 1.0, 0.9, 0.0, 0.0, 1.0, 0.9],
[1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.9],
[1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.9],
[0.9, 0.9, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[0.9, 0.9, 0.9, 0.9, 1.0, 0.0, 0.0, 0.0, 1.0]]) # part
phi_t = np.array([[1, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0, 1, 1]]) # tool
phi_m = np.array([[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]]) # motion
phi_l = np.array([[1.0, 1.0, 0.8, 0.8, 0.8, 0.5, 0.5, 0.5, 0.8],
[1.0, 1.0, 0.8, 0.8, 0.8, 0.5, 0.5, 0.5, 0.8],
[0.8, 0.8, 1.0, 1.0, 1.0, 0.3, 0.3, 0.3, 1.0],
[0.8, 0.8, 1.0, 1.0, 1.0, 0.3, 0.3, 0.3, 1.0],
[0.8, 0.8, 1.0, 1.0, 1.0, 0.3, 0.3, 0.3, 1.0],
[0.5, 0.5, 0.3, 0.3, 0.3, 1.0, 1.0, 1.0, 0.3],
[0.5, 0.5, 0.3, 0.3, 0.3, 1.0, 1.0, 1.0, 0.3],
[0.5, 0.5, 0.3, 0.3, 0.3, 1.0, 1.0, 1.0, 0.3],
[0.8, 0.8, 1.0, 1.0, 1.0, 0.3, 0.3, 0.3, 1.0]]) # location
phi_e = np.array([[1.0, 0.8, 1.0, 0.8, 1.0, 0.2, 0.8, 1.0, 1.0],
[0.8, 1.0, 0.8, 1.0, 0.8, 0.4, 1.0, 0.8, 0.8],
[1.0, 0.8, 1.0, 0.8, 1.0, 0.2, 0.8, 1.0, 1.0],
[0.8, 1.0, 0.8, 1.0, 0.8, 0.4, 1.0, 0.8, 0.8],
[1.0, 0.8, 1.0, 0.8, 1.0, 0.2, 0.8, 1.0, 1.0],
[0.2, 0.4, 0.2, 0.4, 0.2, 1.0, 0.4, 0.2, 0.2],
[0.8, 1.0, 0.8, 1.0, 0.8, 0.4, 1.0, 0.8, 0.8],
[1.0, 0.8, 1.0, 0.8, 1.0, 0.2, 0.8, 1.0, 1.0],
[1.0, 0.8, 1.0, 0.8, 1.0, 0.2, 0.8, 1.0, 1.0]]) # effort
# Preconditions (transitions)
# T = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
# [1, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [1, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 1, 0, 0, 0],
# [0, 0, 0, 0, 0, 1, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0]])
T = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
# Demonstration
demo = [0, 2, 4, 1, 3, 5, 6, 7, 8]
# demo = [1, 2, 3, 4, 5, 9, 6, 7, 8]
num_d = len(demo) - 1
# Max margin
A = []
S = []
for i in range(num_d):
prev = demo[i]
next = demo[i+1]
candidates = demo[i+2:]
for other in candidates:
t = np.argwhere(T[other,:])
if t.size == 0 or t in demo[:i+1]:
a = [-phi_p[prev,next]+phi_p[prev,other],
-phi_t[prev,next]+phi_t[prev,other],
-phi_m[prev,next]+phi_m[prev,other],
-phi_l[prev,next]+phi_l[prev,other],
-phi_e[prev,next]+phi_e[prev,other]]
s = np.zeros(num_d-1)
s[i] = -1
A.append(a)
S.append(s)
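# Illustrative reading of one row built above: together with its slack column,
# each row of A encodes w . (phi(prev, other) - phi(prev, next)) - xi_i <= -1,
# i.e. the demonstrated transition prev->next must outscore the feasible
# alternative `other` by a margin of 1, softened by the slack variable xi_i.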
A = np.array(A)
S = np.array(S)
_, n_w = A.shape
_, n_b = S.shape
W = np.hstack((-1*np.eye(n_w), np.zeros((n_w, n_b))))
A = np.hstack((A, S))
# MATLAB % [A_new, ia, ic] = unique(A, 'rows', 'stable');
# MATLAB % S_new = S(ia, :);
# MATLAB % A = [A_new, S_new];
n_con, n_x = A.shape
C = 3.5
H = np.eye(5)
Hs = 2*C*np.eye(num_d-1)
H = block_diag(H, Hs)
f = np.zeros((1, n_x))
b = -1*np.ones((n_con, 1))
b_W = np.zeros((n_w, 1))
# MATLAB % x = quadprog(H,f,A,b)
# MATLAB x = quadprog(H,f,[A; W],[b; b_W]) % uses 'interior-point-convex' algorithm by default (https://www.mathworks.com/help/optim/ug/quadprog.html)
b_stack = np.vstack((b, b_W))
A_stack = np.vstack((A, W))
# # Doesn't work, gives all zero result
# x0 = np.random.randn(n_x,1)
# def fun(x):
# return 0.5 * np.dot(x.T, np.dot(H, x)) + np.dot(f, x)
# cons = [{'type':'ineq', 'fun':lambda x: b_stack[i] - np.dot(A_stack[i], x)}
# for i in range(b_stack.shape[0])]
# result = optimize.minimize(fun, x0, constraints=cons)
# x = result['x']
# Using interior-point algorithms (http://cvxopt.org/documentation/index.html#technical-documentation)
cvxopt.solvers.options['show_progress'] = False
x = cvxopt.solvers.qp(cvxopt.matrix(H), cvxopt.matrix(f.T), cvxopt.matrix(A_stack), cvxopt.matrix(b_stack))['x']
x = np.array(x)
print(x)
# Predict
w = x[:5]
candidates = set(act)
pred = []
prev = 0
candidates.remove(prev)
while not len(candidates)==0:
pred.append(prev)
r_max = -100
for other in candidates:
        t = np.argwhere(T[other,:])
        if t.size == 0 or t in pred:
            r = w[0]*phi_p[prev,other] + w[1]*phi_t[prev,other] + \
                w[2]*phi_m[prev,other] + w[3]*phi_l[prev,other] + \
                w[4]*phi_e[prev,other]
            if r > r_max:
                r_max = r
                best = other
    prev = best
    candidates.remove(prev)
pred.append(prev)
print(pred)
import os
import sys
import numpy as np
from time import sleep
from tqdm import tqdm
import json
BASE_DIR = os.path.abspath('')
sys.path.append(BASE_DIR)
# ROOT_DIR = BASE_DIR
ROOT_DIR = os.path.join(BASE_DIR, os.pardir)
DATA_DIR = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
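# A small sanity-check sketch for pc_normalize (the random input is hypothetical):
# the returned cloud is zero-centred and scaled so the farthest point sits on the unit sphere.
#   pts = np.random.rand(1024, 3) * 10.0
#   norm_pts = pc_normalize(pts)
#   assert np.allclose(norm_pts.mean(axis=0), 0.0, atol=1e-6)
#   assert np.isclose(np.max(np.sqrt((norm_pts ** 2).sum(axis=1))), 1.0)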
class data_handler(object):
"""
This class helps to load .txt files and save them as .npy files (much faster to load).
~~~~~~~~~~~~~~~~ CURRENTLY ONLY TESTED WITH THE MODELNET40 DATASET ~~~~~~~~~~~~~~~~~~~~~~~~~
"""
def __init__(self, load, save, limit=100):
"""
load - string: file to load
save - string: file save name
limit - int: how many files to load per set
"""
self.load = load
self.save = save
self.limit = limit
cat_file = os.path.join(DATA_DIR, 'modelnet40_shape_names.txt')
cat = [line.rstrip() for line in open(cat_file)]
self.classes = dict(zip(cat, range(len(cat))))
self.point_set = np.array([])
self.class_set = np.array([])
def load_file(self):
load_file = os.path.join(DATA_DIR, self.load)
shape_ids = [line.rstrip() for line in open(load_file)]
        shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids] # this gets the object names
datapath = [(shape_names[i], os.path.join(DATA_DIR, shape_names[i], shape_ids[i])+'.txt') for i in range(len(shape_ids))]
d_size = len(datapath)
curr_limit = min(d_size, self.limit)
fn1 = datapath[0]
# print(fn1)
point_set = np.loadtxt(fn1[1], delimiter=',').astype(np.float32)
class_set = self.classes[fn1[0]]
class_set = np.array([class_set]).astype(np.int32)
class_set = np.full([point_set.shape[0], 1], class_set)
print(point_set.shape)
for i in tqdm(range(1, curr_limit)):
fn = datapath[i]
cls = self.classes[datapath[i][0]]
cls = np.array([cls]).astype(np.int32)
curr_file_data = np.loadtxt(fn[1], delimiter=',').astype(np.float32)
class_set = np.append(class_set, np.full([curr_file_data.shape[0],1], cls), axis=0)
            point_set = np.append(point_set, curr_file_data, axis=0)
from __future__ import absolute_import, division
import numpy as np
import cv2
from glob import glob
import os
import pickle
from torch.utils.data import Dataset
class Got10kCropped(Dataset):
def __init__(self, dataset_path, transforms=None,
pair_per_seq=1):
super(Got10kCropped, self).__init__()
self.transforms = transforms
self.pairs_per_seq = pair_per_seq
        # Read the video sequences contained in the dataset, along with their metadata, noise labels, and the target's width/height ratios in the search images
with open(os.path.join(dataset_path, 'list.txt')) as f:
seqs = f.readlines()
seqs = [os.path.join(dataset_path, x.replace('\n','')) for x in seqs]
self.seqs = seqs
        # Load the metadata of the video sequences
# meta_data = []
# meta_data_names = [os.path.join(x, 'meta_data.txt') for x in self.seqs]
# for meta_data_name in meta_data_names:
# with open(meta_data_name, 'rb') as f:
# meta_data.append( pickle.load(f) )
# self.meta_data = meta_data
        # # Load the labels of the video sequences
# noisy_label = []
# noisy_label_names = [os.path.join(x, 'noisy_label.txt') for x in self.seqs]
# for noisy_label_name in noisy_label_names:
# with open(noisy_label_name, 'rb') as f:
# noisy_label.append(pickle.load(f))
# self.noisy_label = noisy_label
#
# # 加载目标在搜索图像中的长宽比例
# target_wh = []
# target_wh_names = [os.path.join(x, 'target_wh.txt') for x in self.seqs]
# for target_wh_name in target_wh_names:
# with open(target_wh_name, 'rb') as f:
# target_wh.append(pickle.load(f))
# self.target_wh = target_wh
print('loading metadata from:'+os.path.join(dataset_path, 'got10k_meta.pckl')+'\n')
with open(os.path.join(dataset_path, 'got10k_meta.pckl'), 'rb') as f:
got10k_meta = pickle.load(f)
self.meta_data = got10k_meta['meta_data']
self.noisy_label = got10k_meta['noisy_label']
self.target_wh = got10k_meta['target_wh']
self.indices = np.random.permutation(len(self.seqs))
def __getitem__(self, index):
        index = self.indices[index % len(self.indices)]  # get the index of the requested video sequence
img_files = glob(os.path.join(self.seqs[index], '*.jpg'))
noisy_label = self.noisy_label[index]
meta = self.meta_data[index]
target_wh = self.target_wh[index]
        # Get the video sequence labels after filtering out noisy frames.
# with open(noisy_label, 'rb') as f:
# noisy_label = pickle.load(f)
        val_indices = np.logical_and.reduce(noisy_label)
"""Script for multi-gpu training."""
import json
import os
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from tensorboardX import SummaryWriter
from tqdm import tqdm
from alphapose.models import builder
from alphapose.opt import cfg, logger, opt
from alphapose.utils.logger import board_writing, debug_writing
from alphapose.utils.metrics import DataLogger, calc_accuracy, calc_integral_accuracy, evaluate_mAP
from alphapose.utils.transforms import get_func_heatmap_to_coord, _integral_tensor,get_box_for_align,integral_op,get_affine_transform,affine_transform,transform_preds
from alphapose.models.criterion import IngetralCoordinate
num_gpu = torch.cuda.device_count()
valid_batch = 1 * num_gpu
if opt.sync:
norm_layer = nn.SyncBatchNorm
else:
norm_layer = nn.BatchNorm2d
def train(opt, train_loader, m, criterion, optimizer, writer):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
num_joints = cfg.DATA_PRESET.get('NUM_JOINTS',133)
train_branch = cfg.OTHERS.get('TRAIN_BRANCH',True)
train_loader = tqdm(train_loader, dynamic_ncols=True)
for i, (inps, labels, label_masks, _, bboxes) in enumerate(train_loader):
if isinstance(inps, list):
inps = [inp.cuda().requires_grad_() for inp in inps]
else:
inps = inps.cuda().requires_grad_()
out, feature = m(inps)
# train for finer hands
if train_branch:
out = m.module.forward_branch(out,feature,bboxes[:,1,:],bboxes[:,2,:])
labels = labels[:,:-68*2].cuda()
label_masks = label_masks[:,:-68*2].cuda()
else:
labels = labels[:,:133*2].cuda()
label_masks = label_masks[:,:133*2].cuda()
loss = criterion(out, labels, label_masks)
acc = calc_integral_accuracy(out, labels, label_masks, output_3d=False, norm_type=norm_type)
if isinstance(inps, list):
batch_size = inps[0].size(0)
else:
batch_size = inps.size(0)
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(writer, loss_logger.avg, acc_logger.avg, opt.trainIters, 'Train')
# Debug
if opt.debug and not i % 10:
            debug_writing(writer, out, labels, inps, opt.trainIters)
# TQDM
train_loader.set_description(
'loss: {loss:.8f} | acc: {acc:.4f}'.format(
loss=loss_logger.avg,
acc=acc_logger.avg)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def validate(m, opt, heatmap_to_coord, batch_size=20):
det_dataset = builder.build_dataset(cfg.DATASET.TEST, preset_cfg=cfg.DATA_PRESET, train=False, opt=opt)
det_loader = torch.utils.data.DataLoader(
det_dataset, batch_size=batch_size, shuffle=False, num_workers=20, drop_last=False)
kpt_json = []
eval_joints = det_dataset.EVAL_JOINTS
test_branch = cfg.OTHERS.get('TEST_BRANCH',True)
m.eval()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
hm_size = cfg.DATA_PRESET.HEATMAP_SIZE
for inps, crop_bboxes, bboxes, img_ids, scores, imghts, imgwds in tqdm(det_loader, dynamic_ncols=True):
if isinstance(inps, list):
inps = [inp.cuda() for inp in inps]
else:
inps = inps.cuda()
output,_ = m(inps,crop_bboxes[:,1,:],crop_bboxes[:,2,:],crop_bboxes[:,3,:])
pred = output
assert pred.dim() == 4
pred = pred[:, eval_joints, :, :]
for i in range(output.shape[0]):
bbox = crop_bboxes[i][0].tolist()
pose_coords, pose_scores = heatmap_to_coord(
pred[i][det_dataset.EVAL_JOINTS], bbox, hm_shape=hm_size, norm_type=norm_type)
keypoints = np.concatenate((pose_coords, pose_scores), axis=1)
keypoints = keypoints.reshape(-1).tolist()
data = dict()
#data['bbox'] = bboxes[i, 0].tolist()
data['bbox'] = bbox
data['image_id'] = int(img_ids[i])
data['score'] = float(scores[i] + np.mean(pose_scores) + np.max(pose_scores))
data['category_id'] = 1
data['keypoints'] = keypoints
kpt_json.append(data)
with open(os.path.join(opt.work_dir, 'test_kpt.json'), 'w') as fid:
json.dump(kpt_json, fid)
res = evaluate_mAP(os.path.join(opt.work_dir, 'test_kpt.json'), ann_type='keypoints', ann_file='/ssd3/Benchmark/coco/annotations/coco_wholebody_val_133.json')#ann_file=os.path.join(cfg.DATASET.VAL.ROOT, cfg.DATASET.VAL.ANN))
return res
def validate_gt(m, opt, cfg, heatmap_to_coord, batch_size=20):
gt_val_dataset = builder.build_dataset(cfg.DATASET.VAL, preset_cfg=cfg.DATA_PRESET, train=False)
eval_joints = gt_val_dataset.EVAL_JOINTS
test_branch = cfg.OTHERS.get('TEST_BRANCH',True)
gt_val_loader = torch.utils.data.DataLoader(
gt_val_dataset, batch_size=batch_size, shuffle=False, num_workers=20, drop_last=False)
kpt_json = []
kpt_json_branch = []
m.eval()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
hm_size = cfg.DATA_PRESET.HEATMAP_SIZE
for inps, labels, label_masks, img_ids, bboxes in tqdm(gt_val_loader, dynamic_ncols=True):
if isinstance(inps, list):
inps = [inp.cuda() for inp in inps]
else:
inps = inps.cuda()
output,feature = m(inps)
pred = copy.deepcopy(output)
assert pred.dim() == 4
pred = pred[:, eval_joints, :, :]
for i in range(output.shape[0]):
bbox = bboxes[i][0].tolist()
pose_coords, pose_scores = heatmap_to_coord(
pred[i][gt_val_dataset.EVAL_JOINTS], bbox, hm_shape=hm_size, norm_type=norm_type)
keypoints = np.concatenate((pose_coords, pose_scores), axis=1)
keypoints = keypoints.reshape(-1).tolist()
data = dict()
#data['bbox'] = bboxes[i, 0].tolist()
data['bbox'] = bbox
data['image_id'] = int(img_ids[i])
data['score'] = float(np.mean(pose_scores) + np.max(pose_scores))
data['category_id'] = 1
data['keypoints'] = keypoints
kpt_json.append(data)
if test_branch:
hm_height, hm_width = hm_size
# regression the joints of wholeboy in stage1
pred_jts, pred_score = _integral_tensor(
pred, 133, False, hm_width, hm_height, 1, integral_operation=integral_op, norm_type='sigmoid')
pred_jts = pred_jts.reshape(pred_jts.shape[0], 133, 2)
# get the coords with the size of heatmap
coords_x = (pred_jts[:, :, 0] + 0.5) * hm_width
coords_y = (pred_jts[:, :, 1] + 0.5) * hm_height
# get the box of hands for roi align
lefthand_boxes = get_box_for_align(coords_x[:,-42:-21],coords_y[:,-42:-21])
righthand_boxes = get_box_for_align(coords_x[:,-21:],coords_y[:,-21:])
# stage2 testing
fine_out = m.forward_branch(output, feature, lefthand_boxes, righthand_boxes)
# output contains the finer and amplified hands kpts, need to apply aff
fine_pred_jts, fine_pred_score = _integral_tensor(
fine_out[:,-42:,:,:], 42, False, hm_width, hm_height, 1, integral_operation=integral_op, norm_type='sigmoid')
fine_pred_jts = fine_pred_jts.reshape(fine_pred_jts.shape[0], 42, 2)
lefthand_jts = fine_pred_jts[:,:21,:]
righthand_jts = fine_pred_jts[:,21:,:]
lefthand_jts[:,:,0] = (lefthand_jts[:,:,0]+0.5)*hm_width
lefthand_jts[:,:,1] = (lefthand_jts[:,:,1]+0.5)*hm_height
righthand_jts[:,:,0] = (righthand_jts[:,:,0]+0.5)*hm_width
righthand_jts[:,:,1] = (righthand_jts[:,:,1]+0.5)*hm_height
center_hm = np.array([hm_width/2.0,hm_height/2.0])
scale_hm = np.array([hm_size[1],hm_size[0]])
lefthand_kpts = copy.deepcopy(lefthand_jts.cpu().numpy().astype(np.float32))
righthand_kpts = copy.deepcopy(righthand_jts.cpu().numpy().astype(np.float32))
# apply affine trans to lefthand and add offset
for j in range(lefthand_jts.shape[0]):
box = lefthand_boxes[j].tolist()
width = np.array(box[2] - box[0])
height = np.array(box[3] - box[1])
output_size = [box[2]-box[0],box[3]-box[1]]
offset = np.array([box[0],box[1]])
trans = get_affine_transform(center_hm,scale_hm,0,output_size)
for k in range(21):
lefthand_kpts[j ,k, 0:2] = affine_transform(lefthand_kpts[j ,k, 0:2], trans)
lefthand_kpts[j,:,0] = (lefthand_kpts[j,:,0]) + offset[0]
lefthand_kpts[j,:,1] = (lefthand_kpts[j,:,1])+ offset[1]
#--------------------------------------------------
# apply affine trans to righthand and add offset
for j in range(righthand_jts.shape[0]):
box = righthand_boxes[j].tolist()
width = np.array(box[2] - box[0])
height = np.array(box[3] - box[1])
output_size = [box[2]-box[0],box[3]-box[1]]
offset = np.array([box[0],box[1]])
trans = get_affine_transform(center_hm,scale_hm,0,output_size)
for k in range(21):
righthand_kpts[j,k, 0:2] = affine_transform(righthand_kpts[j ,k, 0:2], trans)
righthand_kpts[j,:,0] = (righthand_kpts[j,:,0]) + offset[0]
righthand_kpts[j,:,1] = (righthand_kpts[j,:,1]) + offset[1]
#--------------------------------------------------
bodyface_kpts = copy.deepcopy(pred_jts[:,:-42,:].cpu().numpy().astype(np.float32))
bodyface_kpts[:,:,0] = (bodyface_kpts[:,:,0]+0.5)*hm_width
bodyface_kpts[:,:,1] = (bodyface_kpts[:,:,1]+0.5)*hm_height
fine_kpts = np.concatenate((bodyface_kpts,lefthand_kpts,righthand_kpts), axis=1)
fine_socre = np.concatenate((pred_score[:,:-42,:].cpu().numpy(),fine_pred_score.cpu().numpy()), axis=1)
for n in range(output.shape[0]):
bbox = bboxes[n][0].tolist()
xmin, ymin, xmax, ymax = bbox
w = xmax - xmin
h = ymax - ymin
center = np.array([xmin + w * 0.5, ymin + h * 0.5])
                scale = np.array([w, h])
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Slice(Base):
@staticmethod
def export_slice(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
y = x[0:3, 0:10]
starts = np.array([0, 0], dtype=np.int64)
ends = np.array([3, 10], dtype=np.int64)
axes = np.array([0, 1], dtype=np.int64)
steps = np.array([1, 1], dtype=np.int64)
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice')
@staticmethod
def export_slice_neg(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0], dtype=np.int64)
ends = np.array([-1], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 0:-1]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_neg')
@staticmethod
def export_slice_start_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1000], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1000:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_start_out_of_bounds')
@staticmethod
def export_slice_end_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_end_out_of_bounds')
@staticmethod
def export_slice_default_axes(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends], outputs=[y],
name='test_slice_default_axes')
@staticmethod
def export_slice_default_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
axes = np.array([0, 1, 2], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends, axes], outputs=[y],
name='test_slice_default_steps')
@staticmethod
def export_slice_neg_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([20, 10, 4], dtype=np.int64)
ends = np.array([0, 0, 1], dtype=np.int64)
        axes = np.array([0, 1, 2], dtype=np.int64)
        steps = np.array([-1, -3, -2], dtype=np.int64)
        y = x[20:0:-1, 10:0:-3, 4:1:-2]
        expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
               name='test_slice_neg_steps')
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: edit_image.py
# --- Creation Date: 16-05-2021
# --- Last Modified: Fri 28 May 2021 00:08:35 AEST
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Edit an existing image.
"""
import copy
import os
from tqdm import tqdm
from time import perf_counter
import click
import re
import imageio
import pickle
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import dnnlib
import legacy
from typing import List, Optional
# from projector import project
def softmax_last_dim_fn(x):
return F.softmax(x, dim=-1)
# def double_softmax_last_dim_fn(x):
# return F.softmax(F.softmax(x, dim=-1), dim=-1)
# def sigmoid_fn(x):
# return torch.sigmoid(x) * 0.2 # rescale to balance with softmax
# def get_heat_fn(self, heat_fn_name):
# if heat_fn_name == 'softmax':
# heat_fn = softmax_last_dim_fn
# elif heat_fn_name == 'sigmoid':
# heat_fn = sigmoid_fn
# elif heat_fn_name == 'double_softmax':
# heat_fn = double_softmax_last_dim_fn
# else:
# raise ValueError('Unknown M.heat_fn:', heat_fn_name)
# return heat_fn
def project(
G,
target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
*,
num_steps = 1000,
w_avg_samples = 10000,
initial_learning_rate = 0.1,
initial_noise_factor = 0.05,
lr_rampdown_length = 0.25,
lr_rampup_length = 0.05,
noise_ramp_length = 0.75,
regularize_noise_weight = 1e5,
verbose = False,
device: torch.device
):
assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)
def logprint(*args):
if verbose:
print(*args)
G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore
# Compute w stats.
logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]
# w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]
w_samples = w_samples.cpu().numpy().astype(np.float32) # [N, L, C]
w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, L, C]
w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
# Setup noise inputs.
noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }
# Load VGG16 feature detector.
url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
with dnnlib.util.open_url(url) as f:
vgg16 = torch.jit.load(f).eval().to(device)
# Features for target image.
target_images = target.unsqueeze(0).to(device).to(torch.float32)
if target_images.shape[2] > 256:
target_images = F.interpolate(target_images, size=(256, 256), mode='area')
target_features = vgg16(target_images, resize_images=False, return_lpips=True)
w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)
# Init noise.
for buf in noise_bufs.values():
buf[:] = torch.randn_like(buf)
buf.requires_grad = True
for step in range(num_steps):
# Learning rate schedule.
t = step / num_steps
w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
lr = initial_learning_rate * lr_ramp
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Synth images from opt_w.
w_noise = torch.randn_like(w_opt) * w_noise_scale
# ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
ws = w_opt
# ws = w_opt + w_noise
synth_images = G.synthesis(ws, noise_mode='const')
# Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
synth_images = (synth_images + 1) * (255/2)
if synth_images.shape[2] > 256:
synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
# Features for synth images.
synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
dist = (target_features - synth_features).square().sum()
# dist_pixel = (target_images - synth_images).square().sum() * 0.001
# Noise regularization.
reg_loss = 0.0
for v in noise_bufs.values():
noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
while True:
reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
if noise.shape[2] <= 8:
break
noise = F.avg_pool2d(noise, kernel_size=2)
# ws_avg = ws.mean(1, keepdim=True) # [1, 1, C]
# ws_var = (((ws - ws_avg) ** 2).sum() / ws.size(1))
# loss = dist
loss = dist + reg_loss * regularize_noise_weight
# loss = dist + reg_loss * regularize_noise_weight + ws_var * 0.1
# loss = dist + reg_loss * regularize_noise_weight + dist_pixel
# Step
optimizer.zero_grad(set_to_none=True)
loss.backward()
optimizer.step()
logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
# Save projected W for each optimization step.
w_out[step] = w_opt.detach()[0]
# Normalize noise.
with torch.no_grad():
for buf in noise_bufs.values():
buf -= buf.mean()
buf *= buf.square().mean().rsqrt()
# return w_out.repeat([1, G.mapping.num_ws, 1])
return w_out
#----------------------------------------------------------------------------
class CommaSeparatedIntList(click.ParamType):
name = 'int_list'
def convert(self, value, param, ctx):
_ = param, ctx
if value is None or value.lower() == 'none' or value == '':
return []
return [int(x.strip()) for x in value[1:-1].split(',')]
class CommaSeparatedFloatList(click.ParamType):
name = 'float_list'
def convert(self, value, param, ctx):
_ = param, ctx
if value is None or value.lower() == 'none' or value == '':
return []
return [float(x.strip()) for x in value[1:-1].split(',')]
def num_range(s: str) -> List[int]:
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
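# Illustrative values: num_range('2-5') returns [2, 3, 4, 5], while num_range('1,3,8') returns [1, 3, 8].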
#----------------------------------------------------------------------------
@click.command()
@click.option('--gan_network', help='Network pickle filename', required=True)
@click.option('--m_network', help='M Network pickle filename', required=True)
@click.option('--target', help='Target image file to project to', required=True, metavar='FILE')
@click.option('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
@click.option('--edit_dims', help='The latent dim to edit', required=True, type=num_range)
@click.option('--edit_scale', help='The scale to edit', required=True, type=CommaSeparatedFloatList())
@click.option('--impact_w_layers', help='Optionally limit the impact on certain W space', default=None, type=CommaSeparatedIntList())
@click.option('--train_project', help='If training projection', type=bool, default=False, show_default=True)
@click.option('--gen_rand_image', help='If generate rand images', type=bool, default=False, show_default=True)
@click.option('--truncation_psi', help='Truncation psi in mapping net', default=0.7, type=float, show_default=True)
@click.option('--n_samples', help='Samples to show', default=5, type=int, show_default=True)
@click.option('--use_heat_max', help='If use max of heat', type=bool, default=False, show_default=True)
def run_edit(
gan_network: str,
m_network: str,
target: str,
outdir: str,
save_video: bool,
seed: int,
num_steps: int,
edit_dims: int,
edit_scale: list,
impact_w_layers: list,
train_project: bool,
gen_rand_image: bool,
truncation_psi: float,
n_samples: int,
use_heat_max: bool,
):
""" Edit an existing image by first projecting it into latent space W and then modify it
by M network with specified dimension.
"""
np.random.seed(seed)
torch.manual_seed(seed)
# Load networks.
print('Loading networks from "%s"...' % gan_network)
device = torch.device('cuda')
with dnnlib.util.open_url(gan_network) as fp:
# G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
network_dict = legacy.load_network_pkl(fp)
G = network_dict['G_ema'].requires_grad_(False).to(device) # subclass of torch.nn.Module
D = network_dict['D'].requires_grad_(False).to(device)
# Load M network.
with open(m_network, 'rb') as f:
M = pickle.load(f)['M'].requires_grad_(False).to(device)
os.makedirs(outdir, exist_ok=True)
if train_project:
# Load target image.
target_pil = PIL.Image.open(target).convert('RGB')
w, h = target_pil.size
s = min(w, h)
target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
target_uint8 = np.array(target_pil, dtype=np.uint8)
# Optimize projection.
start_time = perf_counter()
projected_w_steps = project(
G,
target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable
num_steps=num_steps,
device=device,
verbose=True
)
print (f'Elapsed: {(perf_counter()-start_time):.1f} s')
# Render debug output: optional video and projected image and W vector.
if save_video:
video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
print (f'Saving optimization progress video "{outdir}/proj.mp4"')
for projected_w in projected_w_steps:
synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
synth_image = (synth_image + 1) * (255/2)
synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
video.close()
# Save final projected frame and W vector.
target_pil.save(f'{outdir}/target.png')
projected_w = projected_w_steps[-1] # (num_ws, w_dim)
synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
synth_image = (synth_image + 1) * (255/2)
synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')
np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())
elif gen_rand_image:
z_samples = torch.randn(n_samples, G.z_dim, device=device)
projected_w = G.mapping(z_samples, None, truncation_psi=truncation_psi) # [b, num_ws, w_dim]
else:
        projected_w = torch.tensor(np.load(f'{outdir}/projected_w.npz')['w'], device=device)
import numpy as np
import datetime
from pychunkedgraph.backend import chunkedgraph
from pychunkedgraph.backend.utils import column_keys
from multiwrapper import multiprocessing_utils as mu
from typing import Optional, Sequence
def _read_delta_root_rows_thread(args) -> list:
start_seg_id, end_seg_id, serialized_cg_info, time_stamp_start, time_stamp_end = args
cg = chunkedgraph.ChunkedGraph(**serialized_cg_info)
start_id = cg.get_node_id(segment_id=start_seg_id,
chunk_id=cg.root_chunk_id)
end_id = cg.get_node_id(segment_id=end_seg_id,
chunk_id=cg.root_chunk_id)
# apply column filters to avoid Lock columns
rows = cg.read_node_id_rows(
start_id=start_id,
start_time=time_stamp_start,
end_id=end_id,
end_id_inclusive=False,
columns=[column_keys.Hierarchy.FormerParent, column_keys.Hierarchy.NewParent],
end_time=time_stamp_end,
end_time_inclusive=True)
# new roots are those that have no NewParent in this time window
new_root_ids = [k for (k, v) in rows.items()
if column_keys.Hierarchy.NewParent not in v]
# expired roots are the IDs of FormerParent's
# whose timestamp is before the start_time
expired_root_ids = []
for k, v in rows.items():
if column_keys.Hierarchy.FormerParent in v:
fp = v[column_keys.Hierarchy.FormerParent]
for cell_entry in fp:
expired_root_ids.extend(cell_entry.value)
return new_root_ids, expired_root_ids
def _read_root_rows_thread(args) -> list:
start_seg_id, end_seg_id, serialized_cg_info, time_stamp = args
cg = chunkedgraph.ChunkedGraph(**serialized_cg_info)
start_id = cg.get_node_id(segment_id=start_seg_id,
chunk_id=cg.root_chunk_id)
end_id = cg.get_node_id(segment_id=end_seg_id,
chunk_id=cg.root_chunk_id)
rows = cg.read_node_id_rows(
start_id=start_id,
end_id=end_id,
end_id_inclusive=False,
end_time=time_stamp,
end_time_inclusive=True)
root_ids = [k for (k, v) in rows.items()
if column_keys.Hierarchy.NewParent not in v]
return root_ids
def get_latest_roots(cg,
time_stamp: Optional[datetime.datetime] = None,
n_threads: int = 1) -> Sequence[np.uint64]:
# Create filters: time and id range
max_seg_id = cg.get_max_seg_id(cg.root_chunk_id) + 1
n_blocks = np.min([n_threads * 3 + 1, max_seg_id])
seg_id_blocks = np.linspace(1, max_seg_id, n_blocks, dtype=np.uint64)
cg_serialized_info = cg.get_serialized_info()
if n_threads > 1:
del cg_serialized_info["credentials"]
multi_args = []
for i_id_block in range(0, len(seg_id_blocks) - 1):
multi_args.append([seg_id_blocks[i_id_block],
seg_id_blocks[i_id_block + 1],
cg_serialized_info, time_stamp])
# Run parallelizing
if n_threads == 1:
results = mu.multiprocess_func(_read_root_rows_thread,
multi_args, n_threads=n_threads,
verbose=False, debug=n_threads == 1)
else:
results = mu.multisubprocess_func(_read_root_rows_thread,
multi_args, n_threads=n_threads)
root_ids = []
for result in results:
root_ids.extend(result)
return np.array(root_ids, dtype=np.uint64)
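# A usage sketch (assumes an already-constructed ChunkedGraph instance `cg`):
#   roots = get_latest_roots(cg, time_stamp=datetime.datetime.utcnow(), n_threads=4)
#   # -> np.ndarray of uint64 root IDs that have no newer parent at `time_stamp`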
def get_delta_roots(cg,
time_stamp_start: datetime.datetime,
time_stamp_end: Optional[datetime.datetime] = None,
min_seg_id: int = 1,
n_threads: int = 1) -> Sequence[np.uint64]:
# Create filters: time and id range
max_seg_id = cg.get_max_seg_id(cg.root_chunk_id) + 1
n_blocks = np.min([n_threads + 1, max_seg_id-min_seg_id+1])
seg_id_blocks = np.linspace(min_seg_id, max_seg_id, n_blocks, dtype=np.uint64)
cg_serialized_info = cg.get_serialized_info()
if n_threads > 1:
del cg_serialized_info["credentials"]
multi_args = []
for i_id_block in range(0, len(seg_id_blocks) - 1):
multi_args.append([seg_id_blocks[i_id_block],
seg_id_blocks[i_id_block + 1],
cg_serialized_info, time_stamp_start, time_stamp_end])
# Run parallelizing
if n_threads == 1:
results = mu.multiprocess_func(_read_delta_root_rows_thread,
multi_args, n_threads=n_threads,
verbose=False, debug=n_threads == 1)
else:
results = mu.multisubprocess_func(_read_delta_root_rows_thread,
multi_args, n_threads=n_threads)
# aggregate all the results together
new_root_ids = []
expired_root_id_candidates = []
for r1, r2 in results:
new_root_ids.extend(r1)
expired_root_id_candidates.extend(r2)
    expired_root_id_candidates = np.array(expired_root_id_candidates, dtype=np.uint64)
"""
Test for file IO
"""
import pytest
import numpy as np
from bioptim import OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_muscle_driven_ocp(ode_solver):
bioptim_folder = TestUtils.bioptim_folder()
static_arm = TestUtils.load_module(bioptim_folder + "/examples/muscle_driven_ocp/static_arm.py")
ode_solver = ode_solver()
ocp = static_arm.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_ocp/arm26.bioMod",
final_time=2,
n_shooting=10,
weight=1,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
if isinstance(ode_solver, OdeSolver.IRK):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14351611580879933)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.94511299, 3.07048865]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.41149114, -0.55863385]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.00147561, 0.00520749]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.00027953, 0.00069257]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.29029533e-06, 1.64976642e-01, 1.00004898e-01, 4.01974257e-06, 4.13014984e-06, 1.03945583e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25940361e-03, 3.21754460e-05, 3.12984790e-05, 2.00725054e-03, 1.99993619e-03, 1.81725854e-03]),
)
elif isinstance(ode_solver, OdeSolver.RK8):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350914060136277)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.94510844, 3.07048231]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.41151235, -0.55866253]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.00147777, 0.00520795]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.00027953, 0.00069258]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.28863414e-06, 1.65011897e-01, 1.00017224e-01, 4.01934660e-06, 4.12974244e-06, 1.03954780e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25990460e-03, 3.21893307e-05, 3.13077447e-05, 2.01209936e-03, 2.00481801e-03, 1.82353344e-03]),
)
else:
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350464848810182)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.9451058, 3.0704789]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.4115254, -0.5586797]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.0014793, 0.0052082]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.0002795, 0.0006926]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.2869218e-06, 1.6503522e-01, 1.0002514e-01, 4.0190181e-06, 4.1294041e-06, 1.0396051e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.2599283e-03, 3.2188697e-05, 3.1307377e-05, 2.0121186e-03, 2.0048373e-03, 1.8235679e-03]),
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4]) # Only one solver since it is very long
def test_muscle_activations_with_contact_driven_ocp(ode_solver):
# TODO: This test should be removed when DynamicsFcn.MUSCLE_ACTIVATIONS_AND_TORQUE_DRIVEN_WITH_CONTACT is
# unitary tested
# Load static_arm_with_contact
bioptim_folder = TestUtils.bioptim_folder()
static_arm = TestUtils.load_module(bioptim_folder + "/examples/muscle_driven_ocp/static_arm_with_contact.py")
ode_solver = ode_solver()
ocp = static_arm.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_ocp/arm26_with_contact.bioMod",
final_time=2,
n_shooting=10,
weight=1,
ode_solver=ode_solver,
)
sol = ocp.solve()
if isinstance(ode_solver, OdeSolver.IRK):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14351397970185203)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.0081671, -0.94509584, 3.07047323]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.00093981, 0.41157421, -0.55870943]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.49332839e-07, 1.47494809e-03, 5.20721575e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.72476211e-06, -2.79524486e-04, 6.92600551e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.29081617e-06, 1.64961906e-01, 9.99986809e-02, 4.01995665e-06, 4.13036938e-06, 1.03940164e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25988708e-03, 3.21882769e-05, 3.13076618e-05, 2.01160287e-03, 2.00431774e-03, 1.82289866e-03]),
)
elif isinstance(ode_solver, OdeSolver.RK8):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350699571954104)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.00816709, -0.94509077, 3.07046606]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.00093983, 0.411599, -0.55874465]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.77284867e-07, 1.47710422e-03, 5.20766354e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.72484502e-06, -2.79525145e-04, 6.92616311e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.28911678e-06, 1.64996819e-01, 1.00010798e-01, 4.01956674e-06, 4.12996816e-06, 1.03949142e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25994595e-03, 3.21879960e-05, 3.13075455e-05, 2.01165125e-03, 2.00436616e-03, 1.82298538e-03]),
)
else:
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.1435025030068162)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.0081671, -0.9450881, 3.0704626]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.0009398, 0.4116121, -0.5587618]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.9652660e-07, 1.4785825e-03, 5.2079505e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.7248808e-06, -2.7952503e-04, 6.9262306e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.2873915e-06, 1.6502014e-01, 1.0001872e-01, 4.0192359e-06, 4.1296273e-06, 1.0395487e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.2599697e-03, 3.2187363e-05, 3.1307175e-05, 2.0116712e-03, 2.0043861e-03, 1.8230214e-03]),
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4]) # Only one solver since it is very long
def test_muscle_excitation_with_contact_driven_ocp(ode_solver):
# Load contact_forces_inequality_constraint_muscle_excitations
bioptim_folder = TestUtils.bioptim_folder()
contact = TestUtils.load_module(
bioptim_folder
+ "/examples/muscle_driven_with_contact/contact_forces_inequality_constraint_muscle_excitations.py"
)
boundary = 50
ode_solver = ode_solver()
ocp = contact.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_with_contact/2segments_4dof_2contacts_1muscle.bioMod",
phase_time=0.3,
n_shooting=10,
min_bound=boundary,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525619)
# Check some of the results
q, qdot, mus_states, tau, mus_controls = (
sol.states["q"],
sol.states["qdot"],
sol.states["muscles"],
sol.controls["tau"],
sol.controls["muscles"],
)
if isinstance(ode_solver, OdeSolver.IRK):
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (110, 1))
np.testing.assert_almost_equal(g[:90], np.zeros((90, 1)), decimal=6)
np.testing.assert_array_less(-g[90:], -boundary)
expected_pos_g = np.array(
[
[51.5414325],
[52.77742181],
[57.57780262],
[62.62940016],
[65.1683722],
[66.33551167],
[65.82614885],
[63.06016376],
[57.23683342],
[50.47124118],
[156.35594176],
[136.1362431],
[89.86994764],
[63.41325331],
[57.493027],
[55.09716611],
[53.77813649],
[52.90987628],
[52.19502561],
[50.56093511],
]
)
np.testing.assert_almost_equal(g[90:], expected_pos_g)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.0, 0.0, -0.75, 0.75]))
np.testing.assert_almost_equal(
q[:, -1], np.array([-3.40708085e-01, 1.34155553e-01, -2.22589697e-04, 2.22589697e-04])
)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(
qdot[:, -1], np.array([-2.01858700e00, 4.49316671e-04, 4.03717411e00, -4.03717411e00])
)
# initial and final muscle state
np.testing.assert_almost_equal(mus_states[:, 0], np.array([0.5]))
np.testing.assert_almost_equal(mus_states[:, -1], np.array([0.52946019]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-54.08860398]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-26.70209712]))
np.testing.assert_almost_equal(mus_controls[:, 0], np.array([0.48071638]))
np.testing.assert_almost_equal(mus_controls[:, -1], np.array([0.40159522]))
elif isinstance(ode_solver, OdeSolver.RK8):
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (110, 1))
np.testing.assert_almost_equal(g[:90], np.zeros((90, 1)), decimal=6)
np.testing.assert_array_less(-g[90:], -boundary)
expected_pos_g = np.array(
[
[51.54108548],
[52.77720093],
[57.5776414],
[62.62966321],
[65.16873337],
[66.33594321],
[65.82669791],
[63.06102595],
[57.23848183],
[50.47112677],
[156.35763657],
[136.13688244],
[89.86990489],
[63.41179686],
[57.49195628],
[55.09640086],
[53.77757475],
[52.9094631],
[52.19492485],
[50.56081268],
]
)
np.testing.assert_almost_equal(g[90:], expected_pos_g)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.0, 0.0, -0.75, 0.75]))
np.testing.assert_almost_equal(
q[:, -1], np.array([-3.40708085e-01, 1.34155553e-01, -2.22589697e-04, 2.22589697e-04])
)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(
qdot[:, -1], np.array([-2.01866580e00, 4.49415846e-04, 4.03733171e00, -4.03733171e00])
)
# initial and final muscle state
np.testing.assert_almost_equal(mus_states[:, 0], np.array([0.5]))
np.testing.assert_almost_equal(mus_states[:, -1], np.array([0.5289569]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-54.0891972]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-26.7018241]))
np.testing.assert_almost_equal(mus_controls[:, 0], np.array([0.4808524]))
np.testing.assert_almost_equal(mus_controls[:, -1], np.array([0.4007721]))
else:
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (110, 1))
np.testing.assert_almost_equal(g[:90], np.zeros((90, 1)))
np.testing.assert_array_less(-g[90:], -boundary)
expected_pos_g = np.array(
[
[51.5673555],
[52.82179693],
[57.5896514],
[62.60246484],
[65.13414631],
[66.29498636],
[65.77592127],
[62.98288508],
[57.0934291],
[50.47918162],
[156.22933663],
[135.96633458],
[89.93755291],
[63.57705684],
[57.59613028],
[55.17020948],
[53.83337907],
[52.95213608],
[52.20317604],
[50.57048159],
]
)
np.testing.assert_almost_equal(g[90:], expected_pos_g, decimal=6)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.0, 0.0, -0.75, 0.75]))
np.testing.assert_almost_equal(
q[:, -1], np.array([-3.40710032e-01, 1.34155565e-01, -2.18684502e-04, 2.18684502e-04])
)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(
qdot[:, -1], np.array([-2.01607708e00, 4.40761528e-04, 4.03215433e00, -4.03215433e00])
)
# initial and final muscle state
np.testing.assert_almost_equal(mus_states[:, 0], np.array([0.5]))
np.testing.assert_almost_equal(mus_states[:, -1], np.array([0.54388439]))
# initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array([-54.04429218]))
import sparse
from sparse._settings import NEP18_ENABLED
from sparse._utils import assert_eq
import numpy as np
import pytest
from hypothesis import settings, given, strategies as st
from _utils import gen_sparse_random
if not NEP18_ENABLED:
pytest.skip("NEP18 is not enabled", allow_module_level=True)
@settings(deadline=None)
@given(
func=st.sampled_from(
[
np.mean,
np.std,
np.var,
np.sum,
lambda x: np.sum(x, axis=0),
lambda x: np.transpose(x),
]
),
y=gen_sparse_random((50, 50), density=0.25),
)
def test_unary(func, y):
x = y.todense()
xx = func(x)
yy = func(y)
assert_eq(xx, yy)
@settings(deadline=None)
@given(
arg_order=st.sampled_from([(0, 1), (1, 0), (1, 1)]),
func=st.sampled_from([np.dot, np.result_type, np.tensordot, np.matmul]),
y=gen_sparse_random((50, 50), density=0.25),
)
def test_binary(func, arg_order, y):
x = y.todense()
xx = func(x, x)
args = [(x, y)[i] for i in arg_order]
yy = func(*args)
if isinstance(xx, np.ndarray):
assert_eq(xx, yy)
else:
# result_type returns a dtype
assert xx == yy
@given(y=gen_sparse_random((50, 50), density=0.25))
def test_stack(y):
"""stack(), by design, does not allow for mixed type inputs"""
x = y.todense()
    xx = np.stack([x, x])
    yy = np.stack([y, y])
    assert_eq(xx, yy)
# packages
import os
import sys
import json
import time
import requests
import datetime
import argparse
import sseclient
import numpy as np
import multiprocessing as mpr
# plotting
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from matplotlib.colors import Normalize
# set matplotlib backend
matplotlib.use('TkAgg')
# project
import heatmap.helpers as hlp
import heatmap.miniclasses as mcl
class Director():
"""
Handles all API interfacing, including fetching sensors list and updating them.
Imports room layout and calculates euclidean distance maps during initialisation.
When new event data arrives in stream, delegate to the correct sensor for update.
"""
def __init__(self, username='', password='', project_id='', api_url_base='', t_range=[0, 40], resolution=5, cache_dir='/tmp/', pickle_id='hmap_'):
"""
Initialise Director class.
Parameters
----------
username : str
DT Studio service account key.
password : str
DT Studio service account secret.
project_id : str
DT Studio project identifier.
api_url_base : str
Endpoint for API.
t_range : [float, float]
Temperature range [min, max] used in visualization.
resolution : int
Number of points per meter in heatmap grid.
cache_dir : str
Absolute path to directory used for caching distance maps.
pickle_id : str
Identifier used for files cached in cache_dir.
"""
# give to self
self.username = username
self.password = password
self.project_id = project_id
self.api_url_base = api_url_base
self.t_range = t_range
self.resolution = resolution
self.cache_dir = cache_dir
self.pickle_id = pickle_id
# variables
self.last_update = -1
self.sample = False
self.cc = 0
# set stream endpoint
self.stream_endpoint = "{}/projects/{}/devices:stream".format(self.api_url_base, self.project_id)
# parse system arguments
self.__parse_sysargs()
# set history- and streaming filters
self.__set_filters()
# inherit rooms layout
self.__decode_json_layout()
# get limits for x- and y- axes
self.__generate_bounding_box()
# generate distance map for each sensor
if self.args['debug']:
self.__euclidean_map_debug()
else:
self.__euclidean_map_threaded()
# spawn heatmap
self.heatmap = np.zeros(shape=self.X.shape)
# check if sample is set
if self.sample:
            print('\nUsing sample layout. No historic or streaming data will be used.')
print('For this, provide a layout using the --layout argument.')
self.update_heatmap()
self.plot_heatmap(update_time='Sample Layout', show=True)
sys.exit()
def __parse_sysargs(self):
"""
Parse for command line arguments.
"""
# create parser object
parser = argparse.ArgumentParser(description='Heatmap generation on Stream and Event History.')
# get UTC time now
now = (datetime.datetime.utcnow().replace(microsecond=0)).isoformat() + 'Z'
# general arguments
parser.add_argument('--layout', metavar='', help='Json file with room layout.', required=False)
parser.add_argument('--starttime', metavar='', help='Event history UTC starttime [YYYY-MM-DDTHH:MM:SSZ].', required=False, default=now)
parser.add_argument('--endtime', metavar='', help='Event history UTC endtime [YYYY-MM-DDTHH:MM:SSZ].', required=False, default=now)
parser.add_argument('--timestep', metavar='', help='Heatmap update period.', required=False, default=3600, type=int)
# boolean flags
parser.add_argument('--no-plot', action='store_true', help='Suppress plots in stream.')
parser.add_argument('--debug', action='store_true', help='Disables multithreading for debug visualization.')
parser.add_argument('--read', action='store_true', help='Import cached distance maps.')
# convert to dictionary
self.args = vars(parser.parse_args())
# set history flag
if now == self.args['starttime']:
self.fetch_history = False
else:
self.fetch_history = True
def __new_event_data(self, event_data, cout=True):
"""
Receive new event_data json and pass it along to the correct room instance.
Parameters
----------
event_data : dictionary
Data json containing new event data.
cout : bool
Will print event information to console if True.
"""
# get id of source sensor
source_id = os.path.basename(event_data['targetName'])
# verify temperature event
if 'temperature' in event_data['data'].keys():
# check if sensor is in this room
for sensor in self.sensors + self.oofs:
if source_id == sensor.sensor_id:
# give data to room
sensor.new_event_data(event_data)
if cout: print('-- New temperature {} for {} at [{}, {}].'.format(event_data['data']['temperature']['value'], source_id, sensor.x, sensor.y))
return True
elif 'objectPresent' in event_data['data']:
# find correct door
for door in self.doors:
if source_id == door.sensor_id:
# give state to door
door.new_event_data(event_data)
if cout: print('-- New door state {} for {} at [{}, {}].'.format(event_data['data']['objectPresent']['state'], source_id, door.x, door.y))
return True
return False
def __check_timestep(self, unixtime):
"""
Check if more time than --timestep has passed since last heatmap update.
Parameters
----------
unixtime : int
Seconds since 01-Jan 1970.
Returns
-------
return : bool
True if time to update heatmap.
False if we're still waiting.
"""
# check time since last update
if self.last_update < 0:
# update time to this event time
self.last_update = unixtime
return False
elif unixtime - self.last_update > self.args['timestep']:
# update timer to this event time
self.last_update = unixtime
return True
def __decode_json_layout(self):
"""
Parse json layout file and spawn related class objects.
"""
# import json to dictionary
if self.args['layout'] != None:
path = self.args['layout']
else:
path = os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'config', 'sample_layout.json')
self.sample = True
jdict = hlp.import_json(path)
# count rooms and doors
n_rooms = len(jdict['rooms'])
n_doors = len(jdict['doors'])
# initialise object lists
self.rooms = [mcl.Room() for i in range(n_rooms)]
self.doors = [mcl.Door() for i in range(n_doors)]
# get rooms in dict
for ri in range(n_rooms):
# isolate room
jdict_room = jdict['rooms'][ri]
# count corners and sensors
n_corners = len(jdict_room['corners'])
n_sensors = len(jdict_room['sensors'])
# adopt name
self.rooms[ri].name = jdict_room['name']
# give room list of corner and sensor objects
self.rooms[ri].corners = [mcl.Corner(x=None, y=None) for i in range(n_corners)]
self.rooms[ri].sensors = [mcl.Sensor(x=None, y=None) for i in range(n_sensors)]
# update corners
for ci in range(n_corners):
# isolate json corner and give to room corner
jdict_corner = jdict_room['corners'][ci]
self.rooms[ri].corners[ci].set_coordinates(x=jdict_corner['x'], y=jdict_corner['y'])
# update sensors
for si in range(n_sensors):
# isolate json sensor and give to room sensor
jdict_sensor = jdict_room['sensors'][si]
self.rooms[ri].sensors[si].post_initialise(jdict_sensor['x'], jdict_sensor['y'], jdict_sensor['sensor_id'], room_number=ri)
# give t0 if exists
if 't0' in jdict_sensor:
self.rooms[ri].sensors[si].t = jdict_sensor['t0']
# get doors in dict
for di in range(n_doors):
# isolate doors
jdict_door = jdict['doors'][di]
# find rooms which door connects
r1 = None
r2 = None
for room in self.rooms:
if room.name == jdict_door['room1']:
r1 = room
if room.name == jdict_door['room2']:
r2 = room
# exit if rooms not found. Error in layout.
if r1 == None or r2 == None:
hlp.print_error('Error in layout. Door [{}] not connected to [{}] and [{}].'.format(jdict_door['name'], jdict_door['room1'], jdict_door['room2']), terminate=True)
# reformat for easier updating
p1 = [jdict_door['p1']['x'], jdict_door['p1']['y']]
p2 = [jdict_door['p2']['x'], jdict_door['p2']['y']]
# give variables to door object
self.doors[di].post_initialise(p1, p2, r1, r2, jdict_door['sensor_id'], di)
# give state if it exists
if 'closed' in jdict_door:
self.doors[di].closed = jdict_door['closed']
# adopt all sensors to self
self.sensors = []
for room in self.rooms:
for sensor in room.sensors:
self.sensors.append(sensor)
self.n_sensors = len(self.sensors)
# get objects of interest in dict
n_oofs = len(jdict['oofs'])
self.oofs = [mcl.Sensor(x=None, y=None) for i in range(n_oofs)]
for i, oof in enumerate(jdict['oofs']):
self.oofs[i].post_initialise(x=oof['x'], y=oof['y'], sensor_id=oof['sensor_id'], room_number=None)
# give t0 if exists
if 't0' in oof:
self.oofs[i].t = oof['t0']
def __generate_bounding_box(self):
"""
Set grid dimension limits based on layout corners.
"""
# find limits for x- and y-axis
self.xlim = [0, 0]
self.ylim = [0, 0]
# iterate rooms
for room in self.rooms:
# iterate corners in room:
for c in room.corners:
if c.x < self.xlim[0]:
self.xlim[0] = c.x
if c.x > self.xlim[1]:
self.xlim[1] = c.x
if c.y < self.ylim[0]:
self.ylim[0] = c.y
if c.y > self.ylim[1]:
self.ylim[1] = c.y
# rounding
self.xlim = [int(np.floor(self.xlim[0])), int(np.ceil(self.xlim[1]))]
self.ylim = [int(np.floor(self.ylim[0])), int(np.ceil(self.ylim[1]))]
# set maximum dimension for any axis
self.maxdim = max(self.xlim[1]-self.xlim[0], self.ylim[1]-self.ylim[0])
# generate interpolation axes
self.x_interp = np.linspace(self.xlim[0], self.xlim[1], int(self.resolution*(self.xlim[1]-self.xlim[0])+0.5))
self.y_interp = np.linspace(self.ylim[0], self.ylim[1], int(self.resolution*(self.ylim[1]-self.ylim[0])+0.5))
# convert to compatible grid
self.X, self.Y = np.meshgrid(self.x_interp, self.y_interp)
def __populate_grid(self, D, N, M, corner, room):
"""
Scan matrix and populate with euclidean distance for cells in line of sight of corner.
Parameters
----------
D : 2d ndarray
Distance matrix to be populated.
N : 2d ndarray
Matrix holding the number of doors on the shortest path to each cell.
M : list of lists
Per-cell lists of door numbers visited on the shortest path.
corner : object
Corner Point object for which we check line of sight.
room : object
Room object the corner belongs to.
Returns
-------
D, N, M
Populated distance-, door count- and door number maps.
"""
# iterate x- and y-axis axis
for x, gx in enumerate(self.x_interp):
for y, gy in enumerate(self.y_interp):
# set active node
node = mcl.Point(self.x_interp[x], self.y_interp[y])
# get distance from corner to node if in line of sight
if not self.__has_direct_los(mcl.Point(corner.x+corner.dx, corner.y+corner.dy), node, room):
continue
d = hlp.euclidean_distance(corner.x, corner.y, node.x, node.y)
# update map if d is a valid value
if d != None:
# add distance from sensor to corner
d += corner.dmin
# update map if less than existing value
if D[y, x] == 0 or d < D[y, x]:
D[y, x] = d
N[y, x] = len(corner.visited_doors)
M[y][x] = [door.number for door in corner.visited_doors]
return D, N, M
def __reset_pathfinding_variables(self):
"""
Reset room, corner and door variables to their initial state.
"""
for room in self.rooms:
for corner in room.corners:
corner.dmin = None
corner.shortest_path = []
corner.visited_doors = []
corner.unused = True
for door in self.doors:
door.unused = True
for of in [door.o1, door.o2]:
of.dmin = None
of.shortest_path = []
of.visited_doors = []
def __euclidean_map_debug(self):
"""
Debug version of the euclidean distance mapping routine.
Does the same as __euclidean_map_threaded(), but without multithreading.
"""
# iterate sensors
for i, sensor in enumerate(self.sensors):
# initialise sensor distance map
sensor.emap = np.zeros(shape=self.X.shape)
# reset room corner distances
self.__reset_pathfinding_variables()
# recursively find shortest distance to all valid corners
path = []
doors = []
_, _ = self.__find_shortest_paths(sensor, self.rooms[sensor.room_number], path, doors, dr=0)
# initialise grids
sensor.D = np.zeros(shape=self.X.shape)
sensor.N = np.zeros(shape=self.X.shape)
sensor.M = [[[] for y in range(self.X.shape[1])] for x in range(self.X.shape[0])]
# populate map from sensor point of view
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, sensor, self.rooms[sensor.room_number])
if 1:
self.plot_debug(start=sensor, grid=[sensor.D])
# populate grid with distances from each corner
for ri, room in enumerate(self.rooms):
# fill from doors
for di, door in enumerate(self.doors):
print('Sensor {}, Room {}, Door {}'.format(i, ri, di))
if door.outbound_room == room:
offset_node = door.outbound_offset
if len(offset_node.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, offset_node, room)
# plot population process
if 1:
self.plot_debug(start=sensor, grid=[sensor.D], paths=offset_node.shortest_path)
# fill from corners
for ci, corner in enumerate(room.corners):
print('Sensor {}, Room {}, Corner {}'.format(i, ri, ci))
if len(corner.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, corner, room)
# plot population process
if 1:
self.plot_debug(start=sensor, grid=[sensor.D], paths=corner.shortest_path)
# plot population result
if 1:
self.plot_debug(start=sensor, grid=[sensor.D])
def __euclidean_map_threaded(self):
"""
Generate euclidean distance map for each sensor.
Applies multiprocessing for a significant reduction in execution time.
"""
def map_process(sensor, i):
"""
Same as __euclidean_map_threaded() but must be isolated in a function for multiprocessing.
Writes populated distance maps to cache_dir so that we only have to do this once. It's slow.
Parameters
----------
sensor : object
Sensor object with coordinates and temperature information.
i : int
Sensor number in list.
"""
self.__reset_pathfinding_variables()
# recursively find shortest path from sensor to all corners
path = []
doors = []
_, _ = self.__find_shortest_paths(sensor, self.rooms[sensor.room_number], path, doors, dr=0)
# initialise grids
sensor.D = np.zeros(shape=self.X.shape)
sensor.N = np.zeros(shape=self.X.shape)
sensor.M = [[[] for y in range(self.X.shape[1])] for x in range(self.X.shape[0])]
# populate map from sensor point of view
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, sensor, self.rooms[sensor.room_number])
# populate grid with distances from each corner
for ri, room in enumerate(self.rooms):
# fill from doors
for di, door in enumerate(self.doors):
print('Populating distance map: sensor {:>3}, room {:>3}, door {:>3}'.format(i, ri, di))
if door.outbound_room == room:
offset_node = door.outbound_offset
if len(offset_node.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, offset_node, room)
# fill from corners
for ci, corner in enumerate(room.corners):
print('Populating distance map: sensor {:>3}, room {:>3}, corner {:>3}'.format(i, ri, ci))
if len(corner.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, corner, room)
# write sensor object to pickle
hlp.write_pickle(sensor, os.path.join(self.cache_dir, self.pickle_id + '{}.pkl'.format(i)), cout=True)
# just skip everything and read from cache if so desired
if self.args['read']:
self.__get_cached_sensors()
return
# initialise variables needed for process
procs = []
nth_proc = 0
# iterate sensors
for i, sensor in enumerate(self.sensors):
# spawn a process per sensor
proc = mpr.Process(target=map_process, args=(sensor, i))
procs.append(proc)
proc.start()
print('-- Process #{} spawned.'.format(nth_proc))
nth_proc = nth_proc + 1
# wait for each individual process to finish
nth_proc = 0
for proc in procs:
proc.join()
print('-- Process #{} completed.'.format(nth_proc))
nth_proc = nth_proc + 1
# fetch sensors from cache
self.__get_cached_sensors()
def __get_cached_sensors(self):
"""
Exchange self.sensors with sensors cached in cache_dir.
Usually called to recover previously calculated distance maps.
"""
# get files in cache
cache_files = os.listdir(self.cache_dir)
# iterate sensors
for i in range(self.n_sensors):
# keep track of if we found the pickle
found = False
# iterate files in cache
for f in cache_files:
# look for correct pickle
if self.pickle_id + '{}.pkl'.format(i) in f and not found:
# read pickle
pickle_path = os.path.join(self.cache_dir, self.pickle_id + '{}.pkl'.format(i))
pickle_sensor = hlp.read_pickle(pickle_path, cout=True)
# exchange
self.sensors[i].D = pickle_sensor.D
self.sensors[i].N = pickle_sensor.N
self.sensors[i].M = pickle_sensor.M
# found it
found = True
# shouldn't happen, but just in case
if not found:
hlp.print_error('Pickle at [{}] does not exist. Try running without --read.'.format(pickle_path), terminate=True)
def __find_shortest_paths(self, start, room, path, doors, dr):
"""
Recursively find the shortest path from sensor to every corner in layout.
Parameters
----------
start : object
Point object of where we currently have the point of view.
room : object
Room object of which room we are currently in.
path : list
List of previously visited points in the current recursive branch.
doors : list
List of doors which have been passed through in the current recursive branch.
dr : float
Total distance traveled from initial sensor start location.
Returns
-------
path : list
List of visited points in the current recursive branch, including current.
doors : list
List of doors which have been passed through in the current recursive branch.
"""
# append path with active node
path.append(start)
# stop if we've been here before on a shorter path
if start.dmin != None and dr > start.dmin:
return path, doors
# as this is currently the shortest path from sensor to active, copy it to active
start.dmin = dr
start.shortest_path = [p for p in path]
start.visited_doors = [d for d in doors]
# find candidate corners for path expansion
corner_candidates = self.__get_corner_candidates(start, room)
door_candidates = self.__get_door_candidates(start, room)
# plot candidates
if 0:
self.plot_debug(start=start, goals=corner_candidates + door_candidates, show=False)
# recursively iterate candidates
for c in corner_candidates:
# calculate distance to candidate
ddr = hlp.euclidean_distance(start.x, start.y, c.x, c.y)
# recursive
path, doors = self.__find_shortest_paths(c, room, path, doors, dr+ddr)
path.pop()
for c in corner_candidates:
c.unused = True
for d in door_candidates:
# calculate distance to candidate
ddr = hlp.euclidean_distance(start.x, start.y, d.inbound_offset.x, d.inbound_offset.y)
# fix offset
d.outbound_offset.dx = 0
d.outbound_offset.dy = 0
# append to doors list
doors.append(d)
# recursive
path, doors = self.__find_shortest_paths(d.outbound_offset, d.outbound_room, path, doors, dr+ddr)
# pop lists as we're back to current depth
path.pop()
doors.pop()
for d in door_candidates:
d.unused = True
return path, doors
def __get_corner_candidates(self, start, room):
"""
Return a list of corners which can be used as next step in recursive __find_shortest_paths().
Parameters
----------
start : object
Point object of where we currently have the point of view.
room : object
Room object of which room we are currently in.
Returns
-------
candidates : list
List of corners in room which can be used for next recursive step.
"""
# initialise list
candidates = []
# iterate corners in room
for i, corner in enumerate(room.corners):
# skip visited corners
if not corner.unused:
continue
# get offset
dx, dy = self.__corner_offset(room.corners, i)
# check if corner is candidate material
if self.__has_direct_los(mcl.Point(start.x+start.dx, start.y+start.dy), mcl.Point(corner.x+dx, corner.y+dy), room):
corner.dx = dx
corner.dy = dy
candidates.append(corner)
corner.unused = False
return candidates
def __get_door_candidates(self, start, room):
"""
Return a list of doors which can be passed through as next step in recursive __find_shortest_paths().
Parameters
----------
start : object
Point object of where we currently have the point of view.
room : object
Room object of which room we are currently in.
Returns
-------
candidates : list
List of doors in room which can be passed through.
"""
# initialise list
candidates = []
# iterate doors in the layout
for door in self.doors:
# skip visited doors
if not door.unused:
continue
# check if we have LOS to either offset
offset_start = mcl.Point(start.x+start.dx, start.y+start.dy)
if self.__has_direct_los(offset_start, door.o1, room):
if room == door.room1:
door.outbound_room = door.room2
else:
door.outbound_room = door.room1
door.inbound_offset = door.o1
door.outbound_offset = door.o2
candidates.append(door)
door.unused = False
elif self.__has_direct_los(offset_start, door.o2, room):
if room == door.room1:
door.outbound_room = door.room2
else:
door.outbound_room = door.room1
door.inbound_offset = door.o2
door.outbound_offset = door.o1
candidates.append(door)
door.unused = False
return candidates
def __has_direct_los(self, start, goal, room):
"""
Check if start has line of sight (LOS) to goal.
Parameters
----------
start : object
Point object used as point of view.
goal : object
Point object we check if we have LOS to.
Returns
-------
return : bool
True if start has line of sight (LOS) to goal, False if a wall blocks it.
"""
# check if los
for i in range(len(room.corners)):
# two corners define a wall which can be intersected
ir = i + 1
if ir > len(room.corners)-1:
ir = 0
if self.__line_intersects(start, goal, room.corners[i], room.corners[ir]):
return False
return True
def __line_intersects(self, p1, q1, p2, q2):
"""
Determine if the two line segments p1-q1 and p2-q2 intersect in 2-D space.
Parameters
----------
p1 : object
Point at the start of the first segment.
q1 : object
Point at the end of the first segment.
p2 : object
Point at the start of the second segment.
q2 : object
Point at the end of the second segment.
Returns
-------
return : bool
True if lines intersect.
False if no intersect.
"""
# find the 4 orientations required for the general and special cases
o1 = self.__orientation(p1, q1, p2)
o2 = self.__orientation(p1, q1, q2)
o3 = self.__orientation(p2, q2, p1)
o4 = self.__orientation(p2, q2, q1)
# General case
if ((o1 != o2) and (o3 != o4)):
return True
# special Cases
# p1 , q1 and p2 are colinear and p2 lies on segment p1q1
if ((o1 == 0) and self.__on_segment(p1, p2, q1)):
return True
# p1 , q1 and q2 are colinear and q2 lies on segment p1q1
if ((o2 == 0) and self.__on_segment(p1, q2, q1)):
return True
# p2 , q2 and p1 are colinear and p1 lies on segment p2q2
if ((o3 == 0) and self.__on_segment(p2, p1, q2)):
return True
# p2 , q2 and q1 are colinear and q1 lies on segment p2q2
if ((o4 == 0) and self.__on_segment(p2, q1, q2)):
return True
# if none of the cases
return False
def __orientation(self, p, q, r):
"""
Find the orientation of an ordered triplet (p,q,r) function.
See https://www.geeksforgeeks.org/orientation-3-ordered-points/amp/ for details.
Parameters
----------
p : object
First Point of the triplet.
q : object
Second Point of the triplet.
r : object
Third Point of the triplet.
Returns
-------
return : int
0 if colinear points
1 if clockwise points
2 if counterclockwise
"""
val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))
if (val > 0):
# Clockwise orientation
return 1
elif (val < 0):
# Counterclockwise orientation
return 2
else:
# Colinear orientation
return 0
def __on_segment(self, p, q, r):
"""
Determine if q is on the segment p-r.
Parameters
----------
p : object
First Point of the segment.
q : object
Point to test for membership on the segment.
r : object
Second Point of the segment.
Returns
-------
return : bool
True if on segment.
False if not on segment.
"""
if ( (q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and
(q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):
return True
return False
def __corner_offset(self, corners, i, eps=1/1e3):
"""
Generate a tiny offset in corner convex direction.
Parameters
----------
corners : list
List of corner objects in a room.
i : int
Index of current corner of interest in corner list.
eps : float
Distance of offset. Should be small.
Returns
-------
x_offset : float
Offset in the x-direction.
y_offset : float
Offset in the y-direction.
"""
# circular buffer behavior for list edges
il = i - 1
if il < 0:
il = -1
ir = i + 1
if ir > len(corners) - 1:
ir = 0
# isolate corner triplet around corner of interest
pl = corners[il]
pc = corners[i]
pr = corners[ir]
# get convex direction of corner triplet
mx = np.sign(((pc.x - pl.x) + (pc.x - pr.x)) / 2)
my = np.sign(((pc.y - pl.y) + (pc.y - pr.y)) / 2)
# plot for debugging purposes
if 0:
plt.cla()
for room in self.rooms:
xx, yy = room.get_outline()
plt.plot(xx, yy, '-k', linewidth=3)
plt.plot(pl.x, pl.y, 'or')
plt.plot(pr.x, pr.y, 'og')
plt.plot(pc.x, pc.y, 'ok')
plt.plot([pc.x, pl.x], [pc.y, pl.y], 'o-r', linewidth=3)
plt.plot([pc.x, pr.x], [pc.y, pr.y], 'o-g', linewidth=3)
plt.plot([pc.x, pc.x+mx], [pc.y, pc.y+my], 'o--k')
plt.waitforbuttonpress()
# multiply by epsilon
x_offset = mx * eps
y_offset = my * eps
return x_offset, y_offset
def update_heatmap(self):
"""
Using calculated distance- and door maps, update heatmap with temperature data.
"""
# iterate x- and y-axis axis
for x, gx in enumerate(self.x_interp):
for y, gy in enumerate(self.y_interp):
# reset lists
temperatures = []
distances = []
weights = []
# iterate sensors
for room in self.rooms:
for sensor in room.sensors:
los = True
# check if doors in path are closed
if len(sensor.M[y][x]) > 0:
for door in self.doors:
if door.closed and door.number in sensor.M[y][x]:
los = False
# check if distance grid is valid here
if los and sensor.D[y, x] > 0 and sensor.t != None:
temperatures.append(sensor.t)
distances.append(sensor.D[y, x])
# do nothing if no valid distances
if len(distances) == 0:
self.heatmap[y, x] = None
elif len(distances) == 1:
self.heatmap[y, x] = temperatures[0]
else:
# calculate weighted average
weights = (1/(np.array(distances)))**2
temperatures = | np.array(temperatures) | numpy.array |
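# A small standalone sketch of the weighting used in update_heatmap above:
# inverse-distance-squared (IDW) averaging, in which nearby sensors dominate the
# interpolated cell temperature. The readings and path distances are assumed toy
# values, not taken from a real sensor layout.
import numpy as np
temperatures = np.array([21.0, 23.5, 19.0])   # sensor readings
distances = np.array([1.5, 4.0, 2.5])         # shortest-path distances to the cell
weights = (1.0 / distances) ** 2
cell_temperature = np.sum(weights * temperatures) / np.sum(weights)
print(round(cell_temperature, 2))             # ~20.8, dominated by the closest sensor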
import gettext
import unittest
import numpy
import scipy.ndimage
# local libraries
from nion.swift import Facade
from nion.data import DataAndMetadata
from nion.swift.test import TestContext
from nion.ui import TestUI
from nion.swift import Application
from nion.swift.model import DocumentModel
from nionswift_plugin.nion_experimental_tools import MultiDimensionalProcessing
_ = gettext.gettext
Facade.initialize()
def create_memory_profile_context() -> TestContext.MemoryProfileContext:
return TestContext.MemoryProfileContext()
class TestMultiDimensionalProcessing(unittest.TestCase):
def setUp(self):
self.app = Application.Application(TestUI.UserInterface(), set_global=True)
self.app.workspace_dir = str()
def tearDown(self):
pass
def test_function_apply_multi_dimensional_shifts_4d(self):
with self.subTest("Test for a sequence of SIs, shift collection dimensions along sequence axis"):
shape = (5, 2, 3, 4)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 1))
shifts = numpy.array([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "collection")
shifted = numpy.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimage.shift(data[i], [shifts[i, 0], shifts[i, 1], 0.0], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
with self.subTest("Test for a sequence of 1D collections of 2D data, shift data dimensions along sequence axis"):
shape = (5, 2, 3, 4)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 1, 2))
shifts = numpy.array([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = numpy.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimage.shift(data[i], [0.0, shifts[i, 0], shifts[i, 1]], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
with self.subTest("Test for a sequence of SIs, shift data dimensions along collection and sequence axis"):
shape = (5, 2, 3, 4)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 1))
shifts = numpy.linspace(0, 3, num=numpy.prod(shape[:-1])).reshape(shape[:-1])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = numpy.empty_like(data)
for k in range(shape[0]):
for i in range(shape[1]):
for j in range(shape[2]):
shifted[k, i, j] = scipy.ndimage.shift(data[k, i, j], [shifts[k, i, j]], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
def test_function_apply_multi_dimensional_shifts_5d(self):
with self.subTest("Test for a sequence of 4D images, shift collection dimensions along sequence axis"):
shape = (5, 2, 3, 4, 6)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = numpy.array([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "collection")
shifted = numpy.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimage.shift(data[i], [shifts[i, 0], shifts[i, 1], 0.0, 0.0], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
with self.subTest("Test for a sequence of 4D images, shift data dimensions along sequence axis"):
shape = (5, 2, 3, 4, 6)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = numpy.array([(0., 1.), (0., 2.), (0., 3.), (0., 4.), (0., 5.)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "data")
shifted = numpy.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimage.shift(data[i], [0.0, 0.0, shifts[i, 0], shifts[i, 1]], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
with self.subTest("Test for a sequence of 4D images, shift sequence dimension along collection axis"):
shape = (5, 2, 3, 4, 6)
data = numpy.arange(numpy.prod(shape)).reshape(shape)
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 2, 2))
shifts = numpy.array([(1., 1.5, 2.),
(2.5, 3., 3.5)])
result = MultiDimensionalProcessing.function_apply_multi_dimensional_shifts(xdata, shifts, "sequence")
shifted = numpy.empty_like(data)
for k in range(shape[1]):
for i in range(shape[2]):
shifted[:, k, i] = scipy.ndimage.shift(data[:, k, i], [shifts[k, i], 0., 0.], order=1)
self.assertTrue(numpy.allclose(result.data, shifted))
def test_function_measure_multi_dimensional_shifts_3d(self):
with self.subTest("Test for a sequence of 2D data, measure shift of data dimensions along sequence axis"):
shape = (5, 100, 100)
reference_index = 0
data = numpy.random.rand(*shape[1:])
data = scipy.ndimage.gaussian_filter(data, 3.0)
data = numpy.repeat(data[numpy.newaxis, ...], shape[0], axis=0)
shifts = numpy.array([(0., 2.), (0., 5.), (0., 10.), (0., 2.5), (0., 3.)])
shifted = numpy.empty_like(data)
for i in range(shape[0]):
shifted[i] = scipy.ndimage.shift(data[i], [shifts[i, 0], shifts[i, 1]], order=1, cval=numpy.mean(data))
shifted_xdata = DataAndMetadata.new_data_and_metadata(shifted, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
result = MultiDimensionalProcessing.function_measure_multi_dimensional_shifts(shifted_xdata,
"data",
reference_index=reference_index)
self.assertTrue(numpy.allclose(result.data, -1.0 * (shifts - shifts[reference_index]), atol=0.5))
with self.subTest("Test for a 2D collection of 1D data, measure shift of data dimensions along collection axis"):
shape = (5, 5, 100)
reference_index = 0
data = numpy.random.rand(*shape[2:])
data = scipy.ndimage.gaussian_filter(data, 3.0)
data = numpy.repeat(numpy.repeat(data[numpy.newaxis, ...], shape[1], axis=0)[numpy.newaxis, ...], shape[0], axis=0)
shifts = numpy.random.rand(*shape[:2]) * 10.0
shifted = numpy.empty_like(data)
for i in range(shape[0]):
for j in range(shape[1]):
shifted[i, j] = scipy.ndimage.shift(data[i, j], [shifts[i, j]], order=1, cval=numpy.mean(data))
shifted_xdata = DataAndMetadata.new_data_and_metadata(shifted, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 1))
result = MultiDimensionalProcessing.function_measure_multi_dimensional_shifts(shifted_xdata,
"data",
reference_index=reference_index)
self.assertTrue(numpy.allclose(result.data, -1.0 * (shifts - shifts[numpy.unravel_index(reference_index, shifts.shape)]), atol=0.5))
with self.subTest("Test for a sequence of 2D data, measure shift of data dimensions along sequence axis relative to previous slice"):
shape = (5, 100, 100)
data = numpy.random.rand(*shape[1:])
data = scipy.ndimage.gaussian_filter(data, 3.0)
data = | numpy.repeat(data[numpy.newaxis, ...], shape[0], axis=0) | numpy.repeat |
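# A brief sketch of the shift-and-recover pattern these tests verify: apply a known
# sub-pixel shift with scipy.ndimage.shift (order-1 interpolation) and undo it,
# comparing away from the zero-padded edges. Shown in 1-D for brevity.
import numpy as np
import scipy.ndimage
signal = np.sin(np.linspace(0, 4 * np.pi, 200))
shifted = scipy.ndimage.shift(signal, [2.5], order=1)     # shift right by 2.5 samples
restored = scipy.ndimage.shift(shifted, [-2.5], order=1)  # shift back
print(np.allclose(signal[5:-5], restored[5:-5], atol=1e-2))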
"""
Shared and general data handling functionality.
"""
import json
import os
import pickle
import numpy as np
from sklearn.utils import shuffle
def index_make_random_shuffle(x):
"""
Shuffle indexarray.
Args:
x (np.array): Index to shuffle.
Returns:
np.array: Shuffled index.
"""
return shuffle(x)
def make_random_shuffle(datalist, shuffle_ind=None):
"""
Shuffle a list of data.
Args:
datalist (list): List of numpy arrays of same length (axis=0).
shuffle_ind (np.array): Array of shuffled index
Returns:
outlist (list): List of the shuffled data.
"""
datalen = len(datalist[0]) # this should be x data
for x in datalist:
if len(x) != datalen:
print("Error: Data has inconsistent length")
if shuffle_ind is None:
allind = shuffle(np.arange(datalen))
else:
allind = shuffle_ind
if len(allind) != datalen:
print("Warning: Data length and shuffle index do not match")
outlist = []
for x in datalist:
outlist.append(x[allind])
return allind, outlist
def save_data_to_folder(x, y, target_model, mod_dir, random_shuffle):
"""
Save all training data for model mlp_eg to folder.
Args:
x (np.array): Coordinates as x-data.
y (list): A possible list of np.arrays for y-values. Energy, Gradients, NAC etc.
target_model (str): Name of the Model to save data for.
mod_dir (str): Path of model directory.
random_shuffle (bool, optional): Whether to shuffle data before save. The default is False.
Returns:
None.
"""
# Save data:
if not random_shuffle:
with open(os.path.join(mod_dir, 'data_x'), 'wb') as f:
pickle.dump(x, f)
with open(os.path.join(mod_dir, 'data_y'), 'wb') as f:
pickle.dump(y, f)
else:
if isinstance(y, list):
shuffle_list = [x] + y
else:
shuffle_list = [x] + [y]
# Make random shuffle
ind_shuffle, datalist = make_random_shuffle(shuffle_list)
x_out = datalist[0]
if len(datalist) > 2:
y_out = datalist[1:]
else:
y_out = datalist[1]
np.save(os.path.join(mod_dir, 'shuffle_index.npy'), ind_shuffle)
with open(os.path.join(mod_dir, 'data_x'), 'wb') as f:
pickle.dump(x_out, f)
with open(os.path.join(mod_dir, 'data_y'), 'wb') as f:
pickle.dump(y_out, f)
def split_validation_training_index(allind, splitsize, do_offset, offset_steps):
"""
Make a train-validation split for indexarray. Validation set is taken from beginning with possible offset.
Args:
allind (np.array): Indexlist for full dataset of same length.
splitsize (int): Total number of validation samples to take.
do_offset (bool): Whether to take the validation set not from the beginning but with an offset.
offset_steps (int): Number of validation-sized blocks to skip from the beginning before taking the validation set.
Returns:
i_train (np.array): Training indices
i_val (np.array): Validation indices.
"""
i = offset_steps
lval = splitsize
if not do_offset:
i_val = allind[:lval]
i_train = allind[lval:]
else:
i_val = allind[i * lval:(i + 1) * lval]
i_train = | np.concatenate([allind[0:i * lval], allind[(i + 1) * lval:]], axis=0) | numpy.concatenate |
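# A compact illustration of the offset train/validation split implemented above:
# the validation block of size splitsize slides along the (already shuffled) index
# by offset_steps blocks, which supports simple k-fold style splits. The index
# values below are assumed toy data.
import numpy as np
allind = np.arange(10)
splitsize, offset_steps = 3, 1
i_val = allind[offset_steps * splitsize:(offset_steps + 1) * splitsize]
i_train = np.concatenate([allind[:offset_steps * splitsize],
                          allind[(offset_steps + 1) * splitsize:]], axis=0)
print(i_val)    # [3 4 5]
print(i_train)  # [0 1 2 6 7 8 9]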
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
import subprocess
points = [ # define the shape
[ 0.8,-np.sqrt(6**2 - .8**2) + 1e-14, -1],
[ 0.8, 0.0, 1],
[ 3.5, 0.0, 1],
[ 3.5, 3.0, 1],
[-3.0, 3.0, 1],
[-5.0, 0.0, 1],
[-0.8, 0.0, 1],
[-0.8,-np.sqrt(6**2 - .8**2) + 1e-14, -1],
]
for i in range(len(points), 0, -1): # populate edges
p0, p1 = points[i-1], points[i%len(points)]
for c in np.linspace(0, 1, int(36*np.hypot(p1[0] - p0[0], p1[1] - p0[1])))[1:-1]:
points.insert(i, [c*p0[0] + (1-c)*p1[0], c*p0[1] + (1-c)*p1[1], max(p0[2], p1[2])])
points = np.array(points)/6 # convert to numpy array and rescale
for i in range(points.shape[0]): # rotate
points[i,:2] = np.matmul([[np.sqrt(3)/2, 1/2], [-1/2, np.sqrt(3)/2]], points[i,:2])
coords = np.vstack([np.arcsin(points[:,1]), np.arcsin(points[:,0]/np.sqrt(1 - points[:,1]**2))]).T # project
coords[points[:,2] < 0, 1] = -np.pi - coords[points[:,2] < 0, 1]
try:
os.mkdir('../res/frames')
except FileExistsError:
pass
fig = plt.figure()
fig.set_size_inches((1, 1))
ax = plt.Axes(fig, [0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
for i, t in enumerate(range(180, 540, 6)): # draw it
ax.clear()
# ax.fill(np.cos(np.linspace(0, 2*np.pi)), np.sin(np.linspace(0, 2*np.pi)), color='#8393bf')
θ = np.radians(t)
y = np.sin(coords[:,0])
x = np.sqrt(1 - y**2)*np.sin(coords[:,1] + θ)
z = np.sqrt(1 - y**2)*np.cos(coords[:,1] + θ)
if np.any(z > 0): # reproject with longitudinal rotation
side = np.copysign(1, x[np.argmax(np.where(z >= 0, | np.abs(x) | numpy.abs |
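# The in-place rotation above is a standard 2-D rotation matrix for -30 degrees
# (cos 30 = sqrt(3)/2, sin 30 = 1/2). A minimal sketch of the same operation on two
# assumed unit points, applied to row vectors:
import numpy as np
theta = -np.pi / 6
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
pts = np.array([[1.0, 0.0], [0.0, 1.0]])
print(pts @ R.T)   # approximately [[0.866, -0.5], [0.5, 0.866]]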
import numpy as np
def color_libs(index=0):
clibs = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255),(255,0,255)]
index = index%5
return clibs[index]
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
def py_cpu_nms(dets,scores, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
#scores = dets[:, 4]  # bbox scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
#sort scores in descending order and take the indices
order = scores.argsort()[::-1]
#keep holds the boxes that survive suppression
keep = []
while order.size > 0:
#order[0] is the box with the current highest score; always keep it
i = order[0]
keep.append(i)
#compute the overlap area between box i and all remaining boxes
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
#intersection over union gives the IoU
ovr = inter / (areas[i] + areas[order[1:]] - inter)
#inds holds the indices of boxes whose IoU with box i is at most the threshold; the others are suppressed by box i
inds = np.where(ovr <= thresh)[0]
#order keeps only the boxes whose overlap with box i is below the threshold; ovr is one element shorter than order (it excludes i), so inds + 1 maps back into order
order = order[inds + 1]
return keep
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
"""
py_cpu_softnms
:param dets: box coordinate matrix, format [y1, x1, y2, x2]
:param sc: score of each box
:param Nt: IoU overlap threshold
:param sigma: variance used by the Gaussian weighting function
:param thresh: final score threshold
:param method: weighting method to use (1: linear, 2: gaussian, 3: original NMS)
:return: indices of the boxes that are kept
"""
# indexes concatenate boxes with the last column
N = dets.shape[0]
indexes = np.array([np.arange(N)])
dets = np.concatenate((dets, indexes.T), axis=1)
# the order of boxes coordinate is [y1,x1,y2,x2]
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
scores = sc
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
for i in range(N):
# intermediate parameters for later parameters exchange
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
#
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
weight = | np.exp(-(ovr * ovr) / sigma) | numpy.exp |
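# The Gaussian rescoring just computed decays a box's score smoothly with its overlap
# instead of deleting it outright, which is the key idea of soft-NMS. A short standalone
# illustration with assumed values for sigma and the raw score:
import numpy as np
sigma, score = 0.5, 0.9
for iou in (0.1, 0.3, 0.5, 0.7, 0.9):
    weight = np.exp(-(iou * iou) / sigma)   # Gaussian penalty
    print(f"IoU={iou:.1f} -> rescored {score * weight:.3f}")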
import numpy as np
def calculate_reverse_indices(incount, full_index, sparse=False):
"""Calculates the indices that go into each bin in a histogram
and returns an array of values where
ri[index] is list of indices of bins in the source data that
are part of the [index] bin in the histogram.
Called from within grid_data only.
"""
#Two arrays to consider here
#The count in each bin, regardless of data validity (missing data, masks, etc.)
#called count_full, and the full_index that places every element into the output array
#We define two arrays, rii contains arrays of each for each output bin, where each
#array is the list of indices that go into that bin. rjj contains
array_length = incount.size
shp = incount.shape
#create the storage arrays
rii = np.empty(array_length, dtype=np.ndarray)
    rjj = np.zeros(array_length, dtype=int)
#flatten the incount array
c = np.reshape(incount, incount.size)
#calculate the unique values and their indices, and an array that can
#be used to reconstruct the full_index
(fi_value, fi_index, fi_inverse) = \
np.unique(full_index, return_index=True, return_inverse=True)
#count the elements that go into each bin
bc_fi_inverse = np.bincount(fi_inverse)
#IF we're in sparse mode, then we should index into a counter array, instead of the real fi_value array
if sparse:
locations = np.arange(fi_value.size)
else:
locations= fi_value
#new inversion code
temp = np.argsort(fi_inverse)
counter=0
for count,elem in enumerate(locations):
rjj[elem] = bc_fi_inverse[count]
rii[elem] = np.sort(temp[counter:counter+rjj[elem]])# np.where(fi_inverse==count)[0]
counter=counter+rjj[elem]
#The commented-out code should be the equivalent code, but it's slower than the where method above
#If there are problems, change the code back to calculate the full arrays as below.
# #For each pair of value and its count, define an array
# #of the appropriate (count) size
# for loc, val in zip(locations, bc_fi_inverse):
# if val > 0:
# rii[loc] = np.zeros(val, dtype=int)
# #no that we've allocated enough room
# #loop through the inverse array, find its location in the main array outside
# #using the fi_value lookup (we could have just used full_index, but not in sparse mode)
#
#
# for count, val in enumerate(fi_inverse):
# #I think this can be replaced by index=locations[val] after the definition above
# if sparse is False:
# index = fi_value[val]
# else:
# index = val
# #rjj'th element of the riith element is set the location (count) of the value in fi_inverse,
# #which is equivalent to the location in the input array
# rii[index][rjj[index]] = count
# #a counter, to know which element we should write to next.
# rjj[index] = rjj[index] + 1
#finally, reshape, and output
rii = np.reshape(rii, shp)
rjj = np.reshape(rjj, shp)
return rii, rjj
def calculate_statistics(full_index, weights,
bs, square=None, sparse=False, missing_data=None):
"""
Calculates the mean, variance, and count of each bin indexed in full_index with data coming from
weights
"""
#we have two data arrays here. The first is the 'full_index' that identifies
#the bin into which each element goes, and the un-normalized weight of that element
#in weights. In reality, 'weights' is the data we want to grid. The function we call
#later (bincount) happens to consider them as weights to a different array
#but in doing so calculates the statistics appropriately.
#calculate the product of bin sizes to get the largest possible array size
sz = 1
for d in bs:
sz = sz * d
#now call numpy.unique asking for the
#fi_value = values that give a unique array
#fi_index = the indices of the values in fi_value
#fi_inverse = the location of each element in full_index in fi_value
#can be used to reconstruct the full_index value OR used to bin properly later
(fi_value, fi_index, fi_inverse) =\
np.unique(full_index, return_index=True, return_inverse=True)
#now deal with missing data if present. #If we find no real data (len(w[0])==0)
#then our answer is pretty simple as the result is also all zeros (for variance and count),
#or missing for mean.
if missing_data:
w = np.where(weights != missing_data)
if len(w[0]):
index = w[0]
else:
if sparse is False:
m = np.zeros(bs)
return (m + missing_data, m.copy(), m.copy())
else:
s = np.zeros(len(fi_value))
return (s + missing_data, s.copy(), s.copy())
#OR if the arrays are masked numpy arrays, then we perform the same analysis
#but ask whether the mask is
#just a single bool, then perform the same test as above
#an array of bools, then invert it to get the data selection array
#(mask==true -> bad data), (index=true -> good_data)
elif isinstance(weights, np.ma.core.MaskedArray):
if isinstance(weights.mask, np.bool_):
if not weights.mask:
index = slice(None, None, None)
else:
if sparse is False:
m = np.zeros(bs)
return (m + missing_data, m.copy(), m.copy())
else:
s = np.zeros(len(fi_value))
return (s + missing_data, s.copy(), s.copy())
elif isinstance(weights.mask, np.ndarray):
index = ~weights.mask
#OR there's no missing data, select all the data
else:
index = slice(None, None, None)
#for convenience later, we perform a count on all of the data, regardless of
#its validity
count_full = np.bincount(fi_inverse[slice(None, None, None)])
#No calculate the count (unweighted bincount) of the valid data
#and the sum of the data in each bin (weighted by data)
#and the sum of squares (weighted by data**2)
try:
count = np.bincount(fi_inverse[index])
su = np.bincount(fi_inverse[index], weights=weights[index])
var = np.bincount(fi_inverse[index], weights=weights[index] ** 2)
#not sure what exception I'm expecting here, anymore.
except:
count = 0. * count_full
su = 0. * count_full
var = 0. * count_full
#if we DONT want sparse data then the array_length is product of all dimensions
#and the shape is given in the binsize array bs
#if we DO want sparse data, the array_length is the largest number in the unique inverse
#array fi_inverse, which seems like it should eqyal the length of fi_value
if sparse is False:
array_length = sz
shp = bs
else:
array_length = np.max(fi_inverse) + 1
shp = np.max(fi_inverse) + 1
#construct the output arrays
m = np.zeros(array_length)
c = np.zeros(array_length)
cf = np.zeros(array_length)
v = np.zeros(array_length)
w = np.where(count != 0)[0]
#for count_full we can't look for the number of valid elements, because there don't
#have to be any, so we copy the data immediately
if sparse is False:
cf[fi_value] = count_full
else:
cf = count_full #no indexing -> straight copy
#if there is data (w.size>0) then save the data in the output, otherwise
#the initialization to 0 is good enough.
#If NOT sparse, then the indexing is stored in fi_value (where its count is
#nonzero
#if YES sparse, then the indexing is just the locations of nonzero elements
#which I think is every element in w.
forward_index = None
forward_index = fi_value
if w.size:
if sparse is False:
index = fi_value[w]
else:
index = w
#divide the sums by the counts to get means,
m[index] = su[w] / count[w]
v[index] = var[w] / count[w]
c[index] = count[w]
# cf[index] = count_full[w]
#if square is set, return the mean of the square, not the variance.
if square is None:
v = v - m * m
else:
pass
#in NON sparse mode, we need to reshape the output to N dimensional arrays
#otherwise shp is just the array length and nothing happens here.
m = np.reshape(m, shp)
v = np.reshape(v, shp)
c = np.reshape(c, shp)
cf = np.reshape(cf, shp)
return (m, v, c, cf, forward_index)
def grid_data(data, bins, mn=None, mx=None,
reverse_indices=None, square=None,
sparse=False, missing_data=None):
"""Grids data into regular grid of bins
data is an N*M numpy array, where:
N is the number of data points,
A<M is the number of dimensions we are binning over
and D=M-A is the number of dimensions of data we bin.
Bins can be a list of arrays of bin edges, or a list of
bin sizes (integers). If bin edges are given, they are
assumed to be monotonic and used as in bincount. If they
are bin sizes, optional maximum (mx) and minimum (mn)
values for all dimensions can be given. In this case,
the edge bins (above mx, or below mn) contain the overflow data.
By default, the function returns the mean value, the variance,
and the counts of each bin. If reverse_indices is set to True,
the indices in the data array (along the N dimension) of each
element that contributes to every bin is stored in a variable
length array in that bin.
:param data: 2-dimensional(N,M) array of data to grid.
The first A columns are used as axes and require corresponding entries
in the bins list. The remaining D columns are data to be binned
:type data: numpy array
:param bins: list of bin edges (upper edge) or bin lengths for each axis in A
:type bins: list
:param mn: list of bin minima if the bins list contains integer number of bins
:type mn: list
:param mx: list of bin maxima if the bins list contains integer number of bins
:type mx: list
:param reverse_indices: logical to determine if the indices that are used in each grid box are calculated
:type reverse_indices: bool
:param square: logical to determine if the mean of the square data mean(X**2) is returned instead of variance (mean(X**2)-mean(X)**2)
:type square: bool
:param sparse: logical to determine if the sparse method is used instead and a 1D gridded dataset is created
:type sparse: bool
:param missing_data: list of missing data entries for each data array in D(better to mask arrays instead)
:type missing_data: list
:return: Mean, calculated mean value of each grid box
:return: Variance, calculated variance of each grid box (or mean of square)
:return: Count, number of elements in each grid box
With reverse_indices=true
:return: ri, array containing a list for each gridpoint containing the index back into D of each datapoint
:return: rj, the length of each list in ri
Types
:rtype: tuple of (numpy (D+1)d, numpy (D+1)d, numpy (D+1)d) (no reverse_indices, no sparse)
:rtype: tuple of (numpy 1d, numpy 1d, numpy 1d) (no reverse_indices, sparse)
:rtype: tuple of (numpy (D+1)d, numpy (D+1)d, numpy (D+1)d,numpy (D+1)d,numpy (D+1)d) (reverse_indices, no sparse)
:rtype: tuple of (numpy 1d, numpy 1d, numpy 1d,numpy 1d,numpy 1d) (reverse_indices, sparse)
"""
#glossary
#axis/axes = The first M rows of data that are used to filter the data into a grid
#bins = The values of the axes that are used to group elements
#data = The non-filterable part of the input data that is grouped by axes into bins
import numpy as np
#get the shape of the incoming data, create a bin size array read for bin data
s = data.shape
    bs = np.zeros(len(bins), dtype=int)
#if the first element is a numpy array, they should all be
#then I assume that each element in bins contains the upper bin edges
#of each dimension.
if type(bins[0]) == np.ndarray:
#if arrays are given in 'bins', assume they are bin edges
use_bins = bins
for i in range(len(bs)):
bs[i] = len(use_bins[i])
pass
pass
else:
#if plain numbers (int, float) are given, assume they are nbins
#convert them to ints
bs = [int(b) for b in bins]
#if min or max are not given, calculate them
if mn is None:
mn = np.min(data, 0)
if mx is None:
mx = | np.max(data, 0) | numpy.max |
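# The core vectorised trick in calculate_statistics above is np.unique(..., return_inverse=True)
# followed by weighted np.bincount calls, which yields per-bin counts, means and variances
# without explicit loops. A sketch on assumed toy data:
import numpy as np
bin_index = np.array([0, 2, 2, 1, 0, 2])            # which bin each sample falls in
values = np.array([1.0, 4.0, 6.0, 3.0, 2.0, 5.0])
_, inverse = np.unique(bin_index, return_inverse=True)
count = np.bincount(inverse)
mean = np.bincount(inverse, weights=values) / count
var = np.bincount(inverse, weights=values ** 2) / count - mean ** 2
print(count)  # [2 1 3]
print(mean)   # [1.5 3.  5. ]
print(var)    # [0.25 0.   0.66666667]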
import numpy as np
from scipy.stats import describe
def moments(data,goodbad=False,robust=None,silent=True):
'''
(Robustly) computes various statistics
Input Parameters
----------------
data : numpy.ndarray
goodbad : numpy.ndarray, optional
An array with the same shape as `data` that identifies good and
bad data points. 0=bad, 1=good, 2=NaN
robust : float, optional
If given, outliers are identified before computing the stats.
See notes for details.
silent : {True, False}, optional
If False, the result will be written to the command line.
Returns
--------
dict
A dict where with the following entries:
ndat : int
The numger of data points used in the calculation.
mean : float
The mean of the (non-NaN, good) data points.
variance : float
Estimate of the variance of the (non-NaN, good) data points.
That is, the denominator is 1/(ndat-1).
stddev : float
Estimate of the standard deviation of the (non-NaN, good)
data points. That is, the denominator is 1/(ndat-1).
stderr : float
The standard error of the (non-NaN, good) data points.
`stddev`/sqrt(ndat)
skewness : float
The skewness of the (non-NaN, good) data points.
kurtosis : float
The kurtosis of the (non-NaN, good) data points.
goodbad : numpy.ndarray of int
An array with the same shape as `data` that identifies good and
bad data points. 0=bad, 1=good, 2=NaN
Notes
-----
If goodbad is passed, only points with values of 1 are used. If
robust is passed, the median and median absolute deviation are computed, and
a point is identified as an outlier if:
|x_i - MED|/(1.4826*MAD) > robust
where MAD = median(|x_i - MED|) and 1.4826*MAD is a robust estimate
of the standard deviation for a gaussian distribution. Outliers are
labelled `bad` in the goodbad array. Finally, the statistics are
computed using scipy.stats.describe.
NOTE: The variance and standard deviations are *estimates* of the
variance and standard deviation of the parent population and so
have 1/(ndat-1) in the denominator.
Examples
--------
> import numpy as np
> data = np.array([[np.nan,1.2,20],[2.,1.2,2.5]])
> m = moments(data,robust=4,silent=False)
Moments results:
Total number of input points = 6
Number of good points = 4
Number of NaNs = 1
Mean = 1.725
Variance = 0.4091666666666667
Standard Deviation = 0.6396613687465162
Standard Error = 0.3198306843732581
Skewness = 0.28952649685958215
Kurtosis = -1.6237779003737334
[[2 1 0]
[1 1 1]]
Modification History
--------------------
2022-05-24 - Written by <NAME>, University of Toledo.
Based on Spextool IDL program mc_moments.pro.
'''
# Set up goodbad array if need be
if goodbad is False: goodbad = np.full_like(data,1,dtype=int)
# Check for NaNs and update goodbad array
nanbool = | np.isnan(data) | numpy.isnan |
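# The robust criterion described in the docstring above flags a point as an outlier when
# |x - median| exceeds robust * 1.4826 * MAD. A self-contained sketch of just that test,
# with an assumed threshold of 4:
import numpy as np
data = np.array([1.1, 1.2, 0.9, 1.0, 1.3, 20.0])   # one obvious outlier
robust = 4.0
med = np.median(data)
mad = np.median(np.abs(data - med))
good = np.abs(data - med) / (1.4826 * mad) <= robust
print(good)   # only the 20.0 entry is flagged False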
#!/usr/bin/env python
#
# ======================================================================
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/feassemble/TestFIATSimplex.py
## @brief Unit testing of FIATSimplex object.
import unittest
import numpy
from pylith.feassemble.FIATSimplex import FIATSimplex
from pylith.utils.testarray import test_scalararray
# ----------------------------------------------------------------------
class Tri3(object):
def __init__(self):
"""
Setup tri33 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0]])
quadPts = numpy.array([ [-1.0/3.0, -1.0/3.0] ])
quadWts = numpy.array( [2.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (1, 3), dtype=numpy.float64)
basisDeriv = numpy.zeros( (1, 3, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
dtype=numpy.float64).reshape( (3,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)]])
basisDeriv[iQuad] = deriv.reshape((3, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
def N0(self, p):
return 0.5*(-p[0]-p[1])
def N0p(self, p):
return -0.5
def N0q(self, p):
return -0.5
def N1(self, p):
return 0.5*(1.0+p[0])
def N1p(self, p):
return 0.5
def N1q(self, p):
return 0.0
def N2(self, p):
return 0.5*(1.0+p[1])
def N2p(self, p):
return 0.0
def N2q(self, p):
return 0.5
# ----------------------------------------------------------------------
class Tri3Collocated(Tri3):
def __init__(self):
"""
Setup tri33 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0]])
quadPts = vertices[:]
quadWts = numpy.array( [2.0/3.0, 2.0/3.0, 2.0/3.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (3, 3), dtype=numpy.float64)
basisDeriv = numpy.zeros( (3, 3, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
dtype=numpy.float64).reshape( (3,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)]])
basisDeriv[iQuad] = deriv.reshape((3, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
# ----------------------------------------------------------------------
class Tri6(object):
def __init__(self):
"""
Setup tri33 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0],
[ 0.0, -1.0],
[ 0.0, 0.0],
[-1.0, 0.0]])
quadPts = numpy.array([ [-0.64288254, -0.68989795],
[-0.84993778, 0.28989795],
[ 0.33278049, -0.68989795],
[-0.43996017, 0.28989795]])
quadWts = numpy.array( [0.63608276, 0.36391724, 0.63608276, 0.36391724])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (4, 6), dtype=numpy.float64)
basisDeriv = numpy.zeros( (4, 6, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q),
self.N3(q), self.N4(q), self.N5(q)],
dtype=numpy.float64).reshape( (6,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)],
[self.N3p(q), self.N3q(q)],
[self.N4p(q), self.N4q(q)],
[self.N5p(q), self.N5q(q)]])
basisDeriv[iQuad] = deriv.reshape((6, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
def N0(self, p):
return 0.5*(-p[0]-p[1])*(-1.0-p[0]-p[1])
def N0p(self, p):
return 0.5+p[0]+p[1]
def N0q(self, p):
return 0.5+p[0]+p[1]
def N1(self, p):
return 0.5*(1.0+p[0])*(p[0])
def N1p(self, p):
return 0.5+p[0]
def N1q(self, p):
return 0
def N2(self, p):
return 0.5*(1.0+p[1])*(p[1])
def N2p(self, p):
return 0
def N2q(self, p):
return 0.5+p[1]
def N3(self, p):
return (-p[0]-p[1])*(1+p[0])
def N3p(self, p):
return -1.0-2*p[0]-p[1]
def N3q(self, p):
return -(1+p[0])
def N4(self, p):
return (1.0+p[0])*(1+p[1])
def N4p(self, p):
return (1+p[1])
def N4q(self, p):
return (1.0+p[0])
def N5(self, p):
return (-p[0]-p[1])*(1+p[1])
def N5p(self, p):
return -(1+p[1])
def N5q(self, p):
return -1.0-p[0]-2*p[1]
# ----------------------------------------------------------------------
class Tet4(object):
def __init__(self):
"""
Setup tri33 cell.
"""
vertices = numpy.array([[+1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0]])
quadPts = | numpy.array([ [-1.0/2.0, -1.0/2.0, -1.0/2.0] ]) | numpy.array |
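# The reference-cell shape functions used throughout these fixtures form a partition of
# unity: they sum to 1 anywhere in the cell and each equals 1 at its own vertex. A quick
# standalone check reusing the Tri3 formulas defined above:
import numpy as np
def tri3_basis(p):
    # P1 basis on the reference triangle with vertices (-1,-1), (1,-1), (-1,1)
    return np.array([0.5 * (-p[0] - p[1]),
                     0.5 * (1.0 + p[0]),
                     0.5 * (1.0 + p[1])])
q = np.array([-1.0 / 3.0, -1.0 / 3.0])        # the Tri3 quadrature point
print(np.isclose(tri3_basis(q).sum(), 1.0))   # True
print(tri3_basis(np.array([-1.0, -1.0])))     # [1. 0. 0.]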
import numpy as np
import h5py as h5
from converters import convgeo2ply
def extract_geometry(data_file, output_dir, nth_coord):
"""
Extracts the geometry of the body used in Abhiram's simulations of flow around an axisymmetric ramp body.
In his simulations, the geometry is located at [k,j,i]=[1,:,:] (non-cartesian coordinate system)
Geometry is saved to a .ply file.
:param data_file: File to extract geometry from
:param output_dir: Output directory within which to save geometry file (just directory, no filename).
:param nth_coord: Save geometry with every nth coordinate (i.e. skip n-1 coords before saving the nth one). This helps reduce unnecessary mesh complexity. Higher is less detailed.
"""
# Open file
data = h5.File(data_file, "r")
# Extract mesh coords
xpt2f = | np.ndarray.flatten(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, ::nth_coord], order="C") | numpy.ndarray.flatten |
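# The slicing above keeps only every nth grid coordinate before flattening, which is a
# cheap way to decimate an over-dense surface mesh. A sketch of the same idea on a
# synthetic grid (the CGNS/HDF5 layout and the convgeo2ply writer are assumed and not
# reproduced here):
import numpy as np
nth_coord = 4
coord_x = np.tile(np.linspace(0.0, 1.0, 64), (32, 1))   # stand-in coordinate array
xpt = np.ndarray.flatten(coord_x[::nth_coord, ::nth_coord], order="C")
print(coord_x.shape, "->", xpt.shape)   # (32, 64) -> (128,)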
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script contains unit tests of the :mod:`rmgpy.statmech.vibration` module.
"""
import unittest
import math
import numpy
from rmgpy.statmech.vibration import HarmonicOscillator
import rmgpy.constants as constants
################################################################################
class TestHarmonicOscillator(unittest.TestCase):
"""
Contains unit tests of the HarmonicOscillator class.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
self.frequencies = numpy.array([500, 1000, 2000])
self.quantum = True
self.mode = HarmonicOscillator(
frequencies = (self.frequencies,"cm^-1"),
quantum = self.quantum,
)
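    # For reference (not part of the original tests): with frequencies nu_i in cm^-1
    # and x_i = h*c*(100*nu_i)/(kB*T), the classical partition function exercised here
    # is Q_cl = prod_i 1/x_i and the quantum one (measured from the zero-point level)
    # is Q_qm = prod_i 1/(1 - exp(-x_i)); the expected values in the tests below are
    # consistent with these expressions.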
def test_getPartitionFunction_classical(self):
"""
Test the HarmonicOscillator.getPartitionFunction() method for a set of
classical oscillators.
"""
self.mode.quantum = False
Tlist = numpy.array([300,500,1000,1500,2000])
Qexplist = numpy.array([0.00906536, 0.04196925, 0.335754, 1.13316978, 2.68603])
for T, Qexp in zip(Tlist, Qexplist):
Qact = self.mode.getPartitionFunction(T)
self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)
def test_getPartitionFunction_quantum(self):
"""
Test the HarmonicOscillator.getPartitionFunction() method for a set of
quantum oscillators.
"""
self.mode.quantum = True
Tlist = numpy.array([300,500,1000,1500,2000])
Qexplist = numpy.array([1.10923, 1.39358, 2.70819, 4.98825, 8.459780])
for T, Qexp in zip(Tlist, Qexplist):
Qact = self.mode.getPartitionFunction(T)
self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)
def test_getHeatCapacity_classical(self):
"""
Test the HarmonicOscillator.getHeatCapacity() method using a set of
classical oscillators.
"""
self.mode.quantum = False
Tlist = numpy.array([300,500,1000,1500,2000])
Cvexplist = numpy.array([3, 3, 3, 3, 3]) * constants.R
for T, Cvexp in zip(Tlist, Cvexplist):
Cvact = self.mode.getHeatCapacity(T)
self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)
def test_getHeatCapacity_quantum(self):
"""
Test the HarmonicOscillator.getHeatCapacity() method using a set of
quantum oscillators.
"""
self.mode.quantum = True
Tlist = numpy.array([300,500,1000,1500,2000])
Cvexplist = numpy.array([0.832004, 1.47271, 2.32513, 2.65024, 2.79124]) * constants.R
for T, Cvexp in zip(Tlist, Cvexplist):
Cvact = self.mode.getHeatCapacity(T)
self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp)
def test_getEnthalpy_classical(self):
"""
Test the HarmonicOscillator.getEnthalpy() method using a set of
classical oscillators.
"""
self.mode.quantum = False
Tlist = numpy.array([300,500,1000,1500,2000])
Hexplist = numpy.array([3, 3, 3, 3, 3]) * constants.R * Tlist
for T, Hexp in zip(Tlist, Hexplist):
Hact = self.mode.getEnthalpy(T)
self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)
def test_getEnthalpy_quantum(self):
"""
Test the HarmonicOscillator.getEnthalpy() method using a set of quantum
oscillators.
"""
self.mode.quantum = True
Tlist = numpy.array([300,500,1000,1500,2000])
Hexplist = numpy.array([0.280395, 0.637310, 1.30209, 1.70542, 1.96142]) * constants.R * Tlist
for T, Hexp in zip(Tlist, Hexplist):
Hact = self.mode.getEnthalpy(T)
self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp)
def test_getEntropy_classical(self):
"""
Test the HarmonicOscillator.getEntropy() method using a set of
classical oscillators.
"""
self.mode.quantum = False
Tlist = numpy.array([300,500,1000,1500,2000])
Sexplist = numpy.array([-1.70329, -0.170818, 1.90862, 3.12502, 3.98807]) * constants.R
for T, Sexp in zip(Tlist, Sexplist):
Sact = self.mode.getEntropy(T)
self.assertAlmostEqual(Sexp, Sact, delta=1e-4*abs(Sexp))
def test_getEntropy_quantum(self):
"""
Test the HarmonicOscillator.getEntropy() method using a set of quantum
oscillators.
"""
self.mode.quantum = True
Tlist = numpy.array([300,500,1000,1500,2000])
Sexplist = numpy.array([0.384065, 0.969182, 2.29837, 3.31251, 4.09675]) * constants.R
for T, Sexp in zip(Tlist, Sexplist):
Sact = self.mode.getEntropy(T)
self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp)
def test_getSumOfStates_classical(self):
"""
Test the HarmonicOscillator.getSumOfStates() method using a set of
classical oscillators.
"""
self.mode.quantum = False
self.mode.frequencies = ([500, 1000],"cm^-1")
Elist = numpy.arange(0, 10000*11.96, 1*11.96)
sumStates = self.mode.getSumOfStates(Elist)
densStates = self.mode.getDensityOfStates(Elist)
for n in range(10, len(Elist)):
self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n]))
def test_getSumOfStates_quantum(self):
"""
Test the HarmonicOscillator.getSumOfStates() method using a set of
quantum oscillators.
"""
self.mode.quantum = True
Elist = numpy.arange(0, 10000*11.96, 1*11.96)
sumStates = self.mode.getSumOfStates(Elist)
densStates = self.mode.getDensityOfStates(Elist)
for n in range(1, len(Elist)):
if sumStates[n-1] == 0:
self.assertTrue(numpy.sum(densStates[0:n]) == 0, '{0} != {1}'.format(numpy.sum(densStates[0:n]), 0))
else:
                self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n-1]))
def test_getDensityOfStates_classical(self):
"""
Test the HarmonicOscillator.getDensityOfStates() method using a set of
classical oscillators.
"""
self.mode.quantum = False
factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol
Elist = numpy.arange(0, 10000*factor, 1*factor)
densStates = self.mode.getDensityOfStates(Elist)
T = 100
Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T))
Qexp = self.mode.getPartitionFunction(T)
self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp)
def test_getDensityOfStates_quantum(self):
"""
Test the HarmonicOscillator.getDensityOfStates() method using a set of
quantum oscillators.
"""
self.mode.quantum = True
factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol
        Elist = numpy.arange(0, 10000*factor, 1*factor)
#!/usr/bin/env python
import sys
import os
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir+'/../neural_networks')
import numpy as np
import numpy.matlib
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import copy
import time
import neural_network_regr_multi as nn
import nn_navigation_value_multi as nn_nav
import pedData_processing_multi as pedData
import global_var as gb
import gen_rand_testcases as gen_tc
# setting up global variables
COLLISION_COST = gb.COLLISION_COST
DIST_2_GOAL_THRES = gb.DIST_2_GOAL_THRES
GETTING_CLOSE_PENALTY = gb.GETTING_CLOSE_PENALTY
GETTING_CLOSE_RANGE = gb.GETTING_CLOSE_RANGE
EPS = gb.EPS
# terminal states
NON_TERMINAL=gb.NON_TERMINAL
COLLIDED=gb.COLLIDED
REACHED_GOAL=gb.REACHED_GOAL
# plotting colors
plt_colors = gb.plt_colors
GAMMA = gb.RL_gamma
DT_NORMAL = gb.RL_dt_normal
TRAINING_DT = gb.TRAINING_DT
def compute_plot_stats(traj_raw_multi):
time_to_reach_goal, traj_lengths, min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
num_agents = len(traj_raw_multi) - 1
agents_speed = np.zeros((num_agents,))
for i in xrange(num_agents):
agents_speed[i] = traj_raw_multi[i+1][0,5]
agents_time = time_to_reach_goal
agents_len = traj_lengths
min_dist = min_sepDist
return agents_speed, agents_time, agents_len, min_dist
class NN_rl_training_param:
    # num_episodes: number of self-play training episodes
    # numpts_per_eps: number of training points to collect per episode
    # expr_size: capacity of the experience replay buffer
    # gamma: discount factor used in the value backup
    # sgd_batch_size: batch size of each stochastic gradient descent step
    # greedy_epsilon: epsilon for epsilon-greedy exploration
def __init__(self, num_episodes, numpts_per_eps, expr_size, \
gamma, sgd_batch_size, greedy_epsilon):
self.num_episodes = num_episodes
self.numpts_per_eps = numpts_per_eps
self.expr_size = expr_size
self.gamma = gamma
self.sgd_batch_size = sgd_batch_size
self.greedy_epsilon = greedy_epsilon
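    # Example construction (hypothetical values, for illustration only):
    #   param = NN_rl_training_param(num_episodes=1000, numpts_per_eps=300,
    #                                expr_size=20000, gamma=0.97,
    #                                sgd_batch_size=500, greedy_epsilon=0.1)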
    def writeToFile(self, filename):
np_array = []
np_array.append(self.num_episodes)
np_array.append(self.numpts_per_eps)
np_array.append(self.expr_size)
np_array.append(self.gamma)
np_array.append(self.sgd_batch_size)
np_array.append(self.greedy_epsilon)
pickle.dump(np_array, open(filename, "wb"))
return
    def loadFromFile(self, filename):
np_array = pickle.load(open(filename, "rb"))
self.num_episodes = np_array[0]
self.numpts_per_eps = np_array[1]
self.expr_size = np_array[2]
self.gamma = np_array[3]
self.sgd_batch_size = np_array[4]
self.greedy_epsilon = np_array[5]
return
class NN_rl:
def __init__(self, nn_rl_training_param, nn_training_param, value_net, ifSave):
self.nn_rl_training_param = nn_rl_training_param
self.nn_training_param = nn_training_param
self.training_score = []
self.bad_testcases = []; self.bad_testcases_tmp = []; self.bad_testcases_update_iter = []
self.eval_epsd_stride = 5
self.test_cases = preset_testCases()
self.value_net = value_net
self.value_net_copy = copy.deepcopy(value_net)
self.old_value_net = copy.deepcopy(value_net)
self.best_value_net = copy.deepcopy(value_net)
self.ifSave = ifSave
self.passing_side = 'none'
self.mode = self.value_net.mode+'_'
self.epsilon_use_other_net = 0.3
self.value_net_other = None
self.num_agents = value_net.num_agents
pass
def writeToFile(self, file_dir, iteration):
v_net_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/%d_agents_policy_iter_"%self.num_agents + str(iteration) + ".p"
score_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/%d_agents_RL_training_score.p"%self.num_agents
if self.ifSave:
self.value_net.nn.save_neural_network(v_net_fname)
pickle.dump(self.training_score, open(score_fname, "wb"))
pass
def loadFromFile(self, file_dir, v_net_filename):
filename_nn = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/" + v_net_filename
self.value_net.nn.load_neural_network(filename_nn)
self.value_net_copy.nn.load_neural_network(filename_nn)
score_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side + "/%d_agents_RL_training_score.p"%self.num_agents
try:
self.scores = pickle.load(open(score_fname,"rb"))
except:
print('no score file exists')
pass
def loadOldNet(self, file_dir, iteration):
v_net_fname = file_dir+"/../../pickle_files/multi/" + self.value_net.mode \
+ '_' + self.passing_side \
+ "/RL_selfplay/%d_agents_policy_iter_"%self.num_agents + str(max(0,iteration-100)) + ".p"
self.old_value_net.nn.load_neural_network(v_net_fname)
self.value_net.old_value_net = self.old_value_net
def deep_RL_train(self, file_dir):
t_start = time.time()
self.training_score = []
param = self.nn_rl_training_param
self.value_net.nn.initialize_derivatives()
self.value_net.old_value_net = self.old_value_net
# initialize experience
num_states = 7 + 8 * (self.num_agents - 1)
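        # Agent-centric state layout (as produced by pedData.rawState_2_agentCentricState):
        # 7 features describing the host agent followed by 8 features per other agent.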
self.X = np.zeros((param.expr_size,num_states))
self.Y = np.zeros((param.expr_size,1))
self.values_diff = np.zeros((param.expr_size,))
self.current_expr_ind = 0
self.total_training_pts = 0
path_times = None
collisions = None
best_iter_time = np.inf
best_iter = 0
# for each episode
for kk in xrange(self.nn_rl_training_param.num_episodes):
numpts_cur_eps = 0
epsilon = 0.3 - np.amin((0.25, kk / 2500.0))
self.value_net.dt_forward = 1.0 #- np.amin((0.7, kk / 150.0))
self.value_net_copy.dt_forward = 1.0 #- np.amin((0.7, kk / 150.0))
self.value_net.radius_buffer = 0.0
self.value_net_copy.radius_buffer = 0.0
# self.value_net.passing_side_training_weight = 0.2 + np.amin((0.5, kk / 500.0))
# side_length = np.amin((6, 1.0 + kk / 50.0))
# if kk > 300:
# if kk % 2 == 0:
side_length = np.random.rand() * 4.0 + 3.0
# else:
# side_length = np.random.rand() * 2.0 + 1.0
# evaluate network
if kk % self.eval_epsd_stride == 0:
self.value_net.radius_buffer = 0.0
self.value_net_copy.radius_buffer = 0.0
path_times, collisions, values = \
self.evaluate_current_network(path_times, collisions, iteration=kk, plot_mode='one')
# score = np.array([np.sum(path_times), np.sum(collisions)])
score = np.hstack((path_times, collisions, values))
self.training_score.append(score)
num_cases = len(self.test_cases)
print('time: %.2f, epsd: %d, time: %.3f, value: %.3f, num_bad_cases: %.2f, best_iter %d' % (time.time()-t_start, kk, \
np.sum(score[0:num_cases]), np.sum(score[2*num_cases:3*num_cases]), len(self.bad_testcases), best_iter))
# plot a test case
if kk > 0 and self.current_expr_ind > 0:
ind = np.random.randint(0, np.max((1,self.current_expr_ind)))
x_plot = self.X[ind,:]
y_plot = self.Y[ind,:]
title_string = 'epsd: %d, time: %.1f, value: %.3f' % \
(kk, np.sum(score[0:num_cases]), np.sum(score[2*num_cases:3*num_cases]))
self.value_net.plot_ped_testCase(x_plot, y_plot, title_string, \
'test_case in RL self play')
# plot a training traj
agents_speed, agents_time, agents_len, min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'a%d, t %.2f, sp %.2f, len %.2f \n %s; min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+'training_traj' )
# reset value_net_copy to value_net
if kk % 5 == 0:
# cur_iter_time = np.sum(score[0:num_cases])
# # print best_iter_time, best_iter_time
# if best_iter_time > cur_iter_time:
# self.best_value_net = copy.deepcopy(self.value_net)
# best_iter_time = cur_iter_time
# best_iter = kk
# print 'recorded change at iteration', kk
self.value_net_copy = copy.deepcopy(self.value_net)
# if kk % 50 == 0:
# self.value_net = copy.deepcopy(self.best_value_net)
# raw_input()
# save
if kk % 50 == 0:
self.writeToFile(file_dir, kk)
# self.loadOldNet(file_dir, kk)
self.plot_training_score(file_dir)
# for stats
strt_line_training_pts = 0
nn_training_pts = 0
if kk < 200:
step_size = 1.0 / max(40+kk, kk)
else:
step_size = 1.0 / (2000+int(kk/1000)*1000)
while (numpts_cur_eps < param.numpts_per_eps):
is_permit_straight = np.random.binomial(1, 0.0)
is_overtake = np.random.binomial(1, 0.2)
# is_permit_straight = False
num_agents = self.num_agents
if_static = np.random.rand() < 0.2
# if kk < 200:
# if_static = True
if_end_near_bnd = np.random.rand() < 0.2
# train on bad cases
if_trained_on_badcases = False
if np.random.rand() < 0.5 and len(self.bad_testcases) > 0:
bad_case_ind = np.random.randint(len(self.bad_testcases))
if self.bad_testcases_update_iter[bad_case_ind] < kk - 1:
# if True:
if_trained_on_badcases = True
self.bad_testcases_update_iter[bad_case_ind] = kk
agents_state = self.bad_testcases[bad_case_ind]
num_repeat = 2
traj_raw_multi, x, y, values_diff, \
if_resolved = self.trainTestCase(agents_state, num_repeat)
if if_resolved == True or np.random.rand() > 0.8:
self.bad_testcases.pop(bad_case_ind)
self.bad_testcases_update_iter.pop(bad_case_ind)
self.bad_testcases_tmp = []
# print 'bad test case with %d /%d pts' % (len(x), len(x) + numpts_cur_eps)
if len(x) > 0:
x_train = self.value_net.nn.xRaw_2_x(x)
y_train = self.value_net.nn.yRaw_2_y(y)
# step_size = 1.0 / max(2000+kk, kk)
self.value_net.nn.set_training_stepsize('rmsprop')
self.value_net.nn.backprop(x_train, y_train, step_size, kk)
# print 'after len(self.bad_testcases)', len(self.bad_testcases)
# train on random cases
if if_trained_on_badcases == False:
test_case = gen_tc.generate_rand_test_case_multi(num_agents, side_length, \
np.array([0.1,1.2]), np.array([0.3, 0.5]), \
is_end_near_bnd=if_end_near_bnd, is_static = if_static)
# debugging
# if np.random.rand() > 0.0: #0.5:
# test_case = self.test_cases[np.random.randint(4)]
# test_case = self.test_cases[1]
# print 'self.value_net.dt_forward', self.value_net.dt_forward
x = []; y = [];
if len(x) == 0:
ifRandHeading = np.random.binomial(1, 0.3)
# ifRandHeading = False
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, rl_epsilon=epsilon, \
figure_name='no_plot', stopOnCollision=True, ifRandHeading=ifRandHeading,\
ifNonCoop=True)
num_pts = len(traj_raw_multi[0])
if num_pts < 2:
continue
# print 'generate traj test case'
# pedData.plot_traj_raw_multi(traj_raw, 'what is wrong', figure_name='tmp_traj' )
x, y, values_diff = self.rawTraj_2_trainingData(traj_raw_multi, param.gamma, kk)
nn_training_pts += len(x)
if np.random.rand() > 0.9:
traj_raw = pedData.reflectTraj(traj_raw_multi)
agents_speed, agents_time, agents_len, min_dist = compute_plot_stats(traj_raw_multi)
if len(self.bad_testcases_tmp) > 0:
if len(self.bad_testcases) < 50:
self.bad_testcases += self.bad_testcases_tmp
self.bad_testcases_update_iter += [kk-1] * len(self.bad_testcases_tmp)
self.bad_testcases_tmp = []
# print 'rand test case with %d /%d pts' % (len(x), len(x) + numpts_cur_eps)
if len(x) > 0:
self.append_to_experience(x, y, values_diff, param.expr_size)
numpts_cur_eps += len(x)
# print 'numpts_cur_eps', numpts_cur_eps
# train the value network
for tt in xrange(2):
# sample a random minibatch
nb_examples = min(self.total_training_pts, param.expr_size)
# half good and half bad
if np.random.rand() > 1.1:
minibatch = np.random.permutation(np.arange(nb_examples))[0:param.sgd_batch_size*2]
# bad_inds = np.where(self.values_diff>0.05)[0]
# half_num = param.sgd_batch_size/2
# if len(bad_inds) > half_num:
# minibatch_bad = np.argpartition(self.values_diff, -half_num)[-half_num:]
# minibatch_rand = np.random.permutation(np.arange(nb_examples))[0:half_num:]
# minibatch = np.union1d(minibatch_bad, minibatch_rand)
# else:
# minibatch = bad_inds
# print 'here'
values_raw = np.squeeze(self.value_net_copy.nn.make_prediction_raw(self.X[:nb_examples,:]))
values_diff = abs((values_raw - np.squeeze(self.Y[:nb_examples]))\
/np.squeeze(self.Y[:nb_examples]))
half_num = param.sgd_batch_size / 2.0
minibatch_bad = np.argpartition(values_diff, -half_num)[-half_num:]
# np.set_printoptions(edgeitems=4, precision=4,formatter={'float': '{: 0.4f}'.format})
# print 'max', values_diff[minibatch_bad]
# print 'dist', self.X[minibatch_bad,0:7]
# raw_input()
# print 'rand', values_diff[0:nb_examples]
# raw_input()
minibatch = minibatch_bad
minibatch_rand = np.random.permutation(np.arange(nb_examples))[0:half_num:]
# print minibatch_bad.shape
# print minibatch_rand.shape
minibatch = np.union1d(minibatch_bad, minibatch_rand)
else:
minibatch = np.random.permutation(np.arange(nb_examples))[0:param.sgd_batch_size]
# max_dist_inds = np.argpartition(self.X[:,0], int(nb_examples/10))[-int(nb_examples/5):]
# minibatch = np.union1d(minibatch, max_dist_inds)
# print minibatch
# scale using nn coordinate
x_train_raw = self.X[minibatch,:]
y_train_raw = self.Y[minibatch]
# if self.total_training_pts > param.expr_size and kk > 0: #30:
# print 'median', np.median(x_train_raw, axis=0)
# print 'mean', np.mean(x_train_raw, axis=0)
# print 'std', np.std(x_train_raw, axis=0)
# print 'rel_median', (np.median(x_train_raw, axis=0) - self.value_net.nn.avg_vec) / self.value_net.nn.std_vec
# print 'rel_std', np.std(x_train_raw, axis=0) / self.value_net.nn.std_vec
# print 'min', np.amin(x_train_raw, axis=0)
# print 'max', np.amax(x_train_raw, axis=0)
# print 'iter', kk
# raw_input()
x_train = self.value_net.nn.xRaw_2_x(x_train_raw)
y_train = self.value_net.nn.yRaw_2_y(y_train_raw)
# check
# try:
# assert(np.all(np.squeeze(y_train_raw) <= (0.97**(x_train_raw[:,0]/0.2)+0.01)))
# except AssertionError:
# num_pts = len(y_train_raw)
# print 'num_pts', num_pts
# for i in xrange(num_pts):
# if True: #y_train_raw[i] > 0.97**(x_train_raw[i,0]/0.2) + 0.01:
# # print '---'
# # print 'x_train[i,:]', x_train_raw[i,:]
# print 'y_train[i] - bnd', y_train_raw[i] - 0.97**(x_train_raw[i,0]/0.2)
# assert(0)
# update value network
# print step_size
# step_size = 0.0
# self.value_net.nn.set_training_stepsize('fixed_decay')
# self.value_net.nn.set_training_stepsize('momentum')
self.value_net.nn.set_training_stepsize('rmsprop')
self.value_net.nn.backprop(x_train, y_train, step_size, kk)
# print ' added %d strt_line pts, %d nn_pts' % (strt_line_training_pts, nn_training_pts)
# plot at end of training
self.plot_training_score(file_dir)
self.evaluate_current_network()
def plot_training_score(self, file_dir):
if len(self.training_score) > 0:
fig = plt.figure('training score', figsize=(10,8))
plt.clf()
ax1 = fig.add_subplot(1,1,1)
ax2 = ax1.twinx()
episodes = self.eval_epsd_stride * np.arange(len(self.training_score))
num_cases = self.training_score[0].shape[0] / 3
scores_np = np.asarray(self.training_score)
total_time_vec = np.sum(scores_np[:,0:num_cases], axis=1)
collision_vec = np.sum(scores_np[:,num_cases:2*num_cases], axis=1)
value_vec = np.sum(scores_np[:,2*num_cases:3*num_cases], axis=1)
ax1.plot(episodes, total_time_vec, 'b')
ax2.plot(episodes, value_vec, 'r')
ax1.set_xlabel('episode')
ax1.set_ylabel('time (s)')
ax2.set_ylabel('value')
plt.draw()
plt.pause(0.0001)
if self.ifSave:
plt.savefig(file_dir+"/../../pickle_files/multi/"+ self.value_net.mode +\
'_' + self.passing_side + "/%d_agents_training_score.png"%self.num_agents,bbox_inches='tight')
else:
print('no training score')
def append_to_experience(self, x, y, values_diff, expr_size):
num_pts = len(x)
assert(num_pts == len(y))
assert(num_pts < expr_size)
gamma = GAMMA
dt_normal = DT_NORMAL
for i in xrange(num_pts):
try:
assert(y[i] <= gamma ** (x[i,0]/dt_normal)+0.0001)
assert(x[i,1] > 0.1 - EPS)
except:
print('x', x[i,:])
print('y', y[i])
print('bnd', gamma ** (x[i,0]/dt_normal))
assert 0, 'not valid training point'
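        # Experience replay is stored in a fixed-size ring buffer: append until
        # expr_size is reached, then wrap around and overwrite the oldest entries.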
if self.current_expr_ind + num_pts < expr_size:
end_ind = self.current_expr_ind + num_pts
self.X[self.current_expr_ind:end_ind,:] = x
self.Y[self.current_expr_ind:end_ind,:] = y
self.values_diff[self.current_expr_ind:end_ind] = values_diff
self.current_expr_ind = end_ind
else:
y_num_pts = expr_size - self.current_expr_ind
self.X[self.current_expr_ind:expr_size,:] = x[0:y_num_pts,:]
self.Y[self.current_expr_ind:expr_size,:] = y[0:y_num_pts,:]
self.values_diff[self.current_expr_ind:expr_size] = values_diff[0:y_num_pts]
self.X[0:num_pts-y_num_pts,:] = x[y_num_pts:num_pts,:]
self.Y[0:num_pts-y_num_pts,:] = y[y_num_pts:num_pts,:]
self.values_diff[0:num_pts-y_num_pts] = values_diff[y_num_pts:num_pts]
self.current_expr_ind = num_pts - y_num_pts
self.total_training_pts += num_pts
# print 'self.current_expr_ind', self.current_expr_ind
# print 'self.total_training_pts', self.total_training_pts
# try:
# if y[0] < 0:
# print x
# print y
# t = raw_input('press any key to continue: ')
# except:
# print x
# print y
# assert(0)
return
def evaluate_current_network(self, prev_path_times=None, prev_collisions=None, iteration=0, plot_mode='all'):
num_test_cases = len(self.test_cases)
path_times = np.zeros((num_test_cases,), dtype=float)
collisions = np.zeros((num_test_cases,), dtype=bool)
plot_number = np.random.randint(len(self.test_cases))
values = np.zeros((num_test_cases,), dtype=float)
for i, test_case in enumerate(self.test_cases):
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, figure_name='no_plot', stopOnCollision=False)
# plotting (debugging)
agents_speed, agents_time, agents_len, min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'case: %d; a%d, t %.2f, sp %.2f, len %.2f \n %s; min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(i, 0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
if_collided = min_dist < 0.0
collisions[i] = if_collided
path_times[i] = np.sum(agents_time)
if plot_mode == 'all': # plot every time case
pedData.plot_traj_raw_multi(traj_raw_multi, title_string)
# % (i, agent_1_time, agent_2_time, total_time)
elif plot_mode == 'one' and i == plot_number: # only plot one test case
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+'evaluate')
else:
pass
# plot bad trajectories
            if iteration > 200 and prev_path_times is not None and \
(collisions[i] == True or (path_times[i] - prev_path_times[i]) > 3.0):
figure_name_str = 'bad_traj_tc_%d' % (i)
title_string = ('iter %d ;' % iteration) + title_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name=self.mode+figure_name_str)
agent_state = traj_raw_multi[1][0,:]
other_agents_state = []
num_agents = len(traj_raw_multi) - 1
for tt in xrange(1, num_agents):
other_agents_state.append(traj_raw_multi[tt+1][0,:])
values[i] = self.value_net.find_states_values(agent_state, other_agents_state)
# np.set_printoptions(precision=4)
value_str = ' tc(0-%d)' % num_test_cases
path_times_str = ' tc(0-%d)' % num_test_cases
for tt in xrange(num_test_cases):
value_str += ', %.3f' % values[tt]
path_times_str += ', %.3f' % path_times[tt]
print(value_str)
print(path_times_str)
return path_times, collisions, values
# for plotting purposes
def plot_test_cases(self, folder_dir, filename_str, format_str):
for i, test_case in enumerate(self.test_cases):
traj_raw_multi, time_to_complete = \
self.value_net.generate_traj(test_case, figure_name='no_plot')
# file name (iteration # and test case #)
filename = folder_dir + '/tc' + str(i) + '_' + filename_str + format_str
# trajectory stats
# a1_speed = traj_raw[0,6]
# a2_speed = traj_raw[0,15]
# a1_len = np.sum(np.linalg.norm(traj_raw[0:-1, 1:3] - traj_raw[1:, 1:3], axis=1)) + \
# np.linalg.norm(traj_raw[-1, 1:3] - traj_raw[-1, 7:9])
# a2_len = np.sum(np.linalg.norm(traj_raw[0:-1, 10:12] - traj_raw[1:, 10:12], axis=1)) + \
# np.linalg.norm(traj_raw[-1, 10:12] - traj_raw[-1, 16:18])
# min_dist = np.amin(np.linalg.norm(traj_raw[:,1:3]-traj_raw[:,10:12], axis=1)) - \
# traj_raw[0,9] - traj_raw[0,18]
agents_speed, agents_time, agents_len, min_dist = compute_plot_stats(traj_raw_multi)
title_string = 'case: %d; a%d, t %.2f, sp %.2f, len %.2f \n %s; min_dist %.2f a%d t %.2f, sp %.2f, len %.2f' % \
(i, 0, agents_time[0], agents_speed[0], agents_len[0], \
self.passing_side, min_dist, 1, agents_time[1], agents_speed[1], agents_len[1])
num_agents = len(traj_raw_multi) - 1
if num_agents > 2:
for tt in xrange(2, num_agents):
agent_string = '\n a%d, t %.2f, sp %.2f, len %.2f' % \
(tt, agents_time[tt], agents_speed[tt], agents_len[tt])
title_string += agent_string
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, 'plot_test_cases')
if self.ifSave:
plt.savefig(filename, bbox_inches='tight')
# find intended next states(traj_raw_multi)
# def find_intended_future_state_value(self, agent_state, agent_action_xy, other_agents_state, dt_forward):
# num_states = 7 + 8 * (self.num_agents - 1)
# agent_action_theta = np.array([np.linalg.norm(agent_action_xy), \
# np.arctan2(agent_action_xy[1], agent_action_xy[0])])
# # forward propagate to next states
# dt = dt_forward
# num_other_agents = len(other_agents_state)
# agent_next_state = self.value_net_copy.update_state(agent_state, agent_action_theta, dt)
# others_action_xy = [other_agents_state[tt][2:4] for tt in xrange(num_other_agents)]
# others_next_state = []
# for tt in xrange(num_other_agents):
# # print np.linalg.norm(others_action_xy[tt])
# # print np.arctan2(others_action_xy[tt][1], others_action_xy[tt][0])
# action_theta = np.array([np.linalg.norm(others_action_xy[tt]), \
# np.arctan2(others_action_xy[tt][1], others_action_xy[tt][0]) ])
# others_next_state.append(self.value_net_copy.update_state(other_agents_state[tt], \
# action_theta, dt))
# # value of next state
# # dt_backup = 1.0
# ref_prll_vec, ref_orth_vec, state_nn = \
# pedData.rawState_2_agentCentricState(\
# agent_next_state, others_next_state, self.num_agents)
# value = self.value_net_copy.find_states_values(agent_next_state, others_next_state)
# return state_nn, value
# find intended next states(traj_raw_multi)
def find_deviation_cases(self, traj_raw_multi):
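        # Scan the trajectory for the instant where an agent's intended motion
        # (current velocity held for ~1 s) deviates most from where it actually
        # ended up, then rebuild that configuration as a new test case.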
time_to_reach_goal, traj_lengths, min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
num_agents = len(traj_raw_multi) - 1
time_vec = traj_raw_multi[0]
num_pts = len(time_vec)
max_deviation = 0.0
        max_deviation_ind = 0
max_ind_dt_forward = 0.0
future_time_ind = 0
for j in xrange(1,num_pts-1):
deviation_vec = np.zeros((num_agents,))
while time_vec[future_time_ind] - time_vec[j] < 1.0 \
and future_time_ind<num_pts-1:
future_time_ind += 1
if future_time_ind >= num_pts:
break
dt_forward = time_vec[future_time_ind] - time_vec[j]
for i in xrange(num_agents):
if time_to_reach_goal[i] > future_time_ind:
continue
agent_state_pos = traj_raw_multi[i+1][j,0:2]
agent_action_xy_chosen = traj_raw_multi[i+1][j+1,2:4]
agent_intended_pos = agent_state_pos + \
agent_action_xy_chosen * dt_forward
agent_future_pos = traj_raw_multi[i+1][future_time_ind ,0:2]
deviation_vec[i] = np.linalg.norm(agent_intended_pos - \
agent_future_pos) / traj_raw_multi[i+1][0,5]
max_deviation_tmp = np.max(deviation_vec)
if max_deviation_tmp > max_deviation:
max_deviation = max_deviation_tmp
max_deviation_ind = j
max_ind_dt_forward = dt_forward
# build test case
test_case = np.zeros((num_agents, 6))
j = max_deviation_ind
dt_forward = max_ind_dt_forward
for i in xrange(num_agents):
test_case[i,0:2] = traj_raw_multi[i+1][j,0:2] + \
dt_forward * traj_raw_multi[i+1][j+1,2:4]
test_case[i,2:4] = traj_raw_multi[i+1][j,6:8]
test_case[i,4] = traj_raw_multi[i+1][j,5]
test_case[i,5] = traj_raw_multi[i+1][j,8]
# print dt_forward
# print test_case
# raw_input()
return test_case
# returns
# time_2_goal_vec, time_2_goal_bnd, agent_centric_states, values, action_rewards
def rawTraj_2_trainingStats(self, time_vec, traj_raw_multi, agent_num, iteration=0):
num_pts = len(time_vec)
# compute stats
# print time_vec.shape, agent_states.shape, other_agent_states.shape
agent_states = traj_raw_multi[agent_num+1]
other_agents_states = [traj_raw_multi[tt] for tt in \
xrange(1, len(traj_raw_multi)) if tt!=agent_num+1]
# print 'agent_number+1', agent_num+1
# print 'other', [tt for tt in \
# xrange(1, len(traj_raw_multi)) if tt!=agent_num+1]
time_to_reach_goal, traj_lengths, min_sepDist, if_completed_vec \
= pedData.computeStats(traj_raw_multi)
agent_speed = agent_states[0,5]
# initialize return values
time_2_goal_vec = np.empty((num_pts,)); time_2_goal_vec[:] = np.nan
time_2_goal_bnd = np.empty((num_pts,)); time_2_goal_bnd[:] = np.nan
num_states = 7 + 8 * (self.num_agents -1)
agent_centric_states = np.zeros((num_pts, num_states))
values = np.zeros((num_pts,))
action_rewards = np.zeros((num_pts,))
gamma = GAMMA
dt_normal = DT_NORMAL
agent_desired_speed = agent_speed
counter = 0
time_bnd = np.linalg.norm(agent_states[0,0:2]-agent_states[0,6:8])/agent_states[0,5]
ifReachedGoal = False
# filter speeds
num_other_agents = len(other_agents_states)
other_agents_filtered_vel = np.zeros((num_pts, num_other_agents * 2))
dt_vec = time_vec.copy(); dt_vec[1:] = time_vec[1:] - time_vec[:-1]; dt_vec[0] = dt_vec[1]
time_past_one_ind = 0
for i in xrange(num_pts):
while time_vec[i] - time_vec[time_past_one_ind] > 0.45:
time_past_one_ind += 1
agent_pos = agent_states[i,0:2]
dt_past_vec = dt_vec[time_past_one_ind:i+1]
for j in xrange(num_other_agents):
past_vel = other_agents_states[j][time_past_one_ind:i+1,2:5]
if np.linalg.norm(agent_pos - other_agents_states[j][i,0:2]) < 0.5:
other_agents_filtered_vel[i,j*2:(j+1)*2] = \
nn_nav.filter_vel(dt_past_vec, past_vel, ifClose=True)
else:
other_agents_filtered_vel[i,j*2:(j+1)*2] = \
nn_nav.filter_vel(dt_past_vec, past_vel, ifClose=False)
for i in xrange(num_pts):
counter += 1
agent_state = agent_states[i,:]
other_agents_state = [other_agents_states[tt][i,:].copy() for tt in xrange(len(other_agents_states))]
# for j in xrange(num_other_agents):
# # print i,j, 'before', other_agents_state[j][2:4]
# other_speed = other_agents_filtered_vel[i,j*2]
# other_angle = other_agents_filtered_vel[i,j*2+1]
# other_agents_state[j][2] = other_speed * np.cos(other_angle)
# other_agents_state[j][3] = other_speed * np.sin(other_angle)
# # print 'after', other_agents_state[j][2:4]
# raw_input()
# print 'd_2_goal', np.linalg.norm(agent_state[0:2] - agent_state[6:8])
# print 'time %.3f, time_to_reach_goal %.3f' %(time_vec[i], time_to_reach_goal[agent_num])
# print '---- ifReachedGoal ---', ifReachedGoal
# time 2 goal
if ifReachedGoal:
time_2_goal_vec[i] = 0.0
elif if_completed_vec[agent_num]:
time_2_goal_vec[i] = time_to_reach_goal[agent_num] - time_vec[i]
try:
assert(time_2_goal_vec[i] > -EPS)
except AssertionError:
print(time_to_reach_goal[agent_num])
print(time_vec[i])
assert(0)
# # agent_centric_state
# agent_speed = agent_state[5]
# assert(agent_speed > 0.1 - EPS)
# dt_backward_max = max(self.value_net.dt_forward, 0.5/agent_speed)
# # dt_forward_max = self.dt_forward
# dist_to_goal = np.linalg.norm(agent_state[6:8]- agent_state[0:2])
# time_to_goal = dist_to_goal / agent_speed
# dt_backward= min(dt_backward_max, time_to_goal) #1.0
# ii = i
# while ii > 0:
# if time_vec[i] - time_vec[ii] > dt_backward:
# ii = ii - 1
# other_agents_past_state = [other_agents_states[tt][ii,:].copy() for tt in xrange(len(other_agents_states))]
# ref_prll, ref_orth, state_nn = \
# pedData.rawState_2_agentCentricState( \
# agent_state, other_agents_past_state, self.num_agents)
# agent_centric_states[i,:] = state_nn.copy()
ref_prll, ref_orth, state_nn = \
pedData.rawState_2_agentCentricState( \
agent_state, other_agents_state, self.num_agents)
agent_centric_states[i,:] = state_nn.copy()
# time_2_goal_bnd
time_2_goal_bnd[i] = state_nn[0] / agent_speed
# time_2_goal_bnd[i] = time_bnd - time_vec[i]
# action_rewards and values
if i == 0:
values[0] = self.value_net_copy.find_states_values(agent_state, other_agents_state)
if i < num_pts - 1:
# note i+1
agent_next_state = agent_states[i+1,:]
other_agents_next_state = [other_agents_states[tt][i+1,:] for tt in xrange(len(other_agents_states))]
dt_forward = time_vec[i+1] - time_vec[i]
state_value, action_reward = \
self.value_net_copy.find_next_state_pair_value_and_action_reward(agent_state, \
agent_next_state, other_agents_state, \
other_agents_next_state, dt_forward)
# print 'method 1: state_value, ', state_value1
cur_dist_vec = [np.linalg.norm(agent_state[0:2] - other_agent_state[0:2])-\
agent_state[8]-other_agent_state[8] for \
other_agent_state in other_agents_state]
cur_dist = min(cur_dist_vec)
# min_dists = [np.linalg.norm(agent_next_state[0:2] - other_agent_next_state[0:2])-\
# agent_next_state[8]-other_agent_next_state[8] for \
# other_agent_next_state in other_agents_next_state]
# # print 'i, cur_dist, next_dist', i, cur_dist, min(min_dists)
# # min_dist = np.array([min(min_dists)]) #- np.random.rand() * 0.05
# min_dist = np.array([cur_dist]) + 1.0
action_reward = self.value_net_copy.find_action_rewards_train(agent_state, \
cur_dist, dt_forward)
# action_reward_min = min(action_reward, action_reward_2)
# if action_reward_min < -EPS:
# print action_reward, action_reward_2, action_reward < action_reward_2
# raw_input()
# action_reward = action_reward_min
if abs(state_value) < EPS:
state_value = 0.01
# state_value = self.value_net_copy.find_states_values(agent_next_state, other_agents_next_state)
# # print 'method 2: state_value, ', state_value
# if abs(state_value1 - state_value) > 0.01:
# print 'method 1: state_value, ', state_value1
# print 'method 2: state_value, ', state_value
# print 'num_agents', len(other_agents_state)
# print ' --- 111 ---'
# state_value1, action_reward = \
# self.value_net_copy.find_next_state_pair_value_and_action_reward(agent_state, \
# agent_next_state, other_agents_state, \
# other_agents_next_state, dt_forward)
# print ' --- 222 ---'
# state_value = self.value_net_copy.find_states_values(agent_next_state, other_agents_next_state)
# raw_input()
action_rewards[i] = action_reward
values[i+1] = state_value
if i == num_pts - 1:
cur_dist_vec = [np.linalg.norm(agent_state[0:2] - other_agent_state[0:2])-\
agent_state[8]-other_agent_state[8] for \
other_agent_state in other_agents_state]
cur_dist = min(cur_dist_vec)
min_dists = np.array(cur_dist_vec) + 1.0
dt_forward = 1.0
action_rewards[i] = self.value_net_copy.find_action_rewards(agent_state, \
cur_dist, min_dists, dt_forward)[0]
# terminal states
is_terminal_state = self.value_net_copy.if_terminal_state(agent_state, other_agents_state)
if is_terminal_state == COLLIDED:
values[i] = COLLISION_COST
action_rewards[i] = 0.0
break
elif is_terminal_state == REACHED_GOAL:
Dt_bnd = state_nn[0] / state_nn[1]
values[i] = (gamma ** (Dt_bnd * state_nn[1] / dt_normal))
action_rewards[i] = 0.0
ifReachedGoal = True
break
# sufficiently close to goal but also close to the other agent
elif np.linalg.norm(agent_state[0:2]-agent_state[6:8]) < DIST_2_GOAL_THRES:
Dt_bnd = state_nn[0] / state_nn[1]
values[i] = (gamma ** (Dt_bnd * state_nn[1] / dt_normal))
ifReachedGoal = True
break
# debug
# print 'time, dist_to_goal, pref_speed', time_vec[i], \
# np.linalg.norm(agent_state[0:2]-agent_state[6:8]), agent_state[5]
# if np.linalg.norm(agent_state[0:2]-agent_state[6:8])<DIST_2_GOAL_THRES:
# print 'here'
# print agent_state
# print other_agent_state
# print np.linalg.norm(agent_state[0:2]-other_agent_state[0:2])- \
# agent_state[8]-other_agent_state[8]
eff_pts = min(num_pts, counter)
# print 'num_pts, counter, eff_pts', num_pts, counter, eff_pts
try:
assert(num_pts>0)
except:
for i in xrange(1,len(traj_raw_multi)):
print(traj_raw_multi[i][0,:])
assert(0)
return time_2_goal_vec[0:eff_pts], time_2_goal_bnd[0:eff_pts], \
agent_centric_states[0:eff_pts,:], values[0:eff_pts], action_rewards[0:eff_pts]
def rawTraj_2_trainingData(self, traj_raw_multi, gamma, iteration, ifOnlyFirstAgent=False):
time_vec = traj_raw_multi[0]
num_agents = len(traj_raw_multi) - 1
agents_time_2_goal_vec_list = []
agents_time_2_goal_bnd_list = []
agents_centric_states_list = []
agents_values_list = []
agents_action_reward_list = []
agents_extra_time_list = []
X = []; Y = []; values_diff = []
for tt in xrange(num_agents):
time_2_goal_vec, time_2_goal_bnd, agent_centric_states, \
values, action_rewards = self.rawTraj_2_trainingStats( \
time_vec, traj_raw_multi, tt, iteration=iteration)
extra_time = self.computeExtraTime(time_2_goal_vec,time_2_goal_bnd, \
time_vec[0:len(time_2_goal_bnd)])
agents_time_2_goal_vec_list.append(time_2_goal_vec)
agents_time_2_goal_bnd_list.append(time_2_goal_bnd)
agents_centric_states_list.append(agent_centric_states)
agents_values_list.append(values)
agents_action_reward_list.append(action_rewards)
agents_extra_time_list.append(extra_time)
dt = TRAINING_DT
for tt in xrange(num_agents):
if ifOnlyFirstAgent and tt > 0:
break
# skip straight line trajectories
# if abs(agents_time_2_goal_vec_list[tt][0] - np.linalg.norm(traj_raw_multi[tt+1][0,0:2]-\
# traj_raw_multi[tt+1][0,6:8])/traj_raw_multi[tt+1][0,5]) < EPS:
path_length = np.linalg.norm(traj_raw_multi[tt+1][0,0:2]-\
traj_raw_multi[tt+1][0,6:8])
exp_min_time = path_length /traj_raw_multi[tt+1][0,5]
if_completed = np.isnan(agents_time_2_goal_vec_list[tt][0]) == False
if path_length < EPS or (if_completed and (agents_time_2_goal_vec_list[tt][0] / exp_min_time < 1.05)):
continue
agent_num_pts = len(agents_time_2_goal_bnd_list[tt])
# don't include stationary agents
# if agent_num_pts < 2:
# continue
other_agents_extra_time = [agents_extra_time_list[i] for i in xrange(num_agents) if i!=tt]
other_agents_states = [traj_raw_multi[i+1] for i in xrange(num_agents) if i!=tt]
agent_states = traj_raw_multi[tt+1]
X1, Y1, values_diff1 = self.trainingStats_2_trainingData(time_vec[0:agent_num_pts], dt, \
agents_time_2_goal_vec_list[tt], agents_time_2_goal_bnd_list[tt], agents_centric_states_list[tt], \
agents_values_list[tt], agents_action_reward_list[tt], other_agents_extra_time, \
agent_states, other_agents_states, iteration, traj_raw_multi=traj_raw_multi)
# print X1[1,:]
# print Y1[1,:]
# raw_input()
if len(X) == 0:
X = X1.copy()
Y = Y1.copy()
values_diff = values_diff1
else:
X = np.vstack((X, X1.copy()))
Y = np.vstack((Y, Y1.copy()))
values_diff = np.hstack((values_diff, values_diff1))
# X_future, Y_future = self.find_intended_future_states(traj_raw_multi)
# X = np.vstack((X, X_future.copy()))
# Y = np.vstack((Y, Y_future.copy()))
# num_pts = len(X)
# num_pts_thres = 300
# if num_pts > num_pts_thres:
# minibatch = np.random.permutation(np.arange(num_pts))[0:num_pts_thres]
# X = X[minibatch,:]
# Y = Y[minibatch,:]
return X, Y, values_diff
# def debug_rawTraj_2_trajStats(self):
# for i, test_case in enumerate(self.test_cases):
# if i != 2:
# continue
# traj_raw, agent_1_time, agent_2_time, if_collided = \
# self.value_net.generate_traj(test_case, figure_name='no_plot')
# traj_raw_multi = pedData.traj_raw_2_traj_raw_multi(traj_raw)
# time_vec = traj_raw_multi[0]
# agent_states = traj_raw_multi[1]
# other_agent_states = traj_raw_multi[2]
# time_vec = traj_raw[:,0]
# agent_1_states = traj_raw[:,1:10]
# agent_2_states = traj_raw[:,10:19]
# a1_time_2_goal_vec, a1_time_2_goal_bnd, a1_agent_centric_states, \
# a1_values, a1_action_rewards = self.rawTraj_2_trainingStats( \
# time_vec, agent_states, other_agent_states)
# # np.set_printoptions(precision=4,formatter={'float': '{: 0.3f}'.format})
# # zero_inds = np.where(a1_action_rewards<EPS)[0]
# # a1_action_rewards[zero_inds] = 0
# # print a1_action_rewards[zero_inds]
# a2_time_2_goal_vec, a2_time_2_goal_bnd, a2_agent_centric_states, \
# a2_values, a2_action_rewards = self.rawTraj_2_trainingStats( \
# time_vec, other_agent_states, agent_states)
# # zero_inds = np.where(a2_action_rewards<EPS)[0]
# # a2_action_rewards[zero_inds] = 0
# # print a2_action_rewards[zero_inds]
# print '--- test_case %d --- ' % i
# self.rawTraj_2_trajStats(time_vec, agent_1_states, agent_2_states, \
# a1_time_2_goal_vec, a1_agent_centric_states, ifPlot=True)
# self.rawTraj_2_trajStats(time_vec, agent_2_states, agent_1_states, \
# a2_time_2_goal_vec, a2_agent_centric_states, ifPlot=True)
# gamma = 0.97
# X, Y = self.rawTraj_2_trainingData(traj_raw, gamma, 0)
# compute trajectory properties, such as passing on the left of the other vehicle
def rawTraj_2_trajStats(self, time_vec, agent_states, other_agent_states, \
time_2_goal_vec, agent_centric_states, iteration=0, ifPlot=False):
num_pts = len(time_vec) - 1
if np.isnan(time_2_goal_vec[0]):
return np.ones((num_pts,))
bad_inds_oppo, bad_inds_same, bad_inds_tangent = \
self.value_net.find_bad_inds(agent_centric_states)
#scaling factor
d = np.linalg.norm(agent_states[:-1,0:2] - agent_states[:-1,6:8], axis=1)
v = agent_states[0,5]
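        # (negative) proximity-penalty scale: the discounted value at the current
        # distance-to-goal d, times roughly one step's worth of discount at the
        # preferred speed v (negative since GAMMA < 1).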
getting_close_penalty = GAMMA ** (d/DT_NORMAL) * (1.0 - GAMMA ** (-v/DT_NORMAL))
penalty = np.zeros((num_pts,))
penalty[bad_inds_oppo] = 0.7 * getting_close_penalty[bad_inds_oppo]
penalty[bad_inds_same] = 0.7 * getting_close_penalty[bad_inds_same]
penalty[bad_inds_tangent] = 0.7 * getting_close_penalty[bad_inds_tangent]
time_2_goal_upper_bnd = np.zeros((num_pts,))
time_2_goal_upper_bnd[bad_inds_oppo] = time_2_goal_vec[bad_inds_oppo] + 1.0
time_2_goal_upper_bnd[bad_inds_same] = time_2_goal_vec[bad_inds_same] + 1.0
time_2_goal_upper_bnd[bad_inds_tangent] = time_2_goal_vec[bad_inds_tangent] + 1.0
dt_normal = DT_NORMAL
value_upper_bnd = GAMMA ** (time_2_goal_upper_bnd * agent_states[0,5] / dt_normal)
# print dt_normal
# print value_upper_bnd
# raw_input()
# penalty[bad_inds_same] += -0.2
# penalty = np.clip(penalty, -0.1, 0.0)
if ifPlot: #len(bad_inds_oppo) > 3 or len(bad_inds_same) or len(bad_inds_tangent) :
# print 'heading_diff[bad_inds_oppo]', heading_diff[bad_inds_oppo]
# print 'tangent_inds', tangent_inds
# print 'stationary_inds', stationary_inds
traj_raw = np.hstack((time_vec[:,np.newaxis], agent_states, other_agent_states))
pedData.plot_traj_raw_multi(traj_raw, 'from rawTraj_2_trajStats', figure_name="raw_traj")
if len(bad_inds_oppo) > 0:
print('bad_inds_oppo', bad_inds_oppo)
traj_raw_bad = np.hstack((time_vec[bad_inds_oppo,np.newaxis], agent_states[bad_inds_oppo,:], \
other_agent_states[bad_inds_oppo,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds oppo', figure_name="bad_inds_oppo")
# raw_input()
if len(bad_inds_same) > 0:
print('bad_inds_same', bad_inds_same)
traj_raw_bad = np.hstack((time_vec[bad_inds_same,np.newaxis], agent_states[bad_inds_same,:], \
other_agent_states[bad_inds_same,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds same', figure_name="bad_inds_same")
# raw_input()
if len(bad_inds_tangent) > 0:
print('bad_inds_tangent', bad_inds_tangent)
traj_raw_bad = np.hstack((time_vec[bad_inds_tangent,np.newaxis], agent_states[bad_inds_tangent,:], \
other_agent_states[bad_inds_tangent,:]))
# print('traj_raw_bad', traj_raw_bad)
pedData.plot_traj_raw_multi(traj_raw_bad, 'from rawTraj_2_trajStats, bad inds tangent', figure_name="bad_inds_tangent")
# raw_input()
print(penalty)
raw_input()
# if iteration < 200:
# penalty[bad_inds_same] = 3.0 * getting_close_penalty[bad_inds_same]
# return penalty
return value_upper_bnd
def computeExtraTime(self, time_2_goal_vec, time_bnd, time_vec):
# method 1
# if np.isnan(time_2_goal_vec[0]):
# extra_time = np.zeros((len(time_2_goal_vec),))
# extra_time[:] = np.inf
# else:
# extra_time = np.clip(time_2_goal_vec - time_bnd, 0, 100)
# try:
# assert(np.all(extra_time>-EPS))
# except AssertionError:
# print 'extra_time', extra_time
# print 'time_2_goal_vec', time_2_goal_vec
# print 'time_bnd', time_bnd
# assert(0)
# return extra_time
# print 'time_bnd', time_bnd
# print 'time_2_goal_vec',time_2_goal_vec
# print np.clip(time_2_goal_vec - time_bnd, 0, 100)
# method 2
if np.isnan(time_2_goal_vec[0]):
extra_time_individual = np.zeros((len(time_2_goal_vec),))
extra_time_individual[:] = np.inf
elif len(time_vec) < 2:
extra_time_individual = np.zeros((len(time_2_goal_vec),))
extra_time_individual[:] = 0
else:
dt_time_vec = time_vec.copy()
dt_time_vec[:-1] = time_vec[1:]-time_vec[:-1]; dt_time_vec[-1] = dt_time_vec[-2]
dt_2_goal = time_bnd.copy()
dt_2_goal[:-1] = time_bnd[:-1]-time_bnd[1:]; dt_2_goal[-1] = dt_2_goal[-2]
extra_time_individual_raw = dt_time_vec - dt_2_goal
try:
assert(np.all(extra_time_individual_raw>-EPS))
except AssertionError:
print('extra_time_individual_raw', extra_time_individual_raw)
print('dt_time_vec', dt_time_vec)
print('dt_2_goal', dt_2_goal)
assert(0)
# print 'extra_time_individual', extra_time_individual
width = 5
num_pts = len(extra_time_individual_raw)
extra_time_individual = extra_time_individual_raw.copy()
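            # Smooth the per-step extra time with a sliding window of +/- `width`
            # samples around each index.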
for i in xrange(num_pts):
extra_time_individual[i] = \
np.sum(extra_time_individual_raw[max(0,i-width):min(i+width, num_pts)])
return extra_time_individual
def minFutureRewards(self, action_rewards):
num_pts = len(action_rewards)
future_min_rewards = action_rewards.copy()
for i in xrange(num_pts):
future_min_rewards[i] = np.min(action_rewards[i:])
return future_min_rewards
def trainingStats_2_trainingData(self, time_vec, dt, time_2_goal_vec, \
time_2_goal_bnd, agent_centric_states, values, \
action_rewards, other_agents_extra_time, agent_states, other_agents_states, iteration, traj_raw_multi=None):
num_pts = len(time_vec)
num_states = 7 + 8 * (self.num_agents - 1)
X = np.zeros((num_pts,num_states)); X_future = np.zeros((num_pts,num_states)); X_stuck = np.zeros((0,num_states))
Y = np.zeros((num_pts,1)); Y_future = np.zeros((num_pts,1)); Y_stuck = np.zeros((0,1))
future_value_inds = []
extra_time = self.computeExtraTime(time_2_goal_vec,time_2_goal_bnd, time_vec)
dist_travelled_vec = np.linalg.norm(agent_states[1:,0:2]-agent_states[0:-1,0:2], axis=1)
dist_travelled_vec = np.append(dist_travelled_vec,[0])
# if len(other_extra_time) > num_pts:
# other_extra_time = other_extra_time[0:num_pts]
# else:
# other_extra_time_tmp = np.zeros((num_pts,))
# other_extra_time_tmp[0:len(other_extra_time)] = other_extra_time
# other_extra_time = other_extra_time_tmp
# if other agents have collided
if_other_collided = False
num_other_agents = len(other_agents_states)
for i in xrange(num_other_agents):
for j in xrange(i+1, num_other_agents):
dist = np.linalg.norm(other_agents_states[i][-1, 0:2] -
other_agents_states[j][-1, 0:2]) - \
other_agents_states[i][-1,8] - other_agents_states[j][-1,8]
if dist < 0:
if_other_collided = True
# if agent has collided with others
if_agent_collided = False
for i in xrange(num_other_agents):
dist = np.linalg.norm(agent_states[-1, 0:2] -
other_agents_states[i][-1, 0:2]) - \
agent_states[-1,8] - other_agents_states[i][-1,8]
if dist < 0.0:
if_agent_collided = True
break
# dist_2_others (see README.txt)
others_columns_inds = [7 + 6 + 8*(tt) for tt in xrange(num_other_agents)]
min_dist_2_others = np.min(agent_centric_states[:,others_columns_inds], axis = 1)
gamma = GAMMA
dt_normal = DT_NORMAL
agent_desired_speed = agent_centric_states[0,1]
j = 0
dt_forward_vec = np.zeros((len(time_2_goal_bnd),))
if_extra = False
if_stuck = False
if_stuck_counter = 0
counter = 0
for i in xrange(num_pts-1):
while time_vec[j] - time_vec[i] < dt and j < num_pts-1:
if min_dist_2_others[j+1] > 0 or j<=i:
j += 1
# elif min_dist_2_others[j] < GETTING_CLOSE_RANGE:
# break
else:
break
if i == num_pts - 1:
j = i
# skip points
# if time_2_goal_vec[i] < time_2_goal_bnd[i] * 1.01:
# # print 'time_2_goal_vec[i], time_2_goal_bnd[i]', time_2_goal_vec[i], time_2_goal_bnd[i]
# # raw_input()
# if np.random.rand() > 0.2:
# continue
# else:
# break
X[counter,:] = agent_centric_states[i,:]
# compute value using q-learning update
# print 'j, num_pts', j, num_pts
# print len(time_2_goal_vec), len(time_2_goal_bnd), \
# len(agent_centric_states), len(agent_centric_states), \
# len(values), len(action_rewards), len(other_extra_time)
# neural net output is non-sensible (negative value and zero reward)
value_bnd = (gamma ** (agent_centric_states[i,0] / dt_normal))
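            # value_bnd above is an upper bound on the state value: the discounted
            # return if the agent could head straight to the goal unimpeded.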
# if values[j] < 0 and agent_centric_states[j,13] > 0.1:
# state_value = max(0, value_bnd - 0.2)
# action_reward = action_rewards[i] #np.min(action_rewards[i:max(i+1,j)])
action_reward = np.min(action_rewards[i:max(i+1,j)])
############################################################################
# use one point
# print 'i %d, j %d' %(i, j)
dt_forward = time_vec[j] - time_vec[i]
# dist_travelled = np.linalg.norm(agent_states[j,0:2]-agent_states[i,0:2])
dist_travelled = np.sum(dist_travelled_vec[i:j])
dt_forward_scaled = dist_travelled / agent_desired_speed
assert(np.isnan(dt_forward_scaled)==0)
# dt_forward_adj = 1.0 * dt_forward + 0.0 * dt_forward_scaled
dt_forward_adj = 0.5 * dt_forward + 0.5 * dt_forward_scaled
# dt_forward_adj = 1.0 * dt_forward
# print dt_forward, dt_forward_scaled
# raw_input()
# try:
# assert(dt_forward +EPS >= dt_forward_adj)
# except:
# print 'dt_forward', dt_forward
# print 'dt_forward_scaled',dt_forward_scaled
# print 'dt_forward_adj', dt_forward_adj
# print 'dist_travalled', dist_travelled
# print 'dist_travelled / agent_desired_speed', dist_travelled / agent_desired_speed
# assert(0)
state_value = values[j]
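            # Q-learning style backup: bootstrap from the state roughly dt ahead,
            # V(s_i) <- r + GAMMA^(dt_forward_adj * v_pref / dt_normal) * V(s_j).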
value_q_learning = action_reward + gamma ** (dt_forward_adj * \
agent_desired_speed / dt_normal) * state_value
dt_forward_vec[i] = dt_forward
###########################################################################
# use all points upto 1 seconds into the future
# print 'i %d, j %d' %(i, j)
# upper_ind = j+1 # j+1
# lower_ind = min(j, i+5) # i+1
# dt_forward = time_vec[lower_ind:upper_ind] - time_vec[i]
# state_values = values[lower_ind:upper_ind]
# agent_speeds = agent_centric_states[lower_ind:upper_ind,2]
# # dt_forward_post = dt_forward.copy()
# # dt_forward_tmp = time_vec[i+1:j+1] - time_vec[i:j]
# # for tt in xrange(1,j-i):
# # dt_forward_post[tt-1] = dt_forward[tt-1] * 0.2 + 0.8 * np.sum(agent_speeds[0:tt] / agent_desired_speed \
# # * dt_forward_tmp[0:tt])
# dist_travelled = dist_travelled_vec[lower_ind:upper_ind].copy()
# dist_travelled[0] += np.sum(dist_travelled_vec[i:lower_ind])
# for tt in xrange(1, len(dist_travelled)):
# dist_travelled[tt] += dist_travelled[tt-1]
# # dist_travelled = np.linalg.norm(agent_states[lower_ind:upper_ind,0:2]-agent_states[i,0:2], axis=1)
# dt_forward_post = 0.5 * dt_forward + 0.5 * dist_travelled / agent_desired_speed
# value_q_learning = action_reward + np.mean(gamma ** (dt_forward_post * \
# agent_desired_speed / dt_normal) * state_values)
# dt_forward_vec[i] = time_vec[j] - time_vec[i]
# try:
# assert(np.isnan(value_q_learning) == False)
# except:
# print value_q_learning
# print action_reward
# print dt_forward_post
# print dt_forward_post * agent_desired_speed / dt_normal
# assert(0)
############################################################################
if value_q_learning > value_bnd:
value_q_learning = value_bnd
# compute value using actual time to reach goal
if (not if_other_collided) and min_dist_2_others[-1] > 0 and \
np.isnan(time_2_goal_vec[i]) and \
(abs(agent_centric_states[i,0] - agent_centric_states[-1,0]) < 1.0) \
and (abs(agent_centric_states[i,0] - agent_centric_states[-1,0]) < \
1.0 * agent_centric_states[0,1]): # stuck
# print 'min_dist_2_others[-1] > 0', min_dist_2_others[-1] > 0
# value_q_learning = value_q_learning * 0.8 * (agent_centric_states[i,2] / agent_centric_states[i,1])
value_q_learning = 0.01
# value_q_learning = max(0.01, value_q_learning - 0.2)
if_stuck = True
if_stuck_counter += 1
# if trajectory is bad
# vehicle thinks it can reach goal faster than it actually did
# if not np.isnan(time_2_goal_vec[i]) and value_q_learning > EPS:
# agent_desired_speed = agent_centric_states[0,1]
# time_2_goal_value = np.log(value_q_learning) / np.log(gamma) * dt_normal / max(EPS, agent_desired_speed)
# if time_2_goal_value < time_2_goal_vec[i] - 1.0 or time_2_goal_value < time_2_goal_vec[i] * 0.8:
# value_q_learning *= 0.9
# print 'time_2_goal_value', time_2_goal_value
# print 'i', i
# print 'time_2_goal_vec[i]', time_2_goal_vec[i]
# raw_input()
# if np.min(action_rewards[i:]) > -EPS:
# value = max(value_q_learning, value_reach_goal)
# else:
value = value_q_learning
# value = max(value_q_learning, value_reach_goal)
# penalize if the other agent took a lot more time
# num_other_agents = len(other_agents_extra_time)
# for tt, other_agent_states in enumerate(other_agents_states):
# offset = 7 + 2 + tt * 8
# dist_2_other = np.linalg.norm(agent_centric_states[i, offset:offset+2])
# other_dist_2_goal = np.linalg.norm(other_agent_states[-1, 0:2]-other_agent_states[-1, 6:8])
# agent_speed = agent_centric_states[0, 1]
# other_agent_speed = other_agent_states[0, 5]
# other_extra_time = other_agents_extra_time[tt]
# if len(other_extra_time) <= i:
# continue
# if np.isnan(time_2_goal_vec[i]) == False and other_dist_2_goal <= DIST_2_GOAL_THRES and \
# other_extra_time[i] - extra_time[i] > 0.5 \
# and time_2_goal_vec[i] > 1.0 and dist_2_other < 2.5 \
# and agent_speed > other_agent_speed - 0.2:
# # and np.linalg.norm(other_agent_states[i,2:4]) > 0.5*other_agent_speed:
# # print other_extra_time[i], extra_time[i], dist_2_other, \
# # est, other_extra_time[i]-est - extra_time[i]
# penalty = gamma ** (min((other_extra_time[i] - extra_time[i]), 2.0) \
# * agent_desired_speed / dt_normal)
# value *= penalty
# break
# if_extra = True
# # print 'here'
# # print 'other_extra_time[i]', other_extra_time[i]
# # print 'extra_time[i]', extra_time[i]
# # print penalty
# # raw_input()
# # to speed up convergence
# # if iteration < 200 and time_2_goal_vec[i] < dt and dist_2_other > 2.5:
# # value = value_bnd
Y[counter,0] = max(value, -0.25)
# if value_q_learning == 0.01:
# X_stuck_pt, Y_stuck_pt = self.createStateSample(X[counter-1,:])
# # print X_stuck.shape, Y_stuck.shape
# X_stack = np.vstack((X_stuck, X_stuck_pt))
# Y_stack = np.vstack((Y_stuck, Y_stuck_pt))
# # print X_stuck_pt, Y_stuck_pt
# print counter
# if if_stuck_counter > 20:
# break
# future values
# agent_state = agent_states[i,:].copy()
# other_agents_state = [other_agents_states[tt][i,:].copy() for tt in xrange(len(other_agents_states))]
# state_nn_future, value_future = \
# self.find_intended_future_state_value(agent_state, agent_states[i+1,2:4], other_agents_state, dt_forward_vec[i])
# X_future[counter,:] = state_nn_future.copy()
# Y_future[counter,:] = value_future
# future_value_inds.append(j)
# # print 'value_future, values[j], dt_forward_vec[i]', value_future, values[j], dt_forward_vec[i]
# Y_future[i,0] = min(value_future, values[j])
counter += 1
# if counter < num_pts:
# print counter
# print num_pts
# raw_input()
# print counter
# debug
# min_dist_2_others = np.min(agent_centric_states[:,[13,21,29]], axis = 1)
# if np.any(Y[:,0]<EPS) and iteration > 0:
# if iteration > 0:
# np.set_printoptions(precision=4,formatter={'float': '{: 0.3f}'.format})
# print 'time_2_goal_vec, time_2_goal_bnd, dist_2_other, values, action_rewards, dt_forward, value_bnd, dist_2_goal, value_train'
# value_bnd = GAMMA ** (agent_centric_states[:,0] / DT_NORMAL)
# print np.vstack((time_2_goal_vec, time_2_goal_bnd, min_dist_2_others, values, action_rewards, \
# dt_forward_vec, value_bnd, X[:,0], Y[:,0])).transpose()
# print min_dist_2_others[-1]
# raw_input()
# if the trajectory is too long, subsample it with a stride (branch currently disabled by 'if False')
if False and counter > 100:
stride = int(counter / 100) + 1
X = X[0:counter:stride,]
Y = Y[0:counter:stride,]
agent_centric_states = agent_centric_states[0:counter:stride,:]
time_vec = time_vec[0:counter:stride]
values = values[0:counter:stride]
action_rewards = action_rewards[0:counter:stride]
else:
X = X[0:counter,:]
Y = Y[0:counter,:]
# print 'counter', counter
# X_future = X_future[0:counter]
# Y_future = Y_future[0:counter]
# Y_min_value = Y[np.clip(np.array(future_value_inds), 0, counter-1)]
# # print Y_min_value.shape, Y_future.shape
# Y_future = np.minimum(Y_future, Y_min_value)
# # print Y_future.shape
# # print np.hstack((Y_future, Y[np.clip(np.array(future_value_inds), 0, counter-1)]))
# # raw_input()
# X = np.vstack((X,X_future))
# Y = np.vstack((Y,Y_future))
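# predict values for the assembled training states with the copy of the value network;
# these predictions are used below to prioritize samples with large target error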
values_raw = np.squeeze(self.value_net_copy.nn.make_prediction_raw(X))
# if if_stuck:
# print 'X, Y'
# print np.hstack((X, Y, values_raw[:,np.newaxis]))
# raw_input()
# print values_raw.shape
# print values.shape
min_dist_2_others = np.min(agent_centric_states[:,[13,21,29]], axis = 1)
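# relative error between the training targets and the current network predictions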
values_diff = abs((Y[:,0]-values_raw) / Y[:,0])
# zero_inds = np.where(abs(Y[:,0])<EPS)[0]
# if len(zero_inds) > 0:
# print 'wrong', zero_inds, counter
# print X[zero_inds,:]
# print Y[zero_inds,0]
# print values_raw[zero_inds]
# raw_input()
# values_diff = abs((Y[:,0]-values[:-1]) / Y[:,0])
# print Y[:,0].shape
# print values_diff.shape
###################################################################
# # method 1
num_selected_inds = int(len(X)/5)
inds = np.argpartition(values_diff, -num_selected_inds)[-num_selected_inds:]
bad_inds = np.where(values_diff>0.1)[0]
inds = np.union1d(bad_inds, inds)
rand_inds = np.random.permutation(np.arange(len(X)))[0:num_selected_inds]
inds = np.union1d(inds, rand_inds)
# good_inds = np.argpartition(values_diff, num_selected_inds)[:num_selected_inds]
# inds = np.union1d(inds, good_inds)
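# NOTE: the line below keeps every index, overriding the hard-example selection above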
inds = np.arange(len(X))
###################################################################
# # method 2
# all_inds = np.arange(len(X))
# toward_goal_inds = np.where(abs(X[:,3]) < 0.2)[0]
# # print 'toward_goal_inds %d' \
# # %(len(toward_goal_inds))
# far_inds = np.where(min_dist_2_others < 0.3)[0]
# toward_goal_inds = np.setdiff1d(toward_goal_inds,far_inds)
# # print 'toward_goal_inds %d, not toward_goal_inds %d, total %d' \
# # %(len(toward_goal_inds), len(X) - len(toward_goal_inds), len(X))
# # raw_input()
# bad_inds = np.setdiff1d(all_inds, toward_goal_inds)
# inds = bad_inds
# if len(bad_inds) == 0:
# bad_inds = [0]
# toward_goal_inds_sample = \
# np.random.permutation(toward_goal_inds)[0:len(bad_inds)]
# inds = np.union1d(bad_inds, toward_goal_inds_sample)
# # bad_inds_2 = np.where(Y[:,0]<0.6)[0]
# # inds = np.union1d(inds, bad_inds_2)
###################################################################
X = X[inds,:]
Y = Y[inds,:]
values_diff = values_diff[inds]
# debug
# if counter > 300 or if_agent_collided:
# values_bnd = GAMMA ** (X[:,0]/DT_NORMAL)
# values = values[inds]
# print agent_desired_speed
# print values.shape
# np.set_printoptions(edgeitems=4, precision=4,formatter={'float': '{: 0.4f}'.format})
# print 'dist_2_goal, min_dist_2_others, dt, value_bnd, training_value, raw_values, action_rewards, values_diff'
# print np.vstack((X[:,0], min_dist_2_others[inds], time_vec[inds], values_bnd, Y[:,0], values, action_rewards[inds], values_diff)).transpose()
# raw_input()
values_diff = values_diff[:]
# bellman backup
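# data augmentation: copy each sample, randomly scale its speed and jitter its heading,
# then recompute the velocity components; the copied targets are presumably stacked alongside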
X1 = X.copy()
Y1 = Y.copy()
values_diff1 = values_diff.copy()
speed_factors = np.random.rand(len(X1))
angles_factors = (np.random.rand(len(X1)) - 0.5 ) * 0.1
X1[:,2] *= speed_factors; X1[:,3] = (X1[:,3] + angles_factors + np.pi) % (np.pi * 2) - np.pi
X1[:,4] = X1[:,2] * np.cos(X1[:,3])
X1[:,5] = X1[:,2] * np.sin(X1[:,3])
X = | np.vstack((X,X1)) | numpy.vstack |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import BayesianGaussianMixture
from matplotlib.patches import Ellipse
# For reproducibility
np.random.seed(1000)
nb_samples = 500
nb_centers = 5
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-5, 5],
centers=nb_centers, random_state=1000)
# Train the model with concentration 1000 and 0.1
for c in (1000.0, 0.1):
gm = BayesianGaussianMixture(n_components=5, weight_concentration_prior=c,
max_iter=10000, random_state=1000)
gm.fit(X)
print('Weights: {}'.format(gm.weights_))
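# fit_predict refits the model before assigning labels; gm.predict(X) would reuse the fit above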
Y_pred = gm.fit_predict(X)
print((Y_pred == 0).sum())
print((Y_pred == 1).sum())
print((Y_pred == 2).sum())
print((Y_pred == 3).sum())
print((Y_pred == 4).sum())
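# with a small weight_concentration_prior (0.1) the Dirichlet prior drives the weights of
# unneeded components toward zero, while a large prior (1000) keeps all five components active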
# Compute the parameters of the Gaussian mixture
m1 = gm.means_[0]
m2 = gm.means_[1]
m3 = gm.means_[2]
m4 = gm.means_[3]
m5 = gm.means_[4]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
c3 = gm.covariances_[2]
c4 = gm.covariances_[3]
c5 = gm.covariances_[4]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
we3 = 1 + gm.weights_[2]
we4 = 1 + gm.weights_[3]
we5 = 1 + gm.weights_[4]
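# the 1 + weight terms are presumably per-component scaling factors (e.g. line widths)
# for the plotting code that follows; they are not needed for the decomposition itself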
w1, v1 = np.linalg.eigh(c1)
w2, v2 = np.linalg.eigh(c2)
w3, v3 = np.linalg.eigh(c3)
w4, v4 = np.linalg.eigh(c4)
w5, v5 = np.linalg.eigh(c5)
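# each covariance eigen-decomposition gives the axis lengths (sqrt of the eigenvalues) and
# the orientation (leading eigenvector) of a confidence ellipse; a minimal sketch for the
# first component, assuming the omitted plotting code draws one such patch per cluster:
ell1_angle = np.degrees(np.arctan2(v1[1, 1], v1[0, 1]))
ell1 = Ellipse(xy=m1, width=3.0 * np.sqrt(w1[1]), height=3.0 * np.sqrt(w1[0]),
               angle=ell1_angle, fill=False, linestyle='dashed')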
nv1 = v1 / np.linalg.norm(v1)
nv2 = v2 / | np.linalg.norm(v2) | numpy.linalg.norm |