prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars)
---|---|---|
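Each row below pairs a code prompt (cut off at the point where the target call should appear) with the completion that fills it in and the fully qualified API being called. As a minimal, hypothetical sketch of how rows with these three fields could be iterated (the JSONL path and storage format are assumptions, not something this page specifies):

import json
from collections import Counter

def iter_rows(path="rows.jsonl"):  # hypothetical file name, one JSON object per line
    """Yield (prompt, completion, api) triples."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            yield row["prompt"], row["completion"], row["api"]

if __name__ == "__main__":
    # e.g. count how often each API appears as the completion target
    api_counts = Counter(api for _, _, api in iter_rows())
    print(api_counts.most_common(5))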
import numpy as np
from openpnm.algorithms import IonicTransport, TransientReactiveTransport
class TransientIonicTransport(IonicTransport, TransientReactiveTransport):
r"""
A subclass of GenericTransport to perform steady and transient simulations
of pure diffusion, advection-diffusion and advection-diffusion with
migration.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'gui': {'setup': {'phase': None,
'potential_field': '',
'ions': [],
'i_tolerance': None,
'i_max_iter': None,
't_initial': None,
't_final': None,
't_step': None,
't_output': None,
't_tolerance': None,
't_scheme': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, potential_field='', ions=[], i_tolerance=None,
i_max_iter=None, t_initial=None, t_final=None, t_step=None,
t_output=None, t_tolerance=None, t_precision=None, t_scheme='',
**kwargs):
if phase:
self.settings['phase'] = phase.name
if potential_field:
self.settings['potential_field'] = potential_field
if ions:
self.settings['ions'] = ions
if i_tolerance:
self.settings['i_tolerance'] = i_tolerance
if i_max_iter:
self.settings['i_max_iter'] = i_max_iter
if t_initial is not None:
self.settings['t_initial'] = t_initial
if t_final is not None:
self.settings['t_final'] = t_final
if t_step is not None:
self.settings['t_step'] = t_step
if t_output is not None:
self.settings['t_output'] = t_output
if t_tolerance is not None:
self.settings['t_tolerance'] = t_tolerance
if t_precision is not None:
self.settings['t_precision'] = t_precision
if t_scheme:
self.settings['t_scheme'] = t_scheme
self.settings.update(kwargs)
def run(self, t=None):
r"""
"""
print('―'*80)
print('Running TransientIonicTransport')
# Phase, potential and ions algorithms
phase = self.project.phases()[self.settings['phase']]
p_alg = self.project.algorithms()[self.settings['potential_field']]
e_alg = [self.project.algorithms()[self.settings['ions'][i]] for i in
range(len(self.settings['ions']))]
algs = e_alg.copy()
algs.insert(0, p_alg)
# Define initial conditions (if not defined by the user)
for alg in algs:
try:
alg[alg.settings['quantity']]
except KeyError:
try:
alg.set_IC(phase[alg.settings['quantity']])
except KeyError:
alg.set_IC(0)
# Save A matrix of the steady sys of eqs (WITHOUT BCs applied)
for alg in algs:
alg._build_A()
alg._A_steady = (alg._A).copy()
# Initialize A and b with BCs applied
for e in e_alg:
e._t_update_A()
e._t_update_b()
e._apply_BCs()
e._A_t = (e._A).copy()
e._b_t = (e._b).copy()
# Init A&b with BCs for charge conservation eq, independent of t_scheme
p_alg._apply_BCs()
p_alg._A_t = (p_alg._A).copy()
p_alg._b_t = (p_alg._b).copy()
if t is None:
t = self.settings['t_initial']
# Create S1 & S2 for 1st Picard's iteration
for alg in algs:
alg._update_iterative_props()
# Setup algorithms transient settings
for alg in algs:
alg.setup(t_initial=self.settings['t_initial'],
t_final=self.settings['t_final'],
t_step=self.settings['t_step'],
t_output=self.settings['t_output'],
t_tolerance=self.settings['t_tolerance'],
t_precision=self.settings['t_precision'],
t_scheme=self.settings['t_scheme'])
self._run_transient(t=t)
def _run_transient(self, t):
"""r
"""
# Phase, potential and ions algorithms
phase = self.project.phases()[self.settings['phase']]
p_alg = self.project.algorithms()[self.settings['potential_field']]
e_alg = [self.project.algorithms()[self.settings['ions'][i]] for i in
range(len(self.settings['ions']))]
algs = e_alg.copy()
algs.insert(0, p_alg)
tf = self.settings['t_final']
dt = self.settings['t_step']
to = self.settings['t_output']
t_tol = self.settings['t_tolerance']
i_tol = self.settings['i_tolerance']
t_pre = self.settings['t_precision']
s = self.settings['t_scheme']
# Initialize residuals & old/new fields for time marching
t_res = {}
t_old = {}
t_new = {}
for alg in algs:
t_res[alg.name] = 1e+06
t_old[alg.name] = None
t_new[alg.name] = None
if type(to) in [float, int]:
# Make sure 'tf' and 'to' are multiples of 'dt'
tf = tf + (dt-(tf % dt))*((tf % dt) != 0)
to = to + (dt-(to % dt))*((to % dt) != 0)
self.settings['t_final'] = tf
self.settings['t_output'] = to
out = np.arange(t+to, tf, to)
elif type(to) in [np.ndarray, list]:
out = np.array(to)
out = np.append(out, tf)
out = np.unique(out)
out =
|
np.around(out, decimals=t_pre)
|
numpy.around
|
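The completion above rounds the array of output times to the algorithm's time precision. A small self-contained illustration of numpy.around with the decimals argument (values are illustrative, not taken from the dataset):

import numpy as np

out = np.arange(0.1, 1.0, 0.2)        # candidate output times, with float noise
t_pre = 3                             # number of decimals to keep
out = np.around(out, decimals=t_pre)  # -> array([0.1, 0.3, 0.5, 0.7, 0.9])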
from torch.utils.data import Dataset
import glob
import numpy as np
import SimpleITK as sitk
from skimage import transform
import torch
class LGE_TrainSet(Dataset):
def __init__(self,dir,sample_num):
self.imgdir=dir+'/LGE/'
self.imgsname = glob.glob(self.imgdir + '*LGE.nii*')
imgs = np.zeros((1,192,192))
self.info = []
self.times = int((35.0 / sample_num) * 4)
for img_num in range(sample_num):
itkimg = sitk.ReadImage(self.imgsname[img_num])
npimg = sitk.GetArrayFromImage(itkimg) # Z,Y,X,220*240*1
npimg = npimg.astype(np.float32)
imgs =
|
np.concatenate((imgs,npimg),axis=0)
|
numpy.concatenate
|
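Here numpy.concatenate stacks each newly loaded LGE volume onto the running array along the slice axis. A minimal sketch with a synthetic volume (shapes are illustrative, not from the dataset):

import numpy as np

imgs = np.zeros((1, 192, 192), dtype=np.float32)          # seed array, one blank slice
npimg = np.random.rand(12, 192, 192).astype(np.float32)   # synthetic volume (Z, Y, X)
imgs = np.concatenate((imgs, npimg), axis=0)               # append slices along axis 0
print(imgs.shape)                                          # (13, 192, 192)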
import numpy as np
import pytest
import torch
from probflow.data import ArrayDataGenerator
from probflow.distributions import Gamma, Normal
from probflow.models import Model
from probflow.modules import Dense, Module
from probflow.parameters import (
DeterministicParameter,
Parameter,
ScaleParameter,
)
from probflow.utils.casting import to_tensor
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_Model_0D():
"""Tests the probflow.models.Model abstract base class"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
x = to_tensor(x)
return Normal(x * self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Fit the model
x = np.random.randn(100).astype("float32")
y = -x + 1
my_model.fit(x, y, batch_size=5, epochs=3)
# Shouldn't be training
assert my_model._is_training is False
# Should be able to set learning rate
lr = my_model._learning_rate
my_model.set_learning_rate(lr + 1.0)
assert lr != my_model._learning_rate
# but error w/ wrong type
with pytest.raises(TypeError):
my_model.set_learning_rate("asdf")
# Should be able to set the KL weight
assert my_model._kl_weight == 1.0
my_model.set_kl_weight(2.0)
assert my_model._kl_weight == 2.0
# but error w/ wrong type
with pytest.raises(TypeError):
my_model.set_kl_weight("asdf")
# predictive samples
samples = my_model.predictive_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# aleatoric samples
samples = my_model.aleatoric_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# epistemic samples
samples = my_model.epistemic_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# predict
samples = my_model.predict(x[:30])
assert isinstance(samples, np.ndarray)
assert samples.ndim == 1
assert samples.shape[0] == 30
with pytest.raises(ValueError):
samples = my_model.predict(x[:30], method="asdf")
# predict using the mode instead of the mean (same for normal dists)
# TODO: mode not working yet for pytorch...
"""
samples = my_model.predict(x[:30], method="mode")
assert isinstance(samples, np.ndarray)
assert samples.ndim == 1
assert samples.shape[0] == 30
"""
# metric
metric = my_model.metric("mae", x[:30], y[:30])
assert isinstance(metric, np.floating)
metric = my_model.metric("mse", x[:30], y[:30])
assert isinstance(metric, np.floating)
assert metric >= 0
# posterior_mean w/ no args should return all params
val = my_model.posterior_mean()
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
# posterior_mean w/ str should return value of that param
val = my_model.posterior_mean("Weight")
assert isinstance(val, np.ndarray)
assert val.ndim == 1
# posterior_mean w/ list of params should return only those params
val = my_model.posterior_mean(["Weight", "Std"])
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
# posterior_sample w/ no args should return all params
val = my_model.posterior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
assert all(val[v].shape[1] == 1 for v in val)
# posterior_sample w/ str should return sample of that param
val = my_model.posterior_sample("Weight", n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 2
assert val.shape[0] == 20
assert val.shape[1] == 1
# posterior_sample w/ list of params should return only those params
val = my_model.posterior_sample(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
assert all(val[v].shape[1] == 1 for v in val)
# posterior_ci should return confidence intervals of all params by default
val = my_model.posterior_ci(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 1 for v in val)
assert all(val[v][1].ndim == 1 for v in val)
assert all(val[v][0].shape[0] == 1 for v in val)
assert all(val[v][1].shape[0] == 1 for v in val)
# posterior_ci should return ci of only 1 if passed str
val = my_model.posterior_ci("Weight", n=20)
assert isinstance(val, tuple)
assert isinstance(val[0], np.ndarray)
assert isinstance(val[1], np.ndarray)
# posterior_ci should return specified cis if passed list of params
val = my_model.posterior_ci(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 1 for v in val)
assert all(val[v][1].ndim == 1 for v in val)
assert all(val[v][0].shape[0] == 1 for v in val)
assert all(val[v][1].shape[0] == 1 for v in val)
# prior_sample w/ no args should return all params
val = my_model.prior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# prior_sample w/ str should return sample of that param
val = my_model.prior_sample("Weight", n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 1
assert val.shape[0] == 20
# prior_sample w/ list of params should return only those params
val = my_model.prior_sample(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# log_prob should return log prob of each sample by default
probs = my_model.log_prob(x[:30], y[:30])
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 30
# log_prob should return sum if individually = False
s_prob = my_model.log_prob(x[:30], y[:30], individually=False)
assert isinstance(s_prob, np.floating)
assert s_prob == np.sum(probs)
# log_prob should return samples w/ distribution = True
probs = my_model.log_prob(x[:30], y[:30], n=10, distribution=True)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 30
assert probs.shape[1] == 10
# log_prob should return samples w/ distribution = True
probs = my_model.log_prob(
x[:30], y[:30], n=10, distribution=True, individually=False
)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 10
# prob should return prob of each sample by default
probs = my_model.prob(x[:30], y[:30])
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 30
assert np.all(probs >= 0)
# prob should return sum if individually = False
s_prob = my_model.prob(x[:30], y[:30], individually=False)
assert isinstance(s_prob, np.floating)
# prob should return samples w/ distribution = True
probs = my_model.prob(x[:30], y[:30], n=10, distribution=True)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 30
assert probs.shape[1] == 10
assert np.all(probs >= 0)
# prob should return samples w/ distribution = True
probs = my_model.prob(
x[:30], y[:30], n=10, distribution=True, individually=False
)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 10
assert np.all(probs >= 0)
# summary method should run
my_model.summary()
def test_Model_force_eager():
"""Tests fitting probflow.model.Model forcing eager=True"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
x = to_tensor(x)
return Normal(x * self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Fit the model
x = np.random.randn(100).astype("float32")
y = -x + 1
my_model.fit(x, y, batch_size=50, epochs=2, eager=True)
def test_Model_force_no_flipout():
"""Tests fitting probflow.model.Model forcing flipout=False"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
x = to_tensor(x)
return Normal(x * self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Fit the model
x = np.random.randn(100).astype("float32")
y = -x + 1
my_model.fit(x, y, batch_size=50, epochs=2, flipout=False)
def test_Model_nonprobabilistic():
"""Tests fitting probflow.model.Model with a non-probabilistic dense layer.
Shouldn't use flipout in this case (default is to use it), will error if it
does.
"""
class MyModel(Model):
def __init__(self):
self.net = Dense(1, 1, probabilistic=False)
self.std = DeterministicParameter(
transform=lambda x: torch.nn.Softplus()(x)
)
def __call__(self, x):
x = to_tensor(x)
return Normal(self.net(x), self.std())
# Instantiate the model
my_model = MyModel()
# Fit the model
x = np.random.randn(100, 1).astype("float32")
y = -x + 1
my_model.fit(x, y, batch_size=50, epochs=2)
def test_Model_with_dataframe():
"""Tests fitting probflow.model.Model w/ DataFrame and eager=False"""
import pandas as pd
class MyModel(Model):
def __init__(self, cols):
self.cols = cols
self.weight = Parameter([len(cols), 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
self.std = ScaleParameter([1, 1], name="Std")
def __call__(self, x):
x = x[self.cols].values
x = to_tensor(x)
return Normal(x @ self.weight() + self.bias(), self.std())
# Data
N = 256
D = 3
cols = ["feature1", "feature2", "feature3"]
x_np = np.random.randn(N, D).astype("float32")
w = np.random.randn(D, 1).astype("float32")
y = x_np @ w + 0.1 * np.random.randn(N, 1).astype("float32")
x_df = pd.DataFrame(x_np, columns=cols)
y_s = pd.Series(y[:, 0])
# Instantiate the model
my_model = MyModel(cols)
# Fitting should work w/ DataFrame b/c it falls back on eager
my_model.fit(x_df, y_s, epochs=2)
# And should still work with eager execution when set
my_model.fit(x_df, y_s, epochs=2, eager=True)
def test_Model_ArrayDataGenerators():
"""Tests the probflow.models.Model sampling/predictive methods when
passed ArrayDataGenerators"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
x = to_tensor(x)
return Normal(x * self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Make an ArrayDataGenerator
x = np.random.randn(100).astype("float32")
y = -x + 1
data = ArrayDataGenerator(x, y, batch_size=5)
# Fit the model
my_model.fit(data, epochs=3)
# predictive samples
samples = my_model.predictive_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# aleatoric samples
samples = my_model.aleatoric_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# epistemic samples
samples = my_model.epistemic_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# predict
samples = my_model.predict(data)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 1
assert samples.shape[0] == 100
# metric
metric = my_model.metric("mae", data)
assert isinstance(metric, np.floating)
metric = my_model.metric("mse", data)
assert isinstance(metric, np.floating)
assert metric >= 0
def test_Model_1D():
"""Tests the probflow.models.Model abstract base class"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter([5, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
self.std = ScaleParameter([1, 1], name="Std")
def __call__(self, x):
x = to_tensor(x)
return Normal(x @ self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Data
x = np.random.randn(100, 5).astype("float32")
w = np.random.randn(5, 1).astype("float32")
y = x @ w + 1
# Fit the model
my_model.fit(x, y, batch_size=5, epochs=3)
# predictive samples
samples = my_model.predictive_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# aleatoric samples
samples = my_model.aleatoric_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# epistemic samples
samples = my_model.epistemic_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# predict
samples = my_model.predict(x[:30, :])
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 30
assert samples.shape[1] == 1
# metric
metric = my_model.metric("mse", x[:30, :], y[:30, :])
assert isinstance(metric, np.floating)
metric = my_model.metric("mae", x[:30, :], y[:30, :])
assert isinstance(metric, np.floating)
assert metric >= 0
# posterior_mean w/ no args should return all params
val = my_model.posterior_mean()
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert val["Weight"].shape[0] == 5
assert val["Weight"].shape[1] == 1
assert val["Bias"].shape[0] == 1
assert val["Bias"].shape[1] == 1
assert val["Std"].shape[0] == 1
assert val["Std"].shape[1] == 1
# posterior_mean w/ str should return value of that param
val = my_model.posterior_mean("Weight")
assert isinstance(val, np.ndarray)
assert val.ndim == 2
assert val.shape[0] == 5
assert val.shape[1] == 1
# posterior_mean w/ list of params should return only those params
val = my_model.posterior_mean(["Weight", "Std"])
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert val["Weight"].shape[0] == 5
assert val["Weight"].shape[1] == 1
assert val["Std"].shape[0] == 1
assert val["Std"].shape[1] == 1
# posterior_sample w/ no args should return all params
val = my_model.posterior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 3 for v in val)
assert val["Weight"].shape[0] == 20
assert val["Weight"].shape[1] == 5
assert val["Weight"].shape[2] == 1
assert val["Bias"].shape[0] == 20
assert val["Bias"].shape[1] == 1
assert val["Bias"].shape[2] == 1
assert val["Std"].shape[0] == 20
assert val["Std"].shape[1] == 1
assert val["Std"].shape[2] == 1
# posterior_sample w/ str should return sample of that param
val = my_model.posterior_sample("Weight", n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 3
assert val.shape[0] == 20
assert val.shape[1] == 5
assert val.shape[2] == 1
# posterior_sample w/ list of params should return only those params
val = my_model.posterior_sample(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 3 for v in val)
assert val["Weight"].shape[0] == 20
assert val["Weight"].shape[1] == 5
assert val["Weight"].shape[2] == 1
assert val["Std"].shape[0] == 20
assert val["Std"].shape[1] == 1
assert val["Std"].shape[2] == 1
# posterior_ci should return confidence intervals of all params by default
val = my_model.posterior_ci(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 2 for v in val)
assert all(val[v][1].ndim == 2 for v in val)
for i in range(1):
assert val["Weight"][i].shape[0] == 5
assert val["Weight"][i].shape[1] == 1
assert val["Bias"][i].shape[0] == 1
assert val["Bias"][i].shape[1] == 1
assert val["Std"][i].shape[0] == 1
assert val["Std"][i].shape[1] == 1
# posterior_ci should return ci of only 1 if passed str
val = my_model.posterior_ci("Weight", n=20)
assert isinstance(val, tuple)
assert isinstance(val[0], np.ndarray)
assert isinstance(val[1], np.ndarray)
# posterior_ci should return specified cis if passed list of params
val = my_model.posterior_ci(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 2 for v in val)
assert all(val[v][1].ndim == 2 for v in val)
for i in range(1):
assert val["Weight"][i].shape[0] == 5
assert val["Weight"][i].shape[1] == 1
assert val["Std"][i].shape[0] == 1
assert val["Std"][i].shape[1] == 1
# prior_sample w/ no args should return all params
val = my_model.prior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert "Weight" in val
assert "Bias" in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# prior_sample w/ str should return sample of that param
val = my_model.prior_sample("Weight", n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 1
assert val.shape[0] == 20
# prior_sample w/ list of params should return only those params
val = my_model.prior_sample(["Weight", "Std"], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert "Weight" in val
assert "Bias" not in val
assert "Std" in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
def test_generative_Model():
"""Tests the probflow.models.Model w/ a generative model (only x)"""
class MyModel(Model):
def __init__(self):
self.mean = Parameter([1], name="Mean")
self.std = ScaleParameter([1], name="Std")
def __call__(self):
return Normal(self.mean(), self.std())
# Instantiate the model
model = MyModel()
# Data
X = np.random.randn(100, 1).astype("float32")
# Fit the model
model.fit(X, batch_size=10, epochs=3)
# predictive samples
samples = model.predictive_sample(n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 1
# log_prob
y = np.random.randn(10, 1)
probs = model.log_prob(y)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 10
assert probs.shape[1] == 1
probs = model.log_prob(y, individually=False)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 1
probs = model.log_prob(y, distribution=True, n=11)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 3
assert probs.shape[0] == 10
assert probs.shape[1] == 1
assert probs.shape[2] == 11
probs = model.log_prob(y, distribution=True, n=11, individually=False)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 1
assert probs.shape[1] == 11
def test_Model_nesting():
"""Tests Model when it contains Modules and sub-modules"""
class MyModule(Module):
def __init__(self):
self.weight = Parameter([5, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
def __call__(self, x):
x = to_tensor(x)
return x @ self.weight() + self.bias()
class MyModel(Model):
def __init__(self):
self.module = MyModule()
self.std = ScaleParameter(
[1, 1], name="Std", prior=Gamma(1.0, 1.0)
)
def __call__(self, x):
return Normal(self.module(x), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Data
x = np.random.randn(100, 5).astype("float32")
w = np.random.randn(5, 1).astype("float32")
y = x @ w + 1
# Fit the model
my_model.fit(x, y, batch_size=5, epochs=3)
# predictive samples
samples = my_model.predictive_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# kl loss should be greater for outer model
assert (
my_model.kl_loss().detach().numpy()
> my_model.module.kl_loss().detach().numpy()
)
def test_Model_multiple_mc_0d_eager():
"""Fit probflow.model.Model w/ n_mc>1 to 0d data in eager mode"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
w = self.weight()
b = self.bias()
s = self.std()
m = x * w + b
# check shapes are as expected
if self._is_training:
assert x.ndim == 2
assert x.shape[0] == 1
assert x.shape[1] == 50
assert w.shape[0] == 5
assert w.shape[1] == 1
assert b.shape[0] == 5
assert b.shape[1] == 1
assert s.shape[0] == 5
assert s.shape[1] == 1
assert m.shape[0] == 5
assert m.shape[1] == 50
else: # predicting
assert x.ndim == 1
assert x.shape[0] == 11
assert w.shape[0] == 1
assert b.shape[0] == 1
assert s.shape[0] == 1
assert m.shape[0] == 11
return Normal(m, s)
# Instantiate the model
model = MyModel()
# Fit the model
x = np.random.randn(100).astype("float32")
y = -x + 1
model.fit(x, y, batch_size=50, epochs=2, n_mc=5, eager=True)
# Check predictions
p = model.predict(x[:11])
assert isinstance(p, np.ndarray)
assert p.ndim == 1
assert p.shape[0] == 11
def test_Model_multiple_mc_0d_noneager():
"""Fit probflow.model.Model w/ n_mc>1 to 0d data in non-eager mode"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name="Weight")
self.bias = Parameter(name="Bias")
self.std = ScaleParameter(name="Std")
def __call__(self, x):
# can't check shapes b/c tracing it ignores this code
# so just check that it works
return Normal(x * self.weight() + self.bias(), self.std())
# Instantiate the model
model = MyModel()
# Fit the model
x = np.random.randn(100).astype("float32")
y = -x + 1
model.fit(x, y, batch_size=50, epochs=2, n_mc=5, eager=False)
# Check predictions
p = model.predict(x[:11])
assert isinstance(p, np.ndarray)
assert p.ndim == 1
assert p.shape[0] == 11
def test_Model_multiple_mc_1d_eager():
"""Fit probflow.model.Model w/ n_mc>1 to vector data in eager mode"""
class MyModel(Model):
def __init__(self, d_in):
self.weight = Parameter([d_in, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
self.std = ScaleParameter([1, 1], name="Std")
def __call__(self, x):
w = self.weight()
b = self.bias()
s = self.std()
m = x @ w + b
# check shapes are as expected
if self._is_training:
assert x.ndim == 3
assert x.shape[0] == 1
assert x.shape[1] == 50
assert x.shape[2] == 3
assert w.shape[0] == 5
assert w.shape[1] == 3
assert w.shape[2] == 1
assert b.shape[0] == 5
assert b.shape[1] == 1
assert b.shape[2] == 1
assert s.shape[0] == 5
assert s.shape[1] == 1
assert s.shape[2] == 1
assert m.shape[0] == 5
assert m.shape[1] == 50
assert m.shape[2] == 1
else: # predicting
assert x.ndim == 2
assert x.shape[0] == 11
assert x.shape[1] == 3
assert w.shape[0] == 3
assert w.shape[1] == 1
assert b.shape[0] == 1
assert b.shape[1] == 1
assert s.shape[0] == 1
assert s.shape[1] == 1
assert m.shape[0] == 11
assert m.shape[1] == 1
return Normal(m, s)
# Instantiate the model
model = MyModel(3)
# Fit the model
x = np.random.randn(100, 3).astype("float32")
w = np.random.randn(3, 1).astype("float32")
y = x @ w + 1
model.fit(x, y, batch_size=50, epochs=2, n_mc=5, eager=True)
# Check predictions
p = model.predict(x[:11, :])
assert isinstance(p, np.ndarray)
assert p.ndim == 2
assert p.shape[0] == 11
assert p.shape[1] == 1
def test_Model_multiple_mc_1d_noneager():
"""Fit probflow.model.Model w/ n_mc>1 to vector data in non-eager mode"""
class MyModel(Model):
def __init__(self, d_in):
self.weight = Parameter([d_in, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
self.std = ScaleParameter([1, 1], name="Std")
def __call__(self, x):
w = self.weight()
b = self.bias()
s = self.std()
m = x @ w + b
return Normal(m, s)
# Instantiate the model
model = MyModel(3)
# Fit the model
x = np.random.randn(100, 3).astype("float32")
w =
|
np.random.randn(3, 1)
|
numpy.random.randn
|
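The completion draws the true weight vector for the synthetic regression data used by the non-eager multi-MC test. A short sketch of numpy.random.randn in that role, mirroring the prompt's own setup:

import numpy as np

x = np.random.randn(100, 3).astype("float32")  # 100 samples, 3 features
w = np.random.randn(3, 1).astype("float32")    # standard-normal weights, shape (3, 1)
y = x @ w + 1                                   # synthetic targets with unit bias
print(x.shape, w.shape, y.shape)                # (100, 3) (3, 1) (100, 1)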
"""
This module contains the _Flamelet2D class that provides a high-level interface for nonpremixed three-stream flamelets.
This is unsupported at the moment but it's pretty close to being useful...
"""
# Spitfire - a Python-C++ library for building tabulated chemistry models and solving differential equations
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
#
# You should have received a copy of the 3-clause BSD License
# along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
# Questions? Contact <NAME> (<EMAIL>)
import numpy as np
from scipy.special import erfinv
from scipy.sparse.linalg import LinearOperator, bicgstab
class _Flamelet2D(object):
_initializations = ['unreacted', 'equilibrium', 'linear-TY']
_grid_types = ['uniform', 'clustered']
_rates_sensitivity_option_dict = {'exact': 0, 'no-TBAF': 1}
_sensitivity_transform_option_dict = {'exact': 0}
@classmethod
def _compute_dissipation_rate(cls,
mixture_fraction,
max_dissipation_rate,
form='Peters'):
"""Compute the scalar dissipation rate across mixture fraction
Parameters
----------
mixture_fraction : array_like
the locations of grid points in mixture fraction space
max_dissipation_rate : float
the maximum value of the dissipation rate
form : str, optional
the form of the dissipation rate's dependency on mixture fraction, defaults to 'Peters', which
uses the form of N. Peters, Turbulent Combustion, 2000.
Specifying anything else will yield a constant scalar dissipation rate.
Returns
-------
x : array_like
the scalar dissipation rate on the given mixture fraction grid
"""
if form == 'Peters' or form == 'peters':
x = max_dissipation_rate * np.exp(-2. * (erfinv(2. * mixture_fraction - 1.)) ** 2)
else:
x = np.empty_like(mixture_fraction)
x[:] = max_dissipation_rate
return x
def __init__(self,
mech_spec,
initial_condition,
pressure,
stream_1,
stream_2,
stream_3,
max_dissipation_rate_1,
max_dissipation_rate_2,
dissipation_rate_1_form='Peters',
dissipation_rate_2_form='Peters',
grid_1=None,
grid_2=None,
rates_sensitivity_type='exact',
sensitivity_transform_type='exact'):
self._gas = mech_spec.copy_stream(stream_1)
self._stream_1 = stream_1
self._stream_2 = stream_2
self._stream_3 = stream_3
self._pressure = pressure
self._mechanism = mech_spec
self._griffon = self._mechanism.griffon
self._n_species = self._gas.n_species
self._n_reactions = self._gas.n_reactions
self._n_equations = self._n_species
self._state_1 = np.hstack([stream_1.T, stream_1.Y[:-1]])
self._state_2 = np.hstack([stream_2.T, stream_2.Y[:-1]])
self._state_3 = np.hstack([stream_3.T, stream_3.Y[:-1]])
self._rsopt = self._rates_sensitivity_option_dict[rates_sensitivity_type]
self._stopt = self._sensitivity_transform_option_dict[sensitivity_transform_type]
self._chi_1 = self._compute_dissipation_rate(grid_1, max_dissipation_rate_1, dissipation_rate_1_form)
self._chi_2 = self._compute_dissipation_rate(grid_2, max_dissipation_rate_2, dissipation_rate_2_form)
self._n_x = grid_1.size
self._n_y = grid_2.size
self._n_dof = self._n_equations * self._n_x * self._n_y # - 4
self._x_range = grid_1
self._y_range = grid_2
self._dx = grid_1[1:] - grid_1[:-1]
self._dy = grid_2[1:] - grid_2[:-1]
self._initial_state = np.zeros(self._n_dof)
self._z_1 = np.zeros(self._n_x * self._n_y)
self._z_2 = np.zeros(self._n_x * self._n_y)
nx = self._n_x
ny = self._n_y
nq = self._n_equations
nyq = ny * nq
h1 = self._stream_1.enthalpy_mass
h2 = self._stream_2.enthalpy_mass
h3 = self._stream_3.enthalpy_mass
Y1 = self._stream_1.Y
Y2 = self._stream_2.Y
Y3 = self._stream_3.Y
if isinstance(initial_condition, np.ndarray):
self._initial_state = np.copy(initial_condition)
elif isinstance(initial_condition, str):
for i in range(nx):
x = self._x_range[i]
for j in range(ny):
y = self._y_range[j]
ij_z = i * ny + j
ij = i * nyq + j * nq
if y > 1. - x:
z1 = 1. - y
z2 = 1. - x
else:
z1 = x
z2 = y
self._z_1[ij_z] = z1
self._z_2[ij_z] = z2
if initial_condition == 'linear-TY':
mix_state = z1 * self._state_3 + z2 * self._state_2 + (1. - z1 - z2) * self._state_1
self._initial_state[ij:ij + nq] = mix_state
else:
hmix = z1 * h3 + z2 * h2 + (1. - z1 - z2) * h1
Ymix = z1 * Y3 + z2 * Y2 + (1. - z1 - z2) * Y1
mix = mech_spec.stream('HPY', (hmix, pressure, Ymix))
if initial_condition == 'equilibrium':
mix.equilibrate('HP')
elif initial_condition == 'unreacted':
pass
else:
raise ValueError(
'invalid initial_condition string, only "equilibrium", "unreacted", and "linear-TY" are allowed')
self._initial_state[ij:ij + nq] = np.hstack([mix.T, mix.Y[:-1]])
self._variable_scales = np.ones_like(self._initial_state)
self._variable_scales[::nq] = 1.e3
nxq = (self._n_x - 1) * self._n_equations
nyq = (self._n_y - 1) * self._n_equations
self._xcp = np.zeros(nxq)
self._xcr = np.zeros(nxq)
self._xcl = np.zeros(nxq)
self._ycp =
|
np.zeros(nyq)
|
numpy.zeros
|
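The completion preallocates a flat work array sized to the number of interior unknowns along the second mixture-fraction axis. A tiny sketch of numpy.zeros in that role (grid size and equation count are illustrative, not from the dataset):

import numpy as np

n_y = 64                       # illustrative grid size
n_equations = 10               # illustrative number of transported equations
nyq = (n_y - 1) * n_equations
ycp = np.zeros(nyq)            # 1-D float64 array of zeros
print(ycp.shape)               # (630,)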
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import copy
matplotlib.rcParams["font.size"] = 13
def phase(z):
val = np.angle(z)
# val = np.rad2deg(np.unwrap(np.angle((z))))
return val
class DataView(object):
"""
Provides viewing functions for Data
This can be inherited by XXX
"""
def set_xyz(self, x, y, z, normal="Z", geometry="grid"):
self.normal = normal
self.geometry = geometry
if geometry.upper() == "GRID":
if normal.upper() == "X":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = 1, y.size, z.size
self.Y, self.Z = np.meshgrid(y, z)
self.xyz = np.c_[
x * np.ones(self.ncy * self.ncz), self.Y.flatten(), self.Z.flatten()
]
elif normal.upper() == "Y":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, z.size
self.X, self.Z = np.meshgrid(x, z)
self.xyz = np.c_[
self.X.flatten(), y * np.ones(self.ncx * self.ncz), self.Z.flatten()
]
elif normal.upper() == "Z":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, y.size, 1
self.X, self.Y = np.meshgrid(x, y)
self.xyz = np.c_[
self.X.flatten(), self.Y.flatten(), z * np.ones(self.ncx * self.ncy)
]
elif geometry.upper() == "PROFILE":
if normal.upper() == "X":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = 1, y.size, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[x * np.ones_like(self.y), self.Y, self.Z]
elif normal.upper() == "Y":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[self.x, y * np.ones_like(self.x), self.Z]
elif normal.upper() == "Z":
self.x, self.y, self.z = x, y, z
self.ncx, self.ncy, self.ncz = x.size, 1, 1
self.Y, self.Z = self.y, self.z
self.xyz = np.c_[self.x, self.y, z * np.ones_like(self.x)]
def eval_loc(self, srcLoc, obsLoc, log_sigvec, log_fvec, orientation, normal, func):
self.srcLoc = srcLoc
self.obsLoc = obsLoc
self.log_sigvec = log_sigvec
self.log_fvec = log_fvec
self.sigvec = 10.0 ** log_sigvec
self.fvec = 10.0 ** log_fvec
self.orientation = orientation
self.normal = normal
self.func1D = func
self.val_xfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
self.val_yfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
self.val_zfs = np.zeros((len(log_sigvec), len(log_fvec)), dtype=complex)
for n in range(len(log_sigvec)):
self.val_xfs[n], self.val_yfs[n], self.val_zfs[n] = func(
self.obsLoc,
srcLoc,
10.0 ** log_sigvec[n],
10.0 ** log_fvec,
orientation=self.orientation,
)
def eval(self, xyz, srcLoc, sig, f, orientation, func, normal="Z", t=0.0):
val_x, val_y, val_z = func(xyz, srcLoc, sig, f, orientation=orientation, t=t)
return val_x, val_y, val_z
def eval_TD(self, xyz, srcLoc, sig, t, orientation, func, normal="Z"):
val_x, val_y, val_z = func(xyz, srcLoc, sig, t, orientation=orientation)
return val_x, val_y, val_z
def eval_2D(self, srcLoc, sig, f, orientation, func, t=0.0):
self.func2D = func
self.srcLoc = srcLoc
self.sig = sig
self.f = f
self.orientation = orientation
self.val_x, self.val_y, self.val_z = func(
self.xyz, srcLoc, sig, f, orientation=orientation, t=t
)
if self.normal.upper() == "X":
def Freshape(v):
return v.reshape(self.ncy, self.ncz)
elif self.normal.upper() == "Y":
def Freshape(v):
return v.reshape(self.ncx, self.ncz)
elif self.normal == "Z":
def Freshape(v):
return v.reshape(self.ncx, self.ncy)
self.VAL_X = Freshape(self.val_x)
self.VAL_Y = Freshape(self.val_y)
self.VAL_Z = Freshape(self.val_z)
self.VEC_R_amp = np.sqrt(
self.VAL_X.real ** 2 + self.VAL_Y.real ** 2 + self.VAL_Z.real ** 2
)
self.VEC_I_amp = np.sqrt(
self.VAL_X.imag ** 2 + self.VAL_Y.imag ** 2 + self.VAL_Z.imag ** 2
)
self.VEC_A_amp = np.sqrt(
np.abs(self.VAL_X) ** 2 + np.abs(self.VAL_Y) ** 2 + np.abs(self.VAL_Z) ** 2
)
self.VEC_P_amp = np.sqrt(
phase(self.VAL_X) ** 2 + phase(self.VAL_Y) ** 2 + phase(self.VAL_Z) ** 2
)
def eval_2D_TD(self, srcLoc, sig, t, orientation, func):
self.func2D = func
self.srcLoc = srcLoc
self.sig = sig
self.t = t
self.orientation = orientation
self.val_x, self.val_y, self.val_z = func(
self.xyz, srcLoc, sig, t, orientation=orientation
)
if self.normal.upper() == "X":
def Freshape(v):
return v.reshape(self.ncy, self.ncz)
elif self.normal.upper() == "Y":
def Freshape(v):
return v.reshape(self.ncx, self.ncz)
elif self.normal.upper() == "Z":
def Freshape(v):
return v.reshape(self.ncx, self.ncy)
self.VAL_X = Freshape(self.val_x)
self.VAL_Y = Freshape(self.val_y)
self.VAL_Z = Freshape(self.val_z)
self.VEC_amp = np.sqrt(
self.VAL_X.real ** 2 + self.VAL_Y.real ** 2 + self.VAL_Z.real ** 2
)
def plot2D_FD(
self,
component="real",
view="vec",
ncontour=20,
logamp=True,
clim=None,
showcontour=False,
levels=None,
ax=None,
colorbar=True,
cmap="viridis",
):
"""
2D visualization of dipole fields
"""
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if component == "real":
VAL_X = self.VAL_X.real
VAL_Y = self.VAL_Y.real
VAL_Z = self.VAL_Z.real
VEC_amp = self.VEC_R_amp
elif component == "imag":
VAL_X = self.VAL_X.imag
VAL_Y = self.VAL_Y.imag
VAL_Z = self.VAL_Z.imag
VEC_amp = self.VEC_I_amp
elif component == "amplitude":
VAL_X = abs(self.VAL_X)
VAL_Y = abs(self.VAL_Y)
VAL_Z = abs(self.VAL_Z)
VEC_amp = self.VEC_A_amp
elif component == "phase":
VAL_X = phase(self.VAL_X)
VAL_Y = phase(self.VAL_Y)
VAL_Z = phase(self.VAL_Z)
VEC_amp = self.VEC_P_amp
else:
raise Exception("component should be in real, imag, amplitude, or phase!")
if view == "amp" or view == "vec":
val = VEC_amp
elif view.upper() == "X":
val = VAL_X
elif view.upper() == "Y":
val = VAL_Y
elif view.upper() == "Z":
val = VAL_Z
if logamp is True:
zeroind = val == 0
val = np.log10(abs(val))
val[zeroind] = val[~zeroind].min()
if self.normal.upper() == "X":
a, b = self.y, self.z
vec_a, vec_b = self.VAL_Y, self.VAL_Z
xlabel = "Y (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Y":
a, b = self.x, self.z
vec_a, vec_b = self.VAL_X, self.VAL_Z
xlabel = "X (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Z":
a, b = self.x, self.y
vec_a, vec_b = self.VAL_X, self.VAL_Y
xlabel = "X (m)"
ylabel = "Y (m)"
if clim is None:
vmin, vmax = val.min(), val.max()
else:
vmin, vmax = clim[0], clim[1]
dat = ax.contourf(
a, b, val, ncontour, clim=(vmin, vmax), vmin=vmin, vmax=vmax, cmap=cmap
)
if showcontour:
ax.contour(a, b, val, levels, colors="k", linestyles="-")
if colorbar:
if logamp is True:
plt.colorbar(
dat, ax=ax, format="$10^{%.1f}$", ticks=np.linspace(vmin, vmax, 3)
)
else:
plt.colorbar(
dat, ax=ax, format="%.1e", ticks=np.linspace(vmin, vmax, 3)
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if view == "vec":
# nx = self.x.size
# nskip = int(nx / 15)
if component == "real":
# ax.quiver(a[::nskip], b[::nskip], (vec_a.real/VEC_amp)[::nskip,::nskip], (vec_b.real/VEC_amp)[::nskip,::nskip], color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a.real, vec_b.real, color="w", linewidth=0.5)
elif component == "imag":
# ax.quiver(a, b, vec_a.imag/VEC_amp, vec_b.imag/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a.imag, vec_b.imag, color="w", linewidth=0.5)
if component == "amplitude":
# ax.quiver(a, b, abs(vec_a)/VEC_amp, abs(vec_b)/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(a, b, abs(vec_a), abs(vec_b), color="w", linewidth=0.5)
elif component == "phase":
# ax.quiver(a, b, phase(vec_a)/VEC_amp, phase(vec_b)/VEC_amp, color="w", linewidth=0.5)
ax.streamplot(
a, b, phase(vec_a), phase(vec_b), color="w", linewidth=0.5
)
return ax, dat
def plot2D_TD(
self,
view="vec",
ncontour=20,
logamp=True,
clim=None,
showcontour=False,
levels=None,
ax=None,
colorbar=True,
cmap="viridis",
):
"""
2D visualization of dipole fields
"""
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if view == "amp" or view == "vec":
val = self.VEC_amp
elif view.upper() == "X":
val = self.VAL_X
elif view.upper() == "Y":
val = self.VAL_Y
elif view.upper() == "Z":
val = self.VAL_Z
if logamp is True:
zeroind = val == 0
val = np.log10(abs(val))
val[zeroind] = val[~zeroind].min()
if self.normal.upper() == "X":
a, b = self.y, self.z
vec_a, vec_b = self.VAL_Y, self.VAL_Z
xlabel = "Y (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Y":
a, b = self.x, self.z
vec_a, vec_b = self.VAL_X, self.VAL_Z
xlabel = "X (m)"
ylabel = "Z (m)"
elif self.normal.upper() == "Z":
a, b = self.x, self.y
vec_a, vec_b = self.VAL_X, self.VAL_Y
xlabel = "X (m)"
ylabel = "Y (m)"
if clim is None:
vmin, vmax = val.min(), val.max()
else:
vmin, vmax = clim[0], clim[1]
dat = ax.contourf(
a, b, val, ncontour, clim=(vmin, vmax), vmin=vmin, vmax=vmax, cmap=cmap
)
if showcontour:
ax.contour(a, b, val, levels, colors="k", linestyles="-")
if colorbar:
if logamp is True:
plt.colorbar(
dat, ax=ax, format="$10^{%.1f}$", ticks=np.linspace(vmin, vmax, 3)
)
else:
plt.colorbar(
dat, ax=ax, format="%.1e", ticks=np.linspace(vmin, vmax, 3)
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if view == "vec":
# nx = self.x.size
# nskip = int(nx / 15)
# ax.quiver(a[::nskip], b[::nskip], (vec_a.real/VEC_amp)[::nskip,::nskip], (vec_b.real/VEC_amp)[::nskip,::nskip], color="w", linewidth=0.5)
ax.streamplot(a, b, vec_a, vec_b, color="w", linewidth=0.5)
return ax, dat
def plot_profile_FD(
self,
start,
end,
nbmp,
component="real",
view="x",
logamp=True,
ax=None,
color="black",
):
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
if self.geometry.upper() == "PROFILE":
start = self.xyz[0]
end = self.xyz[-1]
self1D = copy.deepcopy(self)
# Pr for Profile
Pr = self.xyz
elif self.geometry.upper() == "GRID":
self1D = DataView()
Pr = np.zeros(shape=(nbmp, 3))
Pr[:, 0] = np.linspace(start[0], end[0], nbmp)
Pr[:, 1] = np.linspace(start[1], end[1], nbmp)
Pr[:, 2] = np.linspace(start[2], end[2], nbmp)
self1D.set_xyz(
Pr[:, 0], Pr[:, 1], Pr[:, 2], normal=self.normal, geometry="profile"
)
self1D.eval_2D(self.srcLoc, self.sig, self.f, self.orientation, self.func2D)
# Distance from starting point
D = np.sqrt(
(Pr[0, 0] - Pr[:, 0]) ** 2
+ (Pr[:, 1] - Pr[0, 1]) ** 2
+ (Pr[:, 2] - Pr[0, 2]) ** 2
)
# if self.normal.upper() == "Z":
# self1D.set_xyz(Pr[:,0],Pr[:,1],self.z,normal=self.normal,geometry="profile")
# elif self.normal.upper() == "Y":
# self1D.set_xyz(Pr[:,0],self.y,Pr[:,1],normal=self.normal,geometry="profile")
# elif self.normal.upper() == "X":
# self1D.set_xyz(self.x,Pr[:,0],Pr[:,1],normal=self.normal,geometry="profile")
pltvalue = []
if view.upper() == "X":
pltvalue = self1D.val_x
elif view.upper() == "Y":
pltvalue = self1D.val_y
elif view.upper() == "Z":
pltvalue = self1D.val_z
if component.upper() == "REAL":
ax.plot(D, pltvalue.real, color=color)
ax.set_ylabel("E field, Real part (V/m)")
elif component.upper() == "IMAG":
ax.plot(D, pltvalue.imag, color=color)
ax.set_ylabel("E field, Imag part (V/m)")
elif component.upper() == "AMPLITUDE":
if logamp is True:
ax.set_yscale("log")
ax.plot(D, np.absolute(pltvalue), color=color)
ax.set_ylabel("E field, Amplitude (V/m)")
elif component.upper() == "PHASE":
ax.plot(D, phase(pltvalue), color=color)
ax.set_ylabel("E field, Phase")
ax.set_xlabel("Distance from startinng point (m)")
return ax
def plot_1D_RI_section(self, start, end, nbmp, view, ax0, ax1):
self1D = DataView()
# Pr for Profile
Pr = np.zeros(shape=(nbmp, 2))
Pr[:, 0] = np.linspace(start[0], end[0], nbmp)
Pr[:, 1] = np.linspace(start[1], end[1], nbmp)
# Distance from starting point
D = np.sqrt((Pr[0, 0] - Pr[:, 0]) ** 2 + (Pr[:, 1] - Pr[0, 1]) ** 2)
if self.normal.upper() == "Z":
self1D.set_xyz(
Pr[:, 0], Pr[:, 1], self.z, normal=self.normal, geometry="profile"
)
elif self.normal.upper() == "Y":
self1D.set_xyz(
Pr[:, 0], self.y, Pr[:, 1], normal=self.normal, geometry="profile"
)
elif self.normal.upper() == "X":
self1D.set_xyz(
self.x, Pr[:, 0], Pr[:, 1], normal=self.normal, geometry="profile"
)
self1D.eval_2D(self.srcLoc, self.sig, self.f, self.orientation, self.func2D)
if view.upper() == "X":
ax0.plot(D, self1D.val_x.real, color="blue")
ax1.plot(D, self1D.val_x.imag, color="red")
elif view.upper() == "Y":
ax0.plot(D, self1D.val_y.real, color="blue")
ax1.plot(D, self1D.val_y.imag, color="red")
elif view.upper() == "Z":
ax0.plot(D, self1D.val_z.real, color="blue")
ax1.plot(D, self1D.val_z.imag, color="red")
ax0.set_xlabel("Distance from startinng point (m)")
ax1.set_xlabel("Distance from startinng point (Hz)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
return ax0, ax1
def plot_1D_AP_section(self, start, end, nbmp, view, ax0, ax1):
self1D = copy.deepcopy(self)
# Pr for Profile
Pr = np.zeros(shape=(nbmp, 2))
Pr[:, 0] = np.linspace(start[0], end[0], nbmp)
Pr[:, 1] = np.linspace(start[1], end[1], nbmp)
# Distance from starting point
D = np.sqrt((Pr[0, 0] - Pr[:, 0]) ** 2 + (Pr[:, 1] - Pr[0, 1]) ** 2)
if self.normal.upper() == "Z":
self1D.set_xyz(
Pr[:, 0], Pr[:, 1], self.z, normal=self.normal, geometry="profile"
)
elif self.normal.upper() == "Y":
self1D.set_xyz(
Pr[:, 0], self.y, Pr[:, 1], normal=self.normal, geometry="profile"
)
elif self.normal.upper() == "X":
self1D.set_xyz(
self.x, Pr[:, 0], Pr[:, 1], normal=self.normal, geometry="profile"
)
self1D.eval_2D(self.srcLoc, self.sig, self.f, self.orientation, self.func2D)
if view.upper() == "X":
ax0.plot(D, np.absolute(self1D.val_x), color="blue")
ax1.plot(D, phase(self1D.val_x), color="red")
elif view.upper() == "Y":
ax0.plot(D, np.absolute(self1D.val_y), color="blue")
ax1.plot(D, phase(self1D.val_y), color="red")
elif view.upper() == "Z":
ax0.plot(D, np.absolute(self1D.val_z), color="blue")
ax1.plot(D, phase(self1D.val_z), color="red")
ax0.set_xlabel("Distance from startinng point (m)")
ax1.set_xlabel("Distance from startinng point (Hz)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
return ax0, ax1
def plot1D_FD(
self,
component="real",
view="x",
abscisse="Conductivity",
slic=None,
logamp=True,
ax=None,
legend=True,
color="black",
):
if ax is None:
plt.figure(figsize=(6.5, 5))
ax = plt.subplot(111)
slice_ind = 0
if slic is None:
slice_ind = np.minimum(len(self.sigvec), len(self.fvec)) // 2
if abscisse.upper() == "CONDUCTIVITY":
slic = self.log_fvec[slice_ind]
elif abscisse.upper() == "FREQUENCY":
slic = self.log_sigvec[slice_ind]
pltvalue = []
if view.upper() == "X":
pltvalue = self.val_xfs
elif view.upper() == "Y":
pltvalue = self.val_yfs
elif view.upper() == "Z":
pltvalue = self.val_zfs
if component.upper() == "REAL":
pltvalue = pltvalue.real
ax.set_ylabel("E field, Real part (V/m)")
elif component.upper() == "IMAG":
pltvalue = pltvalue.imag
ax.set_ylabel("E field, Imag part (V/m)")
elif component.upper() == "AMPLITUDE":
pltvalue = np.absolute(pltvalue)
ax.set_ylabel("E field, Amplitude (V/m)")
if logamp is True:
ax.set_yscale("log")
elif component.upper() == "PHASE":
pltvalue = phase(pltvalue)
ax.set_ylabel("E field, Phase")
if component.upper() == "PHASOR":
if abscisse.upper() == "CONDUCTIVITY":
slice_ind = np.where(slic == self.log_fvec)[0][0]
ax.plot(
pltvalue.real[:, slice_ind],
pltvalue.imag[:, slice_ind],
color=color,
)
ax.set_xlabel("E field, Real part (V/m)")
ax.set_ylabel("E field, Imag part(V/m)")
axymin = pltvalue.imag[:, slice_ind].min()
axymax = pltvalue.imag[:, slice_ind].max()
if legend:
ax.annotate(
("f =%0.5f Hz") % (self.fvec[slice_ind]),
xy=(
(
pltvalue.real[:, slice_ind].min()
+ pltvalue.real[:, slice_ind].max()
)
/ 2.0,
axymin + (axymax - axymin) / 4.0,
),
xycoords="data",
xytext=(
(
pltvalue.real[:, slice_ind].min()
+ pltvalue.real[:, slice_ind].max()
)
/ 2.0,
axymin + (axymax - axymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
elif abscisse.upper() == "FREQUENCY":
slice_ind = np.where(slic == self.log_sigvec)[0][0]
ax.plot(
pltvalue.real[slice_ind, :],
pltvalue.imag[slice_ind, :],
color=color,
)
ax.set_xlabel("E field, Real part (V/m)")
ax.set_ylabel("E field, Imag part(V/m)")
axymin = pltvalue.imag[slice_ind, :].min()
axymax = pltvalue.imag[slice_ind, :].max()
if legend:
ax.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[slice_ind]),
xy=(
(
pltvalue.real[slice_ind, :].min()
+ pltvalue.real[slice_ind, :].max()
)
/ 2.0,
axymin + (axymax - axymin) / 4.0,
),
xycoords="data",
xytext=(
(
pltvalue.real[slice_ind, :].min()
+ pltvalue.real[slice_ind, :].max()
)
/ 2.0,
axymin + (axymax - axymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
else:
if abscisse.upper() == "CONDUCTIVITY":
ax.set_xlabel("Conductivity (S/m)")
ax.set_xscale("log")
slice_ind = np.where(slic == self.log_fvec)[0][0]
ax.plot(self.sigvec, pltvalue[:, slice_ind], color=color)
axymin = pltvalue[:, slice_ind].min()
axymax = pltvalue[:, slice_ind].max()
if legend:
ax.annotate(
("f =%0.5f Hz") % (self.fvec[slice_ind]),
xy=(
10.0
** (
(
np.log10(self.sigvec.min())
+ np.log10(self.sigvec.max())
)
/ 2
),
axymin + (axymax - axymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** (
(
np.log10(self.sigvec.min())
+ np.log10(self.sigvec.max())
)
/ 2
),
axymin + (axymax - axymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
elif abscisse.upper() == "FREQUENCY":
ax.set_xlabel("Frequency (Hz)")
ax.set_xscale("log")
slice_ind = np.where(slic == self.log_sigvec)[0][0]
ax.plot(self.fvec, pltvalue[slice_ind, :], color=color)
axymin = pltvalue[slice_ind, :].min()
axymax = pltvalue[slice_ind, :].max()
if legend:
ax.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[slice_ind]),
xy=(
10.0
** (
(np.log10(self.fvec.min()) + np.log10(self.fvec.max()))
/ 2
),
axymin + (axymax - axymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** (
(np.log10(self.fvec.min()) + np.log10(self.fvec.max()))
/ 2
),
axymin + (axymax - axymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax
def plot_1D_RI_f_x(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.plot(self.fvec, self.val_xfs.real[sigind, :], color="blue")
ax1.plot(self.fvec, self.val_xfs.imag[sigind, :], color="red")
ax0ymin = self.val_xfs.real[sigind, :].min()
ax0ymax = self.val_xfs.real[sigind, :].max()
ax1ymin = self.val_xfs.imag[sigind, :].min()
ax1ymax = self.val_xfs.imag[sigind, :].max()
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_AP_f_x(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.plot(self.fvec, np.absolute(self.val_xfs[sigind, :]), color="blue")
ax1.plot(self.fvec, phase(self.val_xfs[sigind, :]), color="red")
ax0ymin = np.absolute(self.val_xfs[sigind, :]).min()
ax0ymax = np.absolute(self.val_xfs[sigind, :]).max()
ax1ymin = phase(self.val_xfs[sigind, :]).min()
ax1ymax = phase(self.val_xfs[sigind, :]).max()
# ax2.plot(self.fvec[freqind]*np.ones_like(self.val_xfs[sigind, :]),
# np.linspace(ax2ymin,ax2ymax,len(self.val_xfs[sigind, :])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_RI_sig_x(self, absloc, coordloc, ax0, ax1, freqind):
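        """Plot the real (ax0) and imaginary (ax1) parts of the E field versus
        conductivity at the observation point (absloc, coordloc), for the
        frequency selected by ``freqind``."""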
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Conductivity (S/m)")
ax1.set_xlabel("Conductivity (S/m)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
ax0.plot(self.sigvec, self.val_xfs.real[:, freqind], color="blue")
ax1.plot(self.sigvec, self.val_xfs.imag[:, freqind], color="red")
ax0ymin = self.val_xfs.real[:, freqind].min()
ax0ymax = self.val_xfs.real[:, freqind].max()
ax1ymin = self.val_xfs.imag[:, freqind].min()
ax1ymax = self.val_xfs.imag[:, freqind].max()
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_AP_sig_x(self, absloc, coordloc, ax0, ax1, freqind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Conductivity (S/m)")
ax1.set_xlabel("Conductivity (S/m)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
ax0.plot(self.sigvec, np.absolute(self.val_xfs[:, freqind]), color="blue")
ax1.plot(self.sigvec, phase(self.val_xfs[:, freqind]), color="red")
ax0ymin = np.absolute(self.val_xfs[:, freqind]).min()
ax0ymax = np.absolute(self.val_xfs[:, freqind]).max()
ax1ymin = phase(self.val_xfs[:, freqind]).min()
ax1ymax = phase(self.val_xfs[:, freqind]).max()
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_phasor_f_x(self, absloc, coordloc, ax, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax.plot(self.val_xfs.real[sigind, :], self.val_xfs.imag[sigind, :])
def plot_1D_phasor_sig_x(self, absloc, coordloc, ax, freqind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax.plot(self.val_xfs.real[:, freqind], self.val_xfs.imag[:, freqind])
def plot_1D_RI_f_y(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.plot(self.fvec, self.val_yfs.real[sigind, :], color="blue")
ax1.plot(self.fvec, self.val_yfs.imag[sigind, :], color="red")
ax0ymin = self.val_yfs.real[sigind, :].min()
ax0ymax = self.val_yfs.real[sigind, :].max()
ax1ymin = self.val_yfs.imag[sigind, :].min()
ax1ymax = self.val_yfs.imag[sigind, :].max()
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_AP_f_y(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_yfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_yfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.plot(self.fvec, np.absolute(self.val_yfs[sigind, :]), color="blue")
ax1.plot(self.fvec, phase(self.val_yfs[sigind, :]), color="red")
ax0ymin, ax0ymax = (
np.absolute(self.val_yfs[sigind, :]).min(),
np.absolute(self.val_yfs[sigind, :]).max(),
)
ax1ymin, ax1ymax = (
phase(self.val_yfs[sigind, :]).min(),
phase(self.val_yfs[sigind, :]).max(),
)
# ax2.plot(self.fvec[freqind]*np.ones_like(self.val_yfs[sigind, :]),
# np.linspace(ax2ymin,ax2ymax,len(self.val_yfs[sigind, :])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_RI_sig_y(self, absloc, coordloc, ax0, ax1, freqind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Conductivity (S/m)")
ax1.set_xlabel("Conductivity (S/m)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
ax0.plot(self.sigvec, self.val_yfs.real[:, freqind], color="blue")
ax1.plot(self.sigvec, self.val_yfs.imag[:, freqind], color="red")
ax0ymin, ax0ymax = (
self.val_yfs.real[:, freqind].min(),
self.val_yfs.real[:, freqind].max(),
)
ax1ymin, ax1ymax = (
self.val_yfs.imag[:, freqind].min(),
self.val_yfs.imag[:, freqind].max(),
)
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_yfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_yfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_AP_sig_y(self, absloc, coordloc, ax0, ax1, freqind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Conductivity (S/m)")
ax1.set_xlabel("Conductivity (S/m)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
ax0.plot(self.sigvec, np.absolute(self.val_yfs[:, freqind]), color="blue")
ax1.plot(self.sigvec, phase(self.val_yfs[:, freqind]), color="red")
ax0ymin, ax0ymax = (
np.absolute(self.val_yfs[:, freqind]).min(),
np.absolute(self.val_yfs[:, freqind]).max(),
)
ax1ymin, ax1ymax = (
phase(self.val_yfs[:, freqind]).min(),
phase(self.val_yfs[:, freqind]).max(),
)
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_yfs[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_yfs[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("f =%0.5f Hz") % (self.fvec[freqind]),
xy=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0
** ((np.log10(self.sigvec.min()) + np.log10(self.sigvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_phasor_f_y(self, absloc, coordloc, ax, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax.plot(self.val_yfs.real[sigind, :], self.val_yfs.imag[sigind, :])
def plot_1D_phasor_sig_y(self, absloc, coordloc, ax, freqind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax.plot(self.val_yfs.real[:, freqind], self.val_yfs.imag[:, freqind])
def plot_1D_RI_f_z(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Real part (V/m)")
ax1.set_ylabel("E field, Imag part (V/m)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_xfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_xfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
ax0.plot(self.fvec, self.val_zfs.real[sigind, :], color="blue")
ax1.plot(self.fvec, self.val_zfs.imag[sigind, :], color="red")
ax0ymin, ax0ymax = (
self.val_zfs.real[sigind, :].min(),
self.val_zfs.real[sigind, :].max(),
)
ax1ymin, ax1ymax = (
self.val_zfs.imag[sigind, :].min(),
self.val_zfs.imag[sigind, :].max(),
)
ax0.set_ylim(ax0ymin, ax0ymax)
ax1.set_ylim(ax1ymin, ax1ymax)
ax0.set_xscale("log")
ax1.set_xscale("log")
ax0.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax0ymin + (ax0ymax - ax0ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
ax1.annotate(
("$\sigma$ =%0.5f S/m") % (self.sigvec[sigind]),
xy=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
xycoords="data",
xytext=(
10.0 ** ((np.log10(self.fvec.min()) + np.log10(self.fvec.max())) / 2),
ax1ymin + (ax1ymax - ax1ymin) / 4.0,
),
textcoords="data",
fontsize=14.0,
)
return ax0, ax1
def plot_1D_AP_f_z(self, absloc, coordloc, ax0, ax1, sigind):
if self.normal.upper() == "Z":
obsLoc = np.c_[absloc, coordloc, self.z]
elif self.normal.upper() == "Y":
obsLoc = np.c_[absloc, self.y, coordloc]
elif self.normal.upper() == "X":
obsLoc = np.c_[self.x, absloc, coordloc]
self.eval_loc(
self.srcLoc, obsLoc, self.sigvec, self.fvec, self.orientation, self.func1D
)
ax0.set_xlabel("Frequency (Hz)")
ax1.set_xlabel("Frequency (Hz)")
ax0.set_ylabel("E field, Amplitude (V/m)")
ax1.set_ylabel("E field, Phase (deg)")
# ax0.plot(self.sigvec[sigind]*np.ones_like(self.val_zfs.real[:, freqind]),
# np.linspace(ax0ymin,ax0ymax,len(self.val_zfs.real[:, freqind])),linestyle="dashed", color="black",linewidth=3.0)
        ax0.plot(self.fvec, np.absolute(self.val_zfs[sigind, :]), color="blue")
# Copyright 2021 portfolio-robustfpm-framework Authors
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
r""" This submodule implements a :class:`Lattice` class for working with n-dimensional lattices on :math:`\mathbb{R}^{n}`
The rationale behind such lattice is that it is much easier to work with integer coordinates than with real ones.
:class:`Lattice` encapsulates such a uniform lattice.
"""
import numpy as np
from ..util import coalesce, cartesian_product
from .set_handler import ISetHandler
__all__ = ['Lattice']
class Lattice:
r""" Handler for the uniform or logscale n-dimensional lattice. Provides functionality
for mapping integer point coordinates to :math:`\mathbb{R}^{n}` and vice versa.
Parameters
----------
delta : array_like, size = n x 1
Lattice steps.
logscale : bool, default = False
If True, the lattice is considered logscale. Default is False.
center : array_like, size = n x 1
Point from :math:`\mathbb{R}^{n}` which corresponds to zero coordinates on a lattice.
dtype : numeric np.dtype
Type for points in :math:`\mathbb{R}^{n}`. Default is np.float64.
dtype_p : numeric np.dtype
Type for lattice coordinates. Default is np.int64.
Notes
------
The class upholds the following notation: 'x' means points from :math:`\mathbb{R}^{n}`,
'point' means points from the lattice.
"""
def __init__(self, delta, logscale=False, center=None, dtype=None, dtype_p=None):
self.dtype = coalesce(dtype, np.float64)
self.dtype_p = coalesce(dtype_p, np.int64)
self.delta = np.asarray(delta, dtype=self.dtype)
self.logscale = logscale
self.center = np.asarray(coalesce(center, self.delta * 0), dtype=self.dtype)
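    # Example (illustrative sketch): a uniform 2-D lattice with step 0.5 in each
    # direction, centred at the origin.
    #
    #   lat = Lattice(delta=[0.5, 0.5])
    #   lat.dim                               # -> 2
    #   lat._x_trans(np.array([1.0, 2.0]))    # identity mapping since logscale=False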
@property
def dim(self):
""" Returns lattice's dimension
Returns
-------
int
            Lattice's dimension
"""
return self.delta.shape[0]
def _x_trans(self, x):
r""" Utility function for reducing logscale lattice logic to uniform lattice.
Parameters
----------
x : array_like
Points from :math:`\mathbb{R}^{n}`.
Returns
-------
np.ndarray
logarithm of `x` if the lattice is logscale, otherwise `x`.
"""
return x if self.logscale == False else np.log(x)
def _x_trans_inv(self, x):
""" Inverse transform to :meth:`_x_trans()`.
"""
        return x if self.logscale == False else np.exp(x)
from scipy.special import ive
import numpy as np
import numpy.linalg as la
def to_cartesian(phi):
    """
    Converts from spherical hyperpolars to cartesian coordinates
    :params phi [nxd ndarray]: array of angle vectors
    :return X [nx(d+1) ndarray]: Dataset in cartesian coordinates
    """
    phi = np.atleast_2d(phi)
    n, d = phi.shape
    X = np.empty((n, d + 1))
    # running products of sines give the shared prefix of each coordinate
    sin_cum = np.cumprod(np.sin(phi), axis=1)
    X[:, 0] = np.cos(phi[:, 0])
    for i in range(1, d):
        X[:, i] = sin_cum[:, i - 1] * np.cos(phi[:, i])
    X[:, d] = sin_cum[:, d - 1]
    return X
def fit_mean_direction(X):
return X.sum(axis=0) / la.norm(X.sum(axis=0))
def fit_concentration(X):
"""
Computes the vMF MLE sol for the concentration parameter
NOTE: This is an approximate solution to a transcendental eq
:param X [nxd ndarray]: Design matrix of normalised word vectors
:return [float]: MLE concentration parameter solution
"""
X = np.array(X)
n, d = X.shape
R = la.norm(X.sum(axis=0)) / n
Rs = R**2
return R * (d - Rs) / (1.0 - Rs)
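# Example (illustrative): given row-normalised observations X on the unit sphere,
# the vMF parameters can be estimated as
#   mu_hat = fit_mean_direction(X)
#   kappa_hat = fit_concentration(X)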
def to_hypersphericalPolars(mu):
"""
Return the d-1 angles describing mu
:param mu [dx1 ndarray]: unit norm d dimensional vector
"""
mu_sq = mu ** 2
rev_cumsum = np.cumsum(mu_sq[::-1])[::-1]
rev_cumnorms = np.sqrt(rev_cumsum)
thetas = np.arccos(mu[:-1] / rev_cumnorms[:-1])
norm = la.norm(mu[-2:])
thetas[-1] = np.sign(mu[-1]) * np.arccos(mu[-2] / norm)
return thetas
def log_vMF_gradient(opt_mu, k, x):
"""
:params opt_mu[dx1 ndarray]: optimum direction
:params k[float]: optimum concentration
:params x[d, ndarray]: A datapoint to differentiate about
"""
def analytic_grad(k_phi_mu):
kappa = k_phi_mu[0]
phi = k_phi_mu[1:]
elementwise = x * opt_mu
cdots = np.cumsum(elementwise[::-1])[::-1]
tans = np.tan(phi)
comps = -elementwise[:-1] * tans
invs = cdots[1:] / tans
D = x.shape[0]
grad = invs + comps
grad *= kappa
v = D * 1.0 / 2 - 1
grad_kappa = np.dot(opt_mu, x) - ive(v+1, k) / ive(v, k)
return np.concatenate([[grad_kappa], grad])
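    # Note: the kappa component uses d/dk log C_d(k) = -I_{d/2}(k) / I_{d/2-1}(k);
    # the exponentially scaled Bessel functions `ive` are used so that the ratio
    # stays numerically stable for large kappa (the scaling factors cancel).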
phi_mu = to_hypersphericalPolars(opt_mu)
    k_phi_mu = np.concatenate(([k], phi_mu), axis=0)
    return analytic_grad(k_phi_mu)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Provide the Cartesian acceleration task.
The Cartesian acceleration task tries to impose a desired pose, velocity and acceleration profiles for a distal
link with respect to a base link, or world frame.
Before presenting the optimization problem, here is a small reminder. The acceleration is the time derivative of
the velocity, i.e. :math:`a = \frac{dv}{dt}` where the cartesian velocities are related to joint velocities by
:math:`v = J(q) \dot{q}` where :math:`J(q)` is the Jacobian, thus deriving that expression wrt time gives us:
.. math:: a = \frac{d}{dt} v = \frac{d}{dt} J(q) \dot{q} = J(q) \ddot{q} + \dot{J}(q) \dot{q}.
Now, we can formulate our minimization problem as:
.. math:: || J(q) \ddot{q} + \dot{J} \dot{q} - (a_d + K_d (v_d - v) + K_p e) ||^2,
where :math:`\ddot{q}` are the joint accelerations being optimized, :math:`a_d` are the desired cartesian
accelerations, :math:`v_d = [v_d^\top, \omega_d^\top]^\top` are the desired cartesian velocities, :math:`v` are the
current cartesian velocities of the distal link wrt the base, :math:`J(q) \in \mathbb{R}^{6 \times N}` is the
Jacobian taken from the base to the distal link, :math:`K_p` and :math:`K_d` are the stiffness and damping gains
respectively, :math:`e \in \mathbb{R}^{6}` is the error which is the concatenation of the position error given by
:math:`e_{p} = (x_d - x)` (with :math:`x_d` being the desired position, and :math:`x` the current position), and the
orientation error given by (if expressed as quaternions :math:`o = {s, v}` where :math:`s` is the real scalar part,
and :math:`v` is the vector part) :math:`e_{o} = s v_d - s_d v - v_d \times v`, and :math:`\dot{x}_d` is the
desired cartesian velocity for the distal link with respect to the base link.
The above formulation is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting
:math:`A = J(q)`, :math:`x = \ddot{q}`, and :math:`b = - \dot{J} \dot{q} + (a_d + K_d (v_d - v) + K_p e)`.
This task can, for instance, be used for foot pose tracking when this one is not in contact with the ground. If
the foot is in contact, we switch to a foot damping task which can be achieved by setting
:math:`a_d = v_d = e = 0` and thus we are trying to solve :math:`||J(q) \ddot{q} - \dot{J} \dot{q} - K_d v_d||^2`.
Inverse dynamics
----------------
Once the optimal joint accelerations :math:`\ddot{q}^*` have been computed, we can use inverse dynamics to
compute the corresponding torques to apply on the joints. This is given by:
.. math:: \tau = H(q) \ddot{q} + N(q, \dot{q})
where :math:`H(q)` is the inertia joint matrix, and N(q, \dot{q}) is a vector force that accounts for all the
other non-linear forces acting on the system (Coriolis, centrifugal, gravity, external forces, friction, etc.).
Important notes:
- You don't have to specify the whole pose, you can also only specify the position or orientation.
- You can also only specify the desired cartesian accelerations by setting `kp` and `kd` to zero; you don't have
neither to provide the desired cartesian velocities, position or orientation.
.. seealso:: `tasks/velocity/cartesian.py` and `tasks/torque/cartesian_impedance_control.py`
The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
import numpy as np
from pyrobolearn.priorities.tasks import JointAccelerationTask
from pyrobolearn.utils.transformation import quaternion_error
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (C++)", "<NAME> (insight)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class CartesianAccelerationTask(JointAccelerationTask):
r"""Cartesian Acceleration Task
The Cartesian acceleration task tries to impose a desired pose, velocity and acceleration profiles for a distal
link with respect to a base link, or world frame.
Before presenting the optimization problem, here is a small reminder. The acceleration is the time derivative of
the velocity, i.e. :math:`a = \frac{dv}{dt}` where the cartesian velocities are related to joint velocities by
:math:`v = J(q) \dot{q}` where :math:`J(q)` is the Jacobian, thus deriving that expression wrt time gives us:
.. math:: a = \frac{d}{dt} v = \frac{d}{dt} J(q) \dot{q} = J(q) \ddot{q} + \dot{J}(q) \dot{q}.
Now, we can formulate our minimization problem as:
.. math:: || J(q) \ddot{q} + \dot{J} \dot{q} - (a_d + K_d (v_d - v) + K_p e) ||^2,
where :math:`\ddot{q}` are the joint accelerations being optimized, :math:`a_d` are the desired cartesian
accelerations, :math:`v_d = [v_d^\top, \omega_d^\top]^\top` are the desired cartesian velocities, :math:`v` are the
current cartesian velocities of the distal link wrt the base, :math:`J(q) \in \mathbb{R}^{6 \times N}` is the
Jacobian taken from the base to the distal link, :math:`K_p` and :math:`K_d` are the stiffness and damping gains
respectively, :math:`e \in \mathbb{R}^{6}` is the error which is the concatenation of the position error given by
:math:`e_{p} = (x_d - x)` (with :math:`x_d` being the desired pose, and :math:`x` the current pose), and the
orientation error given by (if expressed as quaternions :math:`o = {s, v}` where :math:`s` is the real scalar part,
    and :math:`v` is the vector part) :math:`e_{o} = s v_d - s_d v - v_d \times v`, and :math:`\dot{x}_d` is the
desired cartesian velocity for the distal link with respect to the base link.
The above formulation is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting
:math:`A = J(q)`, :math:`x = \ddot{q}`, and :math:`b = - \dot{J} \dot{q} + (a_d + K_d (v_d - v) + K_p e)`.
This task can, for instance, be used for foot pose tracking when this one is not in contact with the ground. If
the foot is in contact, we switch to a foot damping task which can be achieved by setting
:math:`a_d = v_d = e = 0` and thus we are trying to solve :math:`||J(q) \ddot{q} - \dot{J} \dot{q} - K_d v_d||^2`.
Inverse dynamics
----------------
Once the optimal joint accelerations :math:`\ddot{q}^*` have been computed, we can use inverse dynamics to
compute the corresponding torques to apply on the joints. This is given by:
    .. math:: \tau = H(q) \ddot{q} + N(q, \dot{q})
where :math:`H(q)` is the inertia joint matrix, and N(q, \dot{q}) is a vector force that accounts for all the
other non-linear forces acting on the system (Coriolis, centrifugal, gravity, external forces, friction, etc.).
.. seealso:: `tasks/velocity/cartesian.py` and `tasks/torque/cartesian_impedance_control.py`
The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
def __init__(self, model, distal_link, base_link=None, local_position=(0, 0, 0), desired_position=None,
desired_orientation=None, desired_linear_velocity=None, desired_angular_velocity=None,
desired_linear_acceleration=None, desired_angular_acceleration=None,
kp_position=1., kp_orientation=1., kd_linear=1., kd_angular=1., weight=1., constraints=[]):
"""
Initialize the task.
Args:
model (ModelInterface): model interface.
distal_link (int, str): distal link id or name.
base_link (int, str, None): base link id or name. If None, it will be the world.
local_position (np.array[float[3]]): local position on the distal link.
desired_position (np.array[float[3]], None): desired position of distal link wrt the base. If None, it
will not be taken into account.
desired_orientation (np.array[float[4]], None): desired orientation (expressed as quaternion [x,y,z,w]) of
distal link wrt the base. If None, it will not be taken into account.
desired_linear_velocity (np.array[float[3]], None): desired linear velocity of distal link wrt the base.
If None, it will be set to zero.
desired_angular_velocity (np.array[float[3]], None): desired angular velocity of distal link wrt the base.
If None, it will be set to zero.
desired_linear_acceleration (np.array[float[3]], None): desired linear acceleration of distal link wrt
the base. If None, it will be set to zero.
desired_angular_acceleration (np.array[float[3]], None): desired angular acceleration of distal link wrt
the base. If None, it will be set to zero.
kp_position (float, np.array[float[3,3]]): position stiffness gain.
kp_orientation (float, np.array[float[3,3]]): orientation stiffness gain.
kd_linear (float, np.array[float[3,3]]): linear velocity damping gain.
kd_angular (float, np.array[float[3,3]]): angular velocity damping gain.
weight (float, np.array[float[6,6]], np.array[float[3,3]]): weight scalar or matrix associated to the task.
constraints (list[Constraint]): list of constraints associated with the task.
"""
super(CartesianAccelerationTask, self).__init__(model=model, weight=weight, constraints=constraints)
# define variables
self.distal_link = self.model.get_link_id(distal_link)
self.base_link = self.model.get_link_id(base_link) if base_link is not None else base_link
self.local_position = local_position
if base_link is not None:
raise NotImplementedError("Currently, the base_link can only be set to the world (None).")
# gains
self.kp_position = kp_position
self.kp_orientation = kp_orientation
self.kd_linear = kd_linear
self.kd_angular = kd_angular
# define desired references
self.desired_position = desired_position
self.desired_orientation = desired_orientation
self.desired_linear_velocity = desired_linear_velocity
self.desired_angular_velocity = desired_angular_velocity
self.desired_linear_acceleration = desired_linear_acceleration
self.desired_angular_acceleration = desired_angular_acceleration
# first update
self.update()
##############
# Properties #
##############
@property
def desired_position(self):
"""Get the desired cartesian position for the distal link wrt the base."""
return self._des_pos
@desired_position.setter
def desired_position(self, position):
"""Set the desired cartesian position for the distal link wrt the base."""
if position is not None:
if not isinstance(position, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired position to be a np.array, instead got: "
"{}".format(type(position)))
position = np.asarray(position)
if len(position) != 3:
raise ValueError("Expecting the given desired position array to be of length 3, but instead got: "
"{}".format(len(position)))
self._des_pos = position
@property
def desired_orientation(self):
"""Get the desired cartesian orientation (expressed as a quaternion [x,y,z,w]) for the distal link wrt the
base."""
return self._des_quat
@desired_orientation.setter
def desired_orientation(self, orientation):
"""Set the desired cartesian orientation (expressed as a quaternion [x,y,z,w]) for the distal link wrt the
base."""
if orientation is not None:
if not isinstance(orientation, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired orientation to be a np.array, instead got: "
"{}".format(type(orientation)))
orientation = np.asarray(orientation)
if len(orientation) != 4:
raise ValueError(
"Expecting the given desired orientation array to be of length 4, but instead got: "
"{}".format(len(orientation)))
self._des_quat = orientation
@property
def desired_linear_velocity(self):
"""Get the desired cartesian linear velocity of the distal link wrt the base."""
return self._des_lin_vel
@desired_linear_velocity.setter
def desired_linear_velocity(self, velocity):
"""Set the desired cartesian linear velocity of the distal link wrt the base."""
if velocity is None:
velocity = np.zeros(3)
elif not isinstance(velocity, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired linear velocity to be a np.array, instead got: "
"{}".format(type(velocity)))
velocity = np.asarray(velocity)
if len(velocity) != 3:
raise ValueError("Expecting the given desired linear velocity array to be of length 3, but instead "
"got: {}".format(len(velocity)))
self._des_lin_vel = velocity
@property
def desired_angular_velocity(self):
"""Get the desired cartesian angular velocity of the distal link wrt the base."""
return self._des_ang_vel
@desired_angular_velocity.setter
def desired_angular_velocity(self, velocity):
"""Set the desired cartesian angular velocity of the distal link wrt the base."""
if velocity is None:
velocity = np.zeros(3)
elif not isinstance(velocity, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired angular velocity to be a np.array, instead got: "
"{}".format(type(velocity)))
velocity = np.asarray(velocity)
if len(velocity) != 3:
raise ValueError("Expecting the given desired angular velocity array to be of length 3, but instead "
"got: {}".format(len(velocity)))
self._des_ang_vel = velocity
@property
def desired_velocity(self):
"""Return the linear and angular velocity."""
return np.concatenate((self._des_lin_vel, self._des_ang_vel))
@property
def desired_linear_acceleration(self):
"""Get the desired cartesian linear acceleration of the distal link wrt the base."""
return self._des_lin_acc
@desired_linear_acceleration.setter
def desired_linear_acceleration(self, acceleration):
"""Set the desired cartesian linear acceleration of the distal link wrt the base."""
if acceleration is None:
acceleration = np.zeros(3)
elif not isinstance(acceleration, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired linear acceleration to be a np.array, instead got: "
"{}".format(type(acceleration)))
acceleration = np.asarray(acceleration)
if len(acceleration) != 3:
raise ValueError("Expecting the given desired linear acceleration array to be of length 3, but instead "
"got: {}".format(len(acceleration)))
self._des_lin_acc = acceleration
@property
def desired_angular_acceleration(self):
"""Get the desired cartesian angular acceleration of the distal link wrt the base."""
return self._des_ang_acc
@desired_angular_acceleration.setter
def desired_angular_acceleration(self, acceleration):
"""Set the desired cartesian angular acceleration of the distal link wrt the base."""
if acceleration is None:
acceleration = np.zeros(3)
elif not isinstance(acceleration, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired angular acceleration to be a np.array, instead got: "
"{}".format(type(acceleration)))
acceleration = np.asarray(acceleration)
if len(acceleration) != 3:
raise ValueError("Expecting the given desired angular acceleration array to be of length 3, but instead "
"got: {}".format(len(acceleration)))
self._des_ang_acc = acceleration
@property
def desired_acceleration(self):
"""Return the linear and angular acceleration."""
return np.concatenate((self._des_lin_acc, self._des_ang_acc))
@property
def x_desired(self):
"""Get the desired cartesian pose for the distal link wrt to the base."""
position = self.desired_position
orientation = self.desired_orientation
if position is not None:
if orientation is not None:
return np.concatenate((position, orientation))
return position
return orientation
@x_desired.setter
def x_desired(self, x_d):
"""Set the desired cartesian pose for the distal link wrt to the base."""
if x_d is not None:
if not isinstance(x_d, (np.ndarray, list, tuple)):
raise TypeError(
"Expecting the given desired pose to be a np.array, instead got: {}".format(type(x_d)))
            x_d = np.asarray(x_d)
import os
import numpy as np
import pypinyin
import tensorflow as tf
import logging
from augmentations.augments import Augmentation
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class AM_DataLoader():
def __init__(self, config_dict, training=True):
self.speech_config = config_dict['speech_config']
self.text_config = config_dict['decoder_config']
self.augment_config = config_dict['augments_config']
self.streaming=self.speech_config['streaming']
self.chunk=self.speech_config['sample_rate']*self.speech_config['streaming_bucket']
self.batch = config_dict['learning_config']['running_config']['batch_size']
self.speech_featurizer = SpeechFeaturizer(self.speech_config)
self.text_featurizer = TextFeaturizer(self.text_config)
self.make_file_list(self.speech_config['train_list'] if training else self.speech_config['eval_list'], training)
self.augment = Augmentation(self.augment_config)
self.init_text_to_vocab()
self.epochs = 1
self.LAS = False
self.steps = 0
def load_state(self, outdir):
try:
dg_state = np.load(os.path.join(outdir, 'dg_state.npz'))
self.epochs = int(dg_state['epoch'])
self.train_offset = int(dg_state['train_offset'])
train_list = dg_state['train_list'].tolist()
if len(train_list) != len(self.train_list):
                logging.info('saved train list does not match the newly loaded train list; data loader will use its initial state')
self.epochs = 0
self.train_offset = 0
except FileNotFoundError:
            logging.info('state file not found; using initial state')
        except Exception:
            logging.info('loading state failed; using initial state')
def save_state(self, outdir):
np.savez(os.path.join(outdir, 'dg_state.npz'), epoch=self.epochs, train_offset=self.train_offset,
train_list=self.train_list)
def return_data_types(self):
if self.LAS:
return ( tf.float32, tf.int32, tf.int32, tf.int32, tf.float32)
else:
return (tf.float32, tf.int32, tf.int32, tf.int32)
def return_data_shape(self):
f, c = self.speech_featurizer.compute_feature_dim()
if self.LAS:
return (
tf.TensorShape([None, None, 1]) if self.speech_config['use_mel_layer'] else tf.TensorShape(
[None, None, f, c]),
tf.TensorShape([None, ]),
tf.TensorShape([None, None]),
tf.TensorShape([None, ]),
tf.TensorShape([None, None, None])
)
else:
return (
tf.TensorShape([None, None, 1]) if self.speech_config['use_mel_layer'] else tf.TensorShape(
[None, None, f, c]),
tf.TensorShape([None, ]),
tf.TensorShape([None, None]),
tf.TensorShape([None, ])
)
def get_per_epoch_steps(self):
return len(self.train_list) // self.batch
def eval_per_epoch_steps(self):
return len(self.test_list) // self.batch
def init_text_to_vocab(self):
pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
'调小': [['tiáo'], ['xiǎo']],
'调亮': [['tiáo'], ['liàng']],
'调暗': [['tiáo'], ['àn']],
'肖': [['xiāo']],
'英雄传': [['yīng'], ['xióng'], ['zhuàn']],
'新传': [['xīn'], ['zhuàn']],
'外传': [['wài'], ['zhuàn']],
'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
})
def text_to_vocab_func(txt):
pins = pypinyin.pinyin(txt)
pins = [i[0] for i in pins]
return pins
self.text_to_vocab = text_to_vocab_func
def make_file_list(self, wav_list, training=True):
with open(wav_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
num = len(data)
if training:
self.train_list = data[:int(num * 0.99)]
self.test_list = data[int(num * 0.99):]
np.random.shuffle(self.train_list)
self.train_offset = 0
self.test_offset = 0
            logging.info('loaded train list: {}, test list: {}'.format(len(self.train_list), len(self.test_list)))
else:
self.test_list = data
self.offset = 0
def only_chinese(self, word):
txt = ''
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
txt += ch
else:
continue
return txt
def eval_data_generator(self):
sample = self.test_list[self.offset:self.offset + self.batch]
self.offset += self.batch
speech_features = []
input_length = []
y1 = []
label_length1 = []
max_input = 0
max_label1 = 0
for i in sample:
wp, txt = i.strip().split('\t')
txt = txt.replace(' ', '')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
logging.info('{} wav too short < 25ms,skip'.format(wp))
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}) ,skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if self.speech_config['use_mel_layer']:
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce=self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *self.speech_config['stride_ms']
in_len = len(speech_feature) //self.chunk
if len(speech_feature) %self.chunk!=0:
in_len+=1
chunk_times=self.chunk//reduce
if self.chunk%reduce!=0:
chunk_times+=1
in_len*=chunk_times
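                    # Worked example (illustrative numbers): sample_rate=16000 and
                    # streaming_bucket=1 s give chunk=16000; reduction_factor=4 and
                    # stride_ms=10 give reduce = 4*16*10 = 640, hence chunk_times = 25.
                    # A 40000-sample wav then spans ceil(40000/16000) = 3 chunks, so
                    # in_len = 3 * 25 = 75 frames.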
else:
speech_feature = self.speech_featurizer.extract(data)
in_len = int(speech_feature.shape[0] // self.speech_config['reduction_factor'])
max_input = max(max_input, speech_feature.shape[0])
py = self.text_to_vocab(txt)
if self.check_valid(py, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt pinyin {} not all in tokens,skip'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array)))
continue
text_feature = self.text_featurizer.extract(py)
if in_len < len(text_feature):
logging.info('{} feature length < pinyin length,skip'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
max_label1 = max(max_label1, len(text_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
y1.append(np.array(text_feature))
label_length1.append(len(text_feature))
if self.speech_config['use_mel_layer']:
if self.streaming:
max_input=max_input//self.chunk*self.chunk+self.chunk
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
else:
for i in range(len(speech_features)):
if speech_features[i].shape[0] < max_input:
pad = np.ones([max_input - speech_features[i].shape[0], speech_features[i].shape[1],
speech_features[i].shape[2]]) * speech_features[i].min()
speech_features[i] = np.vstack((speech_features[i], pad))
for i in range(len(y1)):
if y1[i].shape[0] < max_label1:
pad = np.ones(max_label1 - y1[i].shape[0]) * self.text_featurizer.pad
y1[i] = np.hstack((y1[i], pad))
x = np.array(speech_features, 'float32')
y1 = np.array(y1, 'int32')
input_length = np.array(input_length, 'int32')
label_length1 = np.array(label_length1, 'int32')
return x, input_length, y1, label_length1
def check_valid(self, txt, vocab_list):
if len(txt) == 0:
return False
for n in txt:
if n in vocab_list:
pass
else:
return n
return True
def GuidedAttentionMatrix(self, N, T, g=0.2):
W = np.zeros((N, T), dtype=np.float32)
for n in range(N):
for t in range(T):
W[n, t] = 1 - np.exp(-(t / float(T) - n / float(N)) ** 2 / (2 * g * g))
return W
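    # Note: W[n, t] is near 0 along the "diagonal" n/N ~ t/T and approaches
    # 1 - exp(-1/(2*g*g)) (close to 1 for g=0.2) far from it, so it can be used to
    # penalise attention weights that stray from a roughly monotonic alignment.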
def guided_attention(self, input_length, targets_length, inputs_shape, mel_target_shape):
att_targets = []
for i, j in zip(input_length, targets_length):
i = int(i)
step = int(j)
pad = np.ones([inputs_shape, mel_target_shape]) * -1.
pad[i:, :step] = 1
att_target = self.GuidedAttentionMatrix(i, step, 0.2)
pad[:att_target.shape[0], :att_target.shape[1]] = att_target
att_targets.append(pad)
att_targets = np.array(att_targets)
return att_targets.astype('float32')
def generate(self, train=True):
sample = []
speech_features = []
input_length = []
y1 = []
label_length1 = []
max_input = 0
max_label1 = 0
if train:
batch = self.batch // 2 if self.augment.available() else self.batch
else:
batch = self.batch
for i in range(batch * 10):
if train:
line = self.train_list[self.train_offset]
self.train_offset += 1
if self.train_offset > len(self.train_list) - 1:
self.train_offset = 0
np.random.shuffle(self.train_list)
self.epochs += 1
else:
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info('{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if self.speech_config['use_mel_layer']:
if not self.streaming:
                    speech_feature = data / np.abs(data).max()
#####################################################################################
# CLASSICS - CalcuLAtionS of Self Interaction Cross Sections #
# by <NAME>, <NAME>, <NAME>, <NAME> and <NAME> #
#####################################################################################
# Requirements: python3, numpy, scipy
#
# This code provides the following functions:
#
# sigma(kappa, beta, mode, sign):
# Returns approximate analytical cross sections for the classical regime (kappa > 1) for given arguments
# kappa: Dimensionless momentum in the centre-of-mass frame, given by kappa = m_\chi v / (2 m_\phi)
# beta: Rescaled strength of the Yukawa potential, given by beta = 2 \alpha_\chi m_\phi / (m_\chi v^2)
# mode: Can take one of the following values:
# 'T': Returns the momentum transfer cross section for distinguishable particles
# 'V': Returns the viscosity cross section for distinguishable particles
# 'even': Returns the viscosity cross section for identical particles with even spatial wave function
# 'odd': Returns the viscosity cross section for identical particles with odd spatial wave function
# 'scalar': Returns the viscosity cross section for identical scalar particles
# 'fermion': Returns the viscosity cross section for identical fermions (averaged over initial spins)
# 'vector': Returns the viscosity cross section for identical vector particles (averaged over initial spins)
# If no mode is specified, the default option is 'T'
# sign: Can take one of the following values:
# 'attractive': Attractive Yukawa potential
# 'repulsive': Repulsive Yukawa potential
# If no sign is specified, the default option is 'attractive'
#
# sigma_Hulthen(kappa, beta, mode, sign, eps):
# Returns approximate analytical cross sections for the quantum regime (kappa < 1) for S-wave-only scattering under the Hulthen approximation, following Tulin, Yu & Zurek (arXiv:1302.3898)
# The arguments are the same as above, with the addition of
# eps: Numerical constant with default value 1.6.
#
# sigma_combined(kappa, beta, mode, sign):
# Returns the appropriate cross section depending on kappa, i.e. sigma for kappa > 1 and sigma_Hulthen for kappa < 0.4.
# To ensure continuity, the code linearly interpolates between the two different regimes between kappa = 0.4 and kappa = 1.
# The arguments are the same as above.
#
# averagedsigma(kappa0, beta0, mode, sign):
# Returns the averaged cross section for a Maxwell-Boltzmann distribution with velocity dispersion v0 based on pre-calculated tables.
# The arguments are the same as above with kappa0 = kappa(v = v0) and beta0 = beta(v = v0).
#
# IMPORTANT: The return values of all functions are dimensionless and need to be multiplied with (pi / m_phi^2) in order to obtain actual cross sections.
#
# Note: The option "approximate_eta" below determines whether the code should use approximate asymptotic expressions of the modified Bessel functions for large argument
# approximate_eta = True is slightly faster but inaccurate for small kappa
# approximate_eta = False is slightly slower but gives the best accuracy
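# Example usage (illustrative sketch; the numerical inputs below are made up):
#
#   m_chi, m_phi, alpha_chi, v = 10.0, 0.01, 0.01, 1e-3    # DM mass, mediator mass, coupling, velocity
#   kappa = m_chi * v / (2 * m_phi)
#   beta = 2 * alpha_chi * m_phi / (m_chi * v**2)
#   sigma_dimensionless = sigma_combined(kappa, beta, mode='V', sign='attractive')
#   sigma_physical = sigma_dimensionless * np.pi / m_phi**2   # multiply by pi/m_phi^2 (see note above)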
import numpy as np
from numpy import sqrt, pi, sin, cos, log, exp, euler_gamma
from scipy.special import kn, gamma, loggamma
from scipy.interpolate import RectBivariateSpline
approximate_eta = False
# Definition of auxiliary functions
lmin = lambda beta, kappa: max(1./2.,beta*kappa)
lminp = lambda beta, kappa: max(1.,2.*beta*kappa)
turn = lambda beta, betalow, a: exp(-(max(beta, betalow) - betalow)*a)
if approximate_eta:
eta = lambda x: -2.*log(x/2.)-1-2.*euler_gamma+(1-euler_gamma-log(x/2.))*x**2.
else:
eta = lambda x: x**2 * (- kn(1,x)**2 + kn(2,x)*kn(0,x))
zeta = lambda kappa, beta, lmin: (max(lmin, beta*kappa)**2 - lmin**2)/(2*kappa**2*beta**2) + eta(max(lmin, beta*kappa)/kappa)
lambdaT = (1.+cos(2.)+2*sin(2.))/2.
lambdaV = (9.-cos(4.)-4.*sin(4.))/16.
sigmaT_smallbeta = lambda beta, kappa: 2. * beta**2. * zeta(kappa, beta, 0.5)
sigmaV_smallbeta = lambda beta, kappa, lmin: 4. * beta**2. * zeta(kappa, 2.*beta, lmin)
def sigmaTatt(beta, kappa):
if beta < 1: return sigmaT_smallbeta(beta,kappa)*turn(beta,0.2,-0.64)
elif beta > 50: return 2. * log(beta) * (log(log(beta)) + 1)
else: return 4.7*log(beta + 0.82)
def sigmaTrep(beta, kappa):
if beta <1: return sigmaT_smallbeta(beta,kappa)*turn(beta,0.2,0.53)
elif beta > 50: return lambdaT * (log(2.*beta)-log(log(2.*beta)))**2.
else: return 2.9*log(beta + 0.47)
def sigmaVatt(beta, kappa, lmin):
if beta < 0.5: return sigmaV_smallbeta(beta,kappa,lmin)*turn(beta,0.1,-0.67)
elif beta > 25: return (1 + log(beta)- 1/(2.*log(beta)))**2/2.
else: return 2.5*log(beta + 1.05)
def sigmaVrep(beta, kappa, lmin):
if beta < 0.5: return sigmaV_smallbeta(beta,kappa,lmin)*turn(beta,0.1,0.370562)
elif beta > 25: return log(2. * beta) * (lambdaV * log(2. * beta) - (2.*lambdaV - 1) * log(log(2.*beta)))
else: return 2.8*log(beta + 0.80)
# Reading tabulated grids
modes = ['T','V','even','odd','scalar','fermion','vector']
signs = ['attractive','repulsive']
mode_factor = {'T': 1, 'V': 2/3., 'even': 4/3., 'odd': 0, 'scalar': 4/3., 'fermion': 1/3., 'vector': 8/9.}
beta0grid = np.logspace(-5,5, 101, endpoint=True)
kappa0grid = np.logspace(-3,3, 61, endpoint=True)
averagedsigmainterdict = {}
#averagedsigmadict = {}
for mode in modes:
for sign in signs:
outputname_data = 'sigma'+mode+'list_'+sign+'.txt'
averagedsigmagrid = np.loadtxt(outputname_data)
averagedsigmaarray = np.array(averagedsigmagrid)[:,2].reshape((len(kappa0grid),len(beta0grid))) + 1e-100
averagedsigmainterdict[mode+sign] = RectBivariateSpline(np.log10(kappa0grid), np.log10(beta0grid), np.log10(averagedsigmaarray))
# averagedsigmadict[mode+sign] = lambda x, y: 10**averagedsigmainterdict[mode+sign](np.log10(x),np.log10(y))[0,0]
# Definition of cross section functions
def sigma(kappa, beta, mode = 'T', sign = 'attractive'):
if not(sign == 'attractive' or sign == 'repulsive'):
print('Sign not recognized in function sigma()')
exit()
if kappa < 1:
print('Warning: kappa outside of range of validity in function sigma()')
return 0.
if mode == 'T':
if sign == 'attractive': return sigmaTatt(beta, kappa)
else: return sigmaTrep(beta, kappa)
elif mode == 'V':
if sign == 'attractive': return sigmaVatt(beta, kappa, 1.)
else: return sigmaVrep(beta, kappa, 1.)
elif mode == 'even':
if sign == 'attractive': return sigmaVatt(beta, kappa, 0.5)
else: return sigmaVrep(beta, kappa, 0.5)
elif mode == 'odd':
if sign == 'attractive': return sigmaVatt(beta, kappa, 1.5)
else: return sigmaVrep(beta, kappa, 1.5)
elif mode == 'scalar':
return sigma(kappa, beta, mode = 'even', sign = sign)
elif mode == 'fermion':
return 0.75*sigma(kappa, beta, mode = 'odd', sign = sign) + 0.25*sigma(kappa, beta, mode = 'even', sign = sign)
elif mode == 'vector':
return 1/3.*sigma(kappa, beta, mode = 'odd', sign = sign) + 2/3.*sigma(kappa, beta, mode = 'even', sign = sign)
else:
print('Mode not recognized in function sigma()')
exit()
def sigma_Hulthen(kappa, beta, mode = 'T', sign = 'attractive', eps=1.6):
if kappa > 1:
print('Warning: kappa outside of range of validity in function sigma_Hulthen()')
return 0
if beta > 1e6:
print('Warning: numerical instability possible for beta > 10^6 in function sigma_Hulthen()')
if not(mode in modes):
print('Mode not recognized in function sigma_Hulthen()')
exit()
i = 1j
unity = 1+0j
if sign == 'attractive':
beta_signed = -1*beta
elif sign == 'repulsive':
beta_signed = beta
else:
print('Sign not recognized in function sigma_Hulthen()')
exit()
lam_p = 1 + i*kappa/eps * (1 + np.sqrt( 1 + 2*beta_signed*eps*unity ) )
lam_m = 1 + i*kappa/eps * (1 - np.sqrt( 1 + 2*beta_signed*eps*unity ) )
arg = i*gamma(lam_p+lam_m-2)/exp(loggamma(lam_p)+loggamma(lam_m))
delta_0 = np.angle(arg)
    sigma_s_wave = 4*np.pi/kappa**2 * np.sin(delta_0)**2
# -*- coding: utf-8 -*-
"""High level functions for making better use of baseline algorithms.
Functions in this module make use of other baseline algorithms in
pybaselines to provide better results or optimize parameters.
Created on March 3, 2021
@author: <NAME>
"""
from math import ceil
import numpy as np
from . import classification, morphological, polynomial, spline, whittaker
from ._algorithm_setup import _setup_optimizer, _setup_polynomial, _whittaker_smooth, _yx_arrays
from .utils import _check_scalar, _get_edges, _inverted_sort, gaussian
def collab_pls(data, average_dataset=True, method='asls', method_kwargs=None, **kwargs):
"""
Collaborative Penalized Least Squares (collab-PLS).
Averages the data or the fit weights for an entire dataset to get more
optimal results. Uses any Whittaker-smoothing-based or weighted spline algorithm.
Parameters
----------
data : array-like, shape (M, N)
An array with shape (M, N) where M is the number of entries in
the dataset and N is the number of data points in each entry.
average_dataset : bool, optional
If True (default) will average the dataset before fitting to get the
weighting. If False, will fit each individual entry in the dataset and
then average the weights to get the weighting for the dataset.
method : str, optional
A string indicating the Whittaker-smoothing-based or weighted spline method to
use for fitting the baseline. Default is 'asls'.
**method_kwargs
Keyword arguments to pass to the selected `method` function.
Returns
-------
baselines : np.ndarray, shape (M, N)
An array of all of the baselines.
params : dict
A dictionary with the following items:
* 'average_weights': numpy.ndarray, shape (N,)
The weight array used to fit all of the baselines.
Additional items depend on the output of the selected method. Every
other key will have a list of values, with each item corresponding to a
fit.
References
----------
<NAME>., et al. Collaborative Penalized Least Squares for Background
Correction of Multiple Raman Spectra. Journal of Analytical Methods
in Chemistry, 2018, 2018.
"""
dataset, fit_func, _, method_kws = _setup_optimizer(
data, method, (whittaker, morphological, classification, spline), method_kwargs,
True, **kwargs
)
if dataset.ndim < 2:
raise ValueError((
'the input data must have a shape of (number of measurements, number of points), '
f'but instead has a shape of {dataset.shape}'
))
if average_dataset:
_, fit_params = fit_func(np.mean(dataset.T, 1), **method_kws)
method_kws['weights'] = fit_params['weights']
else:
weights = np.empty_like(dataset)
for i, entry in enumerate(dataset):
_, fit_params = fit_func(entry, **method_kws)
weights[i] = fit_params['weights']
method_kws['weights'] = np.mean(weights.T, 1)
method_kws['tol'] = np.inf
baselines = np.empty(dataset.shape)
params = {'average_weights': method_kws['weights']}
method = method.lower()
if method == 'fabc':
# have to handle differently since weights for fabc is the mask for
# classification rather than weights for fitting
fit_func = _whittaker_smooth
for key in list(method_kws.keys()):
if key not in {'weights', 'lam', 'diff_order'}:
method_kws.pop(key)
for i, entry in enumerate(dataset):
baselines[i], param = fit_func(entry, **method_kws)
if method == 'fabc':
param = {'weights': param}
for key, value in param.items():
if key in params:
params[key].append(value)
else:
params[key] = [value]
return baselines, params
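# Example usage (illustrative; `spectra` is assumed to be an (M, N) array of measured signals):
#
#   baselines, params = collab_pls(spectra, average_dataset=True, method='asls',
#                                  method_kwargs={'lam': 1e5})
#   corrected = spectra - baselines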
def optimize_extended_range(data, x_data=None, method='asls', side='both', width_scale=0.1,
height_scale=1., sigma_scale=1. / 12., min_value=2, max_value=8,
step=1, pad_kwargs=None, method_kwargs=None, **kwargs):
"""
Extends data and finds the best parameter value for the given baseline method.
Adds additional data to the left and/or right of the input data, and then iterates
through parameter values to find the best fit. Useful for calculating the optimum
`lam` or `poly_order` value required to optimize other algorithms.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
method : str, optional
A string indicating the Whittaker-smoothing-based, polynomial, or spline method
to use for fitting the baseline. Default is 'asls'.
side : {'both', 'left', 'right'}, optional
The side of the measured data to extend. Default is 'both'.
width_scale : float, optional
The number of data points added to each side is `width_scale` * N. Default
is 0.1.
height_scale : float, optional
The height of the added Gaussian peak(s) is calculated as
`height_scale` * max(`data`). Default is 1.
sigma_scale : float, optional
The sigma value for the added Gaussian peak(s) is calculated as
`sigma_scale` * `width_scale` * N. Default is 1/12, which will make
the Gaussian span +- 6 sigma, making its total width about half of the
added length.
min_value : int or float, optional
The minimum value for the `lam` or `poly_order` value to use with the
indicated method. If using a polynomial method, `min_value` must be an
integer. If using a Whittaker-smoothing-based method, `min_value` should
be the exponent to raise to the power of 10 (e.g., a `min_value` of 2
designates a `lam` value of 10**2).
Default is 2.
max_value : int or float, optional
The maximum value for the `lam` or `poly_order` value to use with the
indicated method. If using a polynomial method, `max_value` must be an
integer. If using a Whittaker-smoothing-based method, `max_value` should
be the exponent to raise to the power of 10 (e.g., a `max_value` of 3
designates a `lam` value of 10**3).
Default is 8.
step : int or float, optional
The step size for iterating the parameter value from `min_value` to `max_value`.
If using a polynomial method, `step` must be an integer.
pad_kwargs : dict, optional
A dictionary of options to pass to :func:`.pad_edges` for padding
the edges of the data when adding the extended left and/or right sections.
Default is None, which will use an empty dictionary.
method_kwargs : dict, optional
A dictionary of keyword arguments to pass to the selected `method` function.
Default is None, which will use an empty dictionary.
**kwargs
Deprecated in version 0.7.0 and will be removed in version 0.10.0 or 1.0. Pass any
keyword arguments for the fitting function in the `method_kwargs` dictionary.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The baseline calculated with the optimum parameter.
method_params : dict
A dictionary with the following items:
* 'optimal_parameter': int or float
The `lam` or `poly_order` value that produced the lowest
root-mean-squared-error.
* 'min_rmse': float
The minimum root-mean-squared-error obtained when using
the optimal parameter.
Additional items depend on the output of the selected method.
Raises
------
ValueError
Raised if `side` is not 'left', 'right', or 'both'.
TypeError
Raised if using a polynomial method and `min_value`, `max_value`, or
`step` is not an integer.
ValueError
Raised if using a Whittaker-smoothing-based method and `min_value`,
`max_value`, or `step` is greater than 100.
Notes
-----
Based on the extended range penalized least squares (erPLS) method from [1]_.
The method proposed by [1]_ was for optimizing lambda only for the aspls
method by extending only the right side of the spectrum. The method was
modified by allowing extending either side following [2]_, and for optimizing
lambda or the polynomial degree for all of the affected algorithms in
pybaselines.
References
----------
.. [1] <NAME>., et al. An Automatic Baseline Correction Method Based on
the Penalized Least Squares Method. Sensors, 2020, 20(7), 2015.
.. [2] <NAME>., et al. Range-independent background subtraction algorithm
for recovery of Raman spectra of biological tissue. Journal of Raman
Spectroscopy. 2012, 43(12), 1884-1894.
"""
side = side.lower()
if side not in ('left', 'right', 'both'):
raise ValueError('side must be "left", "right", or "both"')
y, fit_func, func_module, method_kws = _setup_optimizer(
data, method, (whittaker, polynomial, morphological, spline, classification),
method_kwargs, True, **kwargs
)
method = method.lower()
if func_module == 'polynomial' or method in ('dietrich', 'cwt_br'):
if any(not isinstance(val, int) for val in (min_value, max_value, step)):
raise TypeError((
'min_value, max_value, and step must all be integers when'
' using a polynomial method'
))
param_name = 'poly_order'
else:
if any(val > 100 for val in (min_value, max_value, step)):
raise ValueError((
'min_value, max_value, and step should be the power of 10 to use '
'(eg. min_value=2 denotes 10**2), not the actual "lam" value, and '
'thus should not be greater than 100'
))
param_name = 'lam'
_, x = _yx_arrays(y, x_data)
added_window = int(x.shape[0] * width_scale)
sort_x = x_data is not None
if sort_x:
sort_order = np.argsort(x, kind='mergesort') # to ensure x is increasing
x = x[sort_order]
y = y[sort_order]
if 'weights' in method_kws:
# have to adjust weight length to accommodate the added sections; set weights
# to 1 to ensure the added sections are fit
method_kws['weights'] = np.pad(
method_kws['weights'][sort_order],
[0 if side == 'right' else added_window, 0 if side == 'left' else added_window],
'constant', constant_values=1
)
max_x = x.max()
min_x = x.min()
x_range = max_x - min_x
known_background = np.array([])
fit_x_data = x
fit_data = y
lower_bound = upper_bound = 0
if pad_kwargs is None:
pad_kwargs = {}
added_left, added_right = _get_edges(y, added_window, **pad_kwargs)
added_gaussian = gaussian(
np.linspace(-added_window / 2, added_window / 2, added_window),
height_scale * abs(y.max()), 0, added_window * sigma_scale
)
if side in ('right', 'both'):
added_x = np.linspace(
max_x, max_x + x_range * (width_scale / 2), added_window + 1
)[1:]
fit_x_data = np.concatenate((fit_x_data, added_x))
fit_data = np.concatenate((fit_data, added_gaussian + added_right))
known_background = added_right
upper_bound += added_window
if side in ('left', 'both'):
added_x = np.linspace(
min_x - x_range * (width_scale / 2), min_x, added_window + 1
)[:-1]
fit_x_data = np.concatenate((added_x, fit_x_data))
fit_data = np.concatenate((added_gaussian + added_left, fit_data))
known_background = np.concatenate((known_background, added_left))
lower_bound += added_window
if func_module == 'polynomial' or method in ('iasls', 'dietrich', 'cwt_br'):
method_kws['x_data'] = fit_x_data
added_len = 2 * added_window if side == 'both' else added_window
upper_idx = fit_data.shape[0] - upper_bound
min_sum_squares = np.inf
best_val = None
# TODO maybe switch to linspace since arange is inconsistent when using floats
for var in np.arange(min_value, max_value + step, step):
if param_name == 'lam':
method_kws[param_name] = 10**var
else:
method_kws[param_name] = var
fit_baseline, fit_params = fit_func(fit_data, **method_kws)
# TODO change the known baseline so that np.roll does not have to be
# calculated each time, since it requires additional time
residual = (
known_background - np.roll(fit_baseline, upper_bound)[:added_len]
)
# just calculate the sum of squares to reduce time from using sqrt for rmse
sum_squares = residual.dot(residual)
if sum_squares < min_sum_squares:
baseline = fit_baseline[lower_bound:upper_idx]
params = fit_params
best_val = var
min_sum_squares = sum_squares
params.update(
{'optimal_parameter': best_val, 'min_rmse': np.sqrt(min_sum_squares / added_len)}
)
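# Hedged usage sketch (illustration only, not from the original source): for a single
# measured spectrum, the optimum Whittaker `lam` could be searched for with something like:
#
#     y = np.random.rand(1000)  # stand-in for a measured spectrum
#     baseline, method_params = optimize_extended_range(
#         y, method='arpls', side='both', min_value=2, max_value=8, step=1
#     )
#     print(method_params['optimal_parameter'], method_params['min_rmse'])
#
# The 'arpls' method choice and the synthetic y are assumptions for demonstration.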
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
# from src.Churn_prediction import Models
from deepctr_torch import models
from deepctr_torch import callbacks
import torch
max_epoch = 1000 #default=1000
device = 'cpu' #default='cpu'
batch_size = 256 #default=256
earlystop_patience = 50 #default=50
dnn_hidden_units = 256, 256
l2_reg_linear = 1e-05 #default=1e-05
l2_reg_dnn = 1e-4 #default=0
dnn_dropout = 0.5 #default=0
dnn_activation = 'relu' # default='relu'
dnn_use_bn = 0 #default=0
lr = 0.001 # default=0.02
random_seed = 2020 # default=2020
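# Hedged usage sketch (assumption, not part of the original script): the DeepFM routine
# defined below could be driven by the module-level hyperparameters above, e.g.
#
#     DeepFM(dnn_hidden_units, l2_reg_linear, l2_reg_dnn, dnn_dropout,
#            feature_file="features_basic.csv")
#
# "features_basic.csv" is a hypothetical file name used only for illustration.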
def DeepFM(dnn_hidden_units, l2_reg_linear, l2_reg_dnn, dnn_dropout,
feature_file):
datapath = "../../data"
user_path = "dataset_split"
label_file = "../../data/labels.csv"
auc = []
acc = []
f1_score = []
for fold in range(1, 6):
# Cross validation
user_file = [
user_path + "/fold-%d/train.uid.npy" % (fold),
user_path + "/fold-%d/dev.uid.npy" % (fold)
]
feature_file = feature_file if "all" not in feature_file else feature_file.replace(
"XXX", str(fold))
train_type = "basic"
if "diff" in feature_file:
train_type = "diff"
elif "all" in feature_file:
train_type = "diff_inf"
# load data
feature_df = pd.read_csv(os.path.join(datapath, feature_file))
label = pd.read_csv(label_file)
label = label[["user_id", "label"]]
feature_df = feature_df.drop(columns=["label"]).merge(label,
on=["user_id"])
uid_list = []
for file in user_file:
uid_list.append(np.load(os.path.join(datapath, file)))
# process feature
sparse_features = ["first_interval", "last_interval"]
dense_features = feature_df.columns.drop([
"user_id", "label", "interval_length", "first_interval",
"last_interval"
]).tolist()
feature_df[sparse_features] = feature_df[sparse_features].fillna(
'-1', )
feature_df[dense_features] = feature_df[dense_features].fillna(0, )
target = ['label']
for feat in sparse_features:
lbe = LabelEncoder()
feature_df[feat] = lbe.fit_transform(feature_df[feat])
mms = StandardScaler()
feature_df[dense_features] = mms.fit_transform(
feature_df[dense_features])
fixlen_feature_columns = [
SparseFeat(feat,
vocabulary_size=feature_df[feat].nunique(),
embedding_dim=4)
for i, feat in enumerate(sparse_features)
] + [DenseFeat(
feat,
1,
) for feat in dense_features]
dnn_feature_columns = fixlen_feature_columns
linear_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns)
# generate data
train_df = feature_df.loc[feature_df.user_id.isin(uid_list[0])]
val_df = feature_df.loc[feature_df.user_id.isin(uid_list[1])]
X_train = {name: train_df[name] for name in feature_names}
X_val = {name: val_df[name] for name in feature_names}
y_train = train_df[target].values
y_val = val_df[target].values
# DeepFM model
use_bn = False if dnn_use_bn == 0 else True
deep_FM = models.DeepFM(linear_feature_columns,
dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_dnn=l2_reg_dnn,
dnn_dropout=dnn_dropout,
dnn_activation=dnn_activation,
dnn_use_bn=use_bn,
task='binary',
device=device,
seed=random_seed)
optimizer = torch.optim.Adam(deep_FM.parameters(), lr=lr)
deep_FM.compile(
'adam',
"binary_crossentropy",
metrics=["binary_crossentropy",
"auc"]) # ,val_metrics=["auc","binary_crossentropy"]
# fit
model_callback = [
callbacks.EarlyStopping(patience=earlystop_patience,
monitor='val_binary_crossentropy',
mode="min")
]
deep_FM.fit(X_train,
y_train,
batch_size=batch_size,
epochs=max_epoch,
validation_data=(X_val, y_val),
callbacks=model_callback,
verbose=0)
# model predict
pred = deep_FM.predict(X_val)
try:
    prob = deep_FM.predict_proba(X_val)[:, 1]
except AttributeError:
    # deepctr_torch models may not expose predict_proba; fall back to the raw predictions
    prob = pred
pred = pred > 0.5
auc.append(metrics.roc_auc_score(y_val, prob))
acc.append(metrics.accuracy_score(y_val, pred))
f1_score.append(metrics.f1_score(y_val, pred))
print(train_type)
print("test_auc",
|
np.mean(auc)
|
numpy.mean
|
import os, itertools, shutil, re, string, sys, inspect
import numpy as np
np_vars = vars(np)
np_fs = {_key:np_vars[_key] for _key in np_vars.keys() if inspect.isfunction(np_vars[_key])}
def boldblue(text):
return "\033[1m" + '\033[94m' + text + "\033[0m"
def str_2_num(num_str):
if num_str.count(".") == 0:
return int(num_str)
else:
return float(num_str)
def dict_product(dicts):
"""
dict_product(dicts)
from a dict of parameters creates a generator which outputs a list of dicts
effectively a cartesian product over the parameter space.
eg: from: {'a': [0,1], 'b': [2,3], 'c': [4]}
outputs: [{'a': 0, 'b': 2, 'c': 4},
{'a': 0, 'b': 3, 'c': 4},
{'a': 1, 'b': 2, 'c': 4},
{'a': 1, 'b': 3, 'c': 4}]
"""
# from http://stackoverflow.com/questions/5228158/cartesian-product-of-a-dictionary-of-lists
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def dict_builder(params, test_name = ''):
"""
dict_builder(params)
uses the dict_product function and adds a
title key value pair for use in the input files
eg: from: {'a': [0,1], 'b': [2,3], 'c': [4]}
outputs: [{'TITLE': 'STEP_BOX0-4-2', 'a': '0', 'b': '2', 'c': '4'},
{'TITLE': 'STEP_BOX0-4-3', 'a': '0', 'b': '3', 'c': '4'},
{'TITLE': 'STEP_BOX1-4-2', 'a': '1', 'b': '2', 'c': '4'},
{'TITLE': 'STEP_BOX1-4-3', 'a': '1', 'b': '3', 'c': '4'}]
"""
for value_set in dict_product(params):
title = "-".join(map(str, value_set.values())).replace('.', '_')
vals = [dict([a, str(x)] for a, x in value_set.items())]
vals = vals[0]
vals['TITLE'] = test_name + title
yield vals
def input_directory_builder(folder_name, base_path):
"""
input_directory_builder(folder_name, base_path)
taking a base_path and a particular folder (folder_name) and creating both
folder_name inside of base_path if base_path does not exist and if base_path
does exist, just creates folder_name inside of it.
"""
calling_dir = os.getcwd()
if not os.path.exists(os.path.join(calling_dir, base_path)):
os.mkdir(base_path)
try:
os.chdir(os.path.join(calling_dir, base_path))
os.mkdir(folder_name)
except:
raise
finally:
os.chdir(calling_dir)
def build_input_files(filename, base_path = 'input_files', out = sys.stdout):
"""
build_input_files(filename, base_path = 'input_files')
takes a 'well-formatted' input file and outputs a
directory structure with the properly formatted input files
created in it.
"""
calling_dir = os.getcwd()
# I'm doing this because I need it later
file_path, file_name = os.path.split(filename)
with open(filename, 'r') as f:
txt = f.read()
## First Parse the FDS file
param_dict, IOoutput = FDSa_parser(txt, file_name, out)
# param_dict, sweep_param_dict, prms_in_axis = calculate_params(param_dict, axes)
for key_ in param_dict.keys():
txt = txt.replace(param_dict[key_][0], key_)
formatted_trials, logfile, IOoutput = eval_parsed_FDS(param_dict, out)
print("formatted_trials", formatted_trials[0])
## Make input files and directories
for i, value_set in enumerate(formatted_trials):
print(i,value_set)
tmp_txt = txt
# make a directory
case_name = 'case_'+int2base(i, 26)
# FDS uses uppercase reserved keywords, and so will we
value_set['TITLE'] = case_name
input_directory_builder(case_name, base_path)
# populate the input file
print(tmp_txt.count(list(value_set.keys())[1]))
print(value_set)
with open('tmp_txt', 'w') as f:
f.write(str(tmp_txt))
tmp_txt = tmp_txt.format(**value_set)  # str.format dislikes ':' or '.' in replacement field names because it thinks it's a format specifier
# create the file name
fname = os.path.join(calling_dir, base_path,
case_name, case_name + '.fds')
# write the input file to the directory
with open(fname, 'w') as f:
f.write(str(tmp_txt))
log_path_name = os.path.join(calling_dir, base_path, file_name[:-4] + '.log')
# write the augmented fds log file
with open(log_path_name, 'a') as f:
f.write(logfile)
return IOoutput
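# Hedged usage sketch (not from the original source): given an augmented FDS input file
# containing {PARAM ...} placeholders as described above, the case directories could be
# generated with:
#
#     build_input_files('my_sweep.fds', base_path='input_files')
#
# 'my_sweep.fds' is a hypothetical file name used only for illustration.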
def input_file_paths(base_path):
"""
input_file_paths(base_path)
returns the paths of the input files by recursively walking over base_path
so they can be iterated over in the main body of the program via multiprocessing
"""
paths = []
for dirpath, dirnames, filenames in os.walk(base_path):
for onefile in filenames:
# skip macOS .DS_Store files and log files
if not (onefile.startswith('.DS') or onefile.endswith('.log')):
#paths.append(dirpath+"/"+onefile)
paths.append(os.path.join(os.getcwd(), dirpath, onefile))
return paths
def int2base(x, base=26):
"""
int2base(x, base)
takes an integer and returns the base 26 representation (default) in letters
like one would see in excel column labeling (0 -> a, 63 -> bl)
based on https://stackoverflow.com/questions/2267362
"""
digs = string.ascii_lowercase
assert type(x) is int, "x is not an integer: %r" % x
assert type(base) is int, "base is not an integer: %r" % base
if x < 0: sign = -1
elif x==0: return 'a'
else: sign = 1
x *= sign
digits = []
y = x
while y:
    digits.append(digs[x % base])
    y = x // base
    x = (x // base) - 1
if sign < 0:
    digits.append('-')
# digits were collected least-significant first, so reverse once before joining
digits.reverse()
return ''.join(digits)
def eval_line(param_string):
"""
eval_line(param_string)
takes a string and turns it into an array of parameter values to be swept using
numpy functions.
ex: param_string = {HEAT_FLUX linspace 0 30 7 dtype int axis 1}
"""
words = param_string.strip("{}").split(" ")
if words[-2] == 'axis':
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
axis_ = words[-1]
words = words[0:-2]
print(words)
else:
axis_ = None
print(words)
# Backwards compatibility
if words[1] == "SWEEP":
print("\nusing legacy version of parFDS")
return [words[0],
np.linspace(str_2_num(words[2].strip(",")),str_2_num(words[3].strip(",")),str_2_num(words[4].strip(","))),
axis_]
elif words[1] in np_fs.keys():
_skip_next = False
args = []
kwargs = {}
print("\tusing function: numpy." + words[1])
print("\t\twith args: ",end = "")
f_call = "np."+words[1] + "("
for i_, w_ in enumerate(words[2:]):
print(i_, w_)
if _skip_next:
_skip_next=False
continue
elif (w_ in inspect.signature(np_fs[words[1]]).parameters.keys()):
print("KWARGS")
print(words)
print(w_ +"="+ words[2+i_+1]+",",end ="")
_skip_next = True
f_call = f_call + w_ +"="+ words[2+i_+1]+","
kwargs[w_] = words[2+i_+1]
else:
print(w_ + ",",end ="")
args.append(str_2_num(w_.strip(",")))
f_call = f_call + w_+ ","
f_call = f_call[0:-1] + ")"
print("\n"+f_call)
print("args, kwargs",args, kwargs)
print(np_fs[words[1]](*args,**kwargs))
return [words[0], np_fs[words[1]](*args,**kwargs), axis_]
else:
raise ValueError("specified function is not a numpy function!")
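# Hedged illustration of eval_line based on the docstring example above (not from the
# original source): a parameter line should be parsed into a name, a numpy array of
# sweep values, and an optional axis, roughly:
#
#     name, values, axis = eval_line('HEAT_FLUX linspace 0 30 7 dtype int axis 1')
#     # name   -> 'HEAT_FLUX'
#     # values -> np.linspace(0, 30, 7, dtype='int'), i.e. array([ 0,  5, 10, 15, 20, 25, 30])
#     # axis   -> '1'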
def FDSa_parser(txt, filename, IOoutput=sys.stdout):
"""
FDSa_parser(txt, filename, IOoutput)
takes in an augmented FDS file and determines how many
parametric will be created from that it also parses the augmented syntax to
build the dictionary used in generating the specific case FDS files
"""
## I'm doing this because I need it later
#file_path, file_name = os.path.split(filename)
# open the augmented fds input file
#with open(os.path.join(file_path, file_name), 'r') as f:
# read_data = f.read()
regex_find = re.findall(r'\{*[0-9a-zA-Z_:,.\s]*\}', txt)
# params = []
# params_raw = []
# axes = []
raw_2_array = {}
for param in regex_find:
# params_raw.append(param.strip('{}'))
# params.append(param.strip('{}').split('SWEEP'))
if "TITLE" not in param:
# if 'axis' in param:
# params[-1][-1] = params[-1][-1].split('axis')[0]
# axes.append(param.strip('{}').split('axis')[1])
# print("/n")
# print (param.strip('{}'))
short, values, axis = eval_line(param.strip('{}'))
raw_2_array[short] = [param.strip('{}'), values, axis]
txt = txt.replace(param.strip('{}'),short)
# else:
# axes.append(None)
# raw_2_array[param.strip('{}')] = [param_array_builder(param.strip('{}')), None]
IOoutput.write('-'*10 + 'ParFDS input file interpreter' + '-'*10 + '\n')
IOoutput.write('the following are the keys and values'+ '\n')
IOoutput.write('seen in ' + filename + '\n')
return raw_2_array, IOoutput
# params = [item.strip() for sublist in params for item in sublist]
# # if length of params is non-even that means I can assume a title parameter
# # double check with the occurance of FDSa 'reserved' keywords 'title' or 'name'
# if (len(params) % 2 != 0) and (params[0].lower() == ('title')):
# # based on the following idiom
# # https://stackoverflow.com/questions/3303213
# param_dict = dict(zip(params[1::2], params[2::2]))
# param_list = [params[0]]
# param_list.extend(params[1::2])
# param_name_dict = dict(zip(param_list, params_raw))
# else:
# param_dict = dict(zip(params[::2], params[1::2]))
# param_list = params[::2]
# param_name_dict = dict(zip(param_list, params_raw))
# dealing with the `:` and `.` issue in the FDS file due to
# key value restrictions in python
# for key, value in param_name_dict.items():
# #txt = string.replace(txt, value, key)
# txt = txt.replace(value, key)
# for key, value in raw_2_array.items():
# print(key,value)
# # txt = string.replace(txt, value, key)
# txt = txt.replace(txt, key, key)
# IOoutput.write('-'*10 + 'ParFDS input file interpreter' + '-'*10 + '\n')
# IOoutput.write('the following are the keys and values'+ '\n')
# IOoutput.write('seen in ' + filename + '\n')
#print(param_dict)
#assert False, "DEBUG"
# return param_dict, txt, IOoutput, axes,
def assign_dimensions(param_dict):
## Assign Dimensions First
axes = np.array([param_dict[key][2] for key in param_dict.keys()])
reserved_dims = np.unique(axes[axes != None])
"""Coordinate transformation functions."""
from warnings import simplefilter, warn
simplefilter("always", DeprecationWarning)
import numpy as np
from numpy.linalg import inv, norm, svd
from mbuild.utils.exceptions import RemovedFuncError
__all__ = [
"force_overlap",
"x_axis_transform",
"y_axis_transform",
"z_axis_transform",
# Deprecated
"equivalence_transform",
"rotate",
"rotate_around_x",
"rotate_around_y",
"rotate_around_z",
"spin",
"spin_x",
"spin_y",
"spin_z",
"translate",
"translate_to",
]
def force_overlap(move_this, from_positions, to_positions, add_bond=True):
"""Move a Compound such that a position overlaps with another.
Computes an affine transformation that maps the from_positions to the
respective to_positions, and applies this transformation to the compound.
Parameters
----------
move_this : mb.Compound
The Compound to be moved.
from_positions : np.ndarray, shape=(n, 3), dtype=float
Original positions.
to_positions : np.ndarray, shape=(n, 3), dtype=float
New positions.
add_bond : bool, optional, default=True
If `from_positions` and `to_positions` are `Ports`, create a bond
between the two anchor atoms.
"""
from mbuild.port import Port
T = None
if isinstance(from_positions, (list, tuple)) and isinstance(
to_positions, (list, tuple)
):
equivalence_pairs = zip(from_positions, to_positions)
elif isinstance(from_positions, Port) and isinstance(to_positions, Port):
equivalence_pairs, T = _choose_correct_port(
from_positions, to_positions
)
from_positions.used = True
to_positions.used = True
else:
equivalence_pairs = [(from_positions, to_positions)]
if not T:
T = _create_equivalence_transform(equivalence_pairs)
atom_positions = move_this.xyz_with_ports
atom_positions = T.apply_to(atom_positions)
move_this.xyz_with_ports = atom_positions
if add_bond:
if isinstance(from_positions, Port) and isinstance(to_positions, Port):
if not from_positions.anchor or not to_positions.anchor:
warn("Attempting to form bond from port that has no anchor")
else:
from_positions.anchor.parent.add_bond(
(from_positions.anchor, to_positions.anchor)
)
to_positions.anchor.parent.add_bond(
(from_positions.anchor, to_positions.anchor)
)
from_positions.anchor.parent.remove(from_positions)
to_positions.anchor.parent.remove(to_positions)
class CoordinateTransform(object):
"""Coordinate transforms."""
def __init__(self, T=None):
if T is None:
T = np.eye(4)
self.T = T
self.Tinv = inv(T)
def apply_to(self, A):
"""Apply the coordinate transformation to points in A."""
if A.ndim == 1:
A = np.expand_dims(A, axis=0)
rows, cols = A.shape
A_new = np.hstack([A, np.ones((rows, 1))])
A_new = np.transpose(self.T.dot(np.transpose(A_new)))
return A_new[:, 0:cols]
class Translation(CoordinateTransform):
"""Cartesian translation."""
def __init__(self, P):
T = np.eye(4)
T[0, 3] = P[0]
T[1, 3] = P[1]
T[2, 3] = P[2]
super(Translation, self).__init__(T)
class RotationAroundZ(CoordinateTransform):
"""Rotation around the z-axis."""
def __init__(self, theta):
T = np.eye(4)
T[0, 0] = np.cos(theta)
T[0, 1] = -np.sin(theta)
T[1, 0] = np.sin(theta)
T[1, 1] = np.cos(theta)
super(RotationAroundZ, self).__init__(T)
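# Hedged illustration (not part of mbuild itself): the transform classes defined here can
# be composed by applying them in sequence, e.g.
#
#     pts = np.array([[1.0, 0.0, 0.0]])
#     rotated = RotationAroundZ(np.pi / 2).apply_to(pts)        # approximately [[0., 1., 0.]]
#     shifted = Translation([0.0, 0.0, 1.0]).apply_to(rotated)  # approximately [[0., 1., 1.]]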
class RotationAroundY(CoordinateTransform):
"""Rotation around the y-axis."""
def __init__(self, theta):
T = np.eye(4)
T[0, 0] = np.cos(theta)
T[0, 2] = np.sin(theta)
T[2, 0] = -np.sin(theta)
# Copyright (C) 2015, <NAME> <<EMAIL>>
#
# LICENSE: THE SOFTWARE IS PROVIDED "AS IS" UNDER THE
# ACADEMIC FREE LICENSE (AFL) v3.0.
#
import os
from mpi4py import MPI
import numpy as np
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def visualize_all_weights(output, model, nmultilayer, config, first=0, last=20, figure=1, show=False, save=True, ion=True):
# Optimized for two processing layers
if ( (output._PICTURE_OUTPUT == True)
and (MPI.COMM_WORLD.Get_rank() == 0)
and ( (model.MultiLayer[nmultilayer].get_iteration() % output._PICTURE_EVERY_N_ITERATIONS == 0)
or (model.MultiLayer[nmultilayer].get_iteration() == model.MultiLayer[nmultilayer].get_iterations()) ) ):
Layer = model.MultiLayer[nmultilayer].Layer
# create figure if not given
if (issubclass(type(figure), matplotlib.figure.Figure)):
pass
elif (issubclass(type(figure), int)):
figure = plt.figure(figure)
else:
figure = plt.figure()
figure.clf()
if ( (last is None) or (last > Layer[1].C) ):
last = Layer[1].C
# plot all given images on sub-plots
cols = int(np.ceil(math.sqrt(last-first)))
rows = int(np.ceil((last-first)/float(cols)))
#for squares data set:
#cols = 1
#rows = last-first
NLAYERS = model.MultiLayer[nmultilayer].number_of_layers()
width_ratios = []
# 1: 1/N1 : 1/N2 = N1N2 : N2 : N1
ratio = 1
for nlayer in xrange(2,NLAYERS):
ratio *= Layer[nlayer].get_weights().shape[0]
for _ in xrange(cols):
for nlayer in xrange(1,NLAYERS):
if (nlayer == 1):
width_ratios.append(ratio)
else:
width_ratios.append(ratio/Layer[nlayer].get_weights().shape[0])
npixels_width = np.ceil(np.sqrt(Layer[1].D[0]))
for nlayer in xrange(2,NLAYERS):
npixels_width += np.ceil(np.sqrt(Layer[1].D[0]))/Layer[nlayer].get_weights().shape[0]
npixels_width *= cols
npixels_height = np.ceil(math.sqrt(Layer[1].D[0]))*rows
scale = 2 #adjust for higher resolution
pixel_width = scale*npixels_width + (NLAYERS-1)*cols+1
pixel_height = scale*npixels_height + (rows+1)
gs = gridspec.GridSpec(rows, (NLAYERS-1)*cols, width_ratios=width_ratios)
text_space = 0.45
# the spacing has some problems which require the arbitrary factors 2. and 2.14 in 'right', 'top' and 'wspace', 'hspace'
gs.update(left=1./pixel_width, right=1.-2.*1./pixel_width, bottom = text_space/float(rows), top = 1.-2.*1./pixel_height, wspace = 2.14*((NLAYERS-1)*cols+1)/(scale*npixels_width), hspace = 1.2*text_space)
figure.set_figwidth(pixel_width/100)
figure.set_figheight(pixel_height/100)
figure.set_facecolor('white')
all_img_2D = [(Layer[nlayer].get_weights()) for nlayer in xrange(1,NLAYERS)]
vocabulary_file = open('./data-sets/20Newsgroups/vocabulary.txt', 'r')
vocabulary = []
for line in vocabulary_file:
vocabulary.append(line[0:-1]) # omits the '\n' at the end of each line
vocabulary = np.asarray(vocabulary)
try:
label_names = config.get()['dataset']['classes']
except:
label_file = open('./data-sets/20Newsgroups/label_names.txt', 'r')
label_names = []
for line in label_file:
label_names.append(line[0:-1]) # omits the '\n' at the end of each line
label_names = np.asarray(label_names)
# ymax = np.power(2,np.ceil(np.log2(np.max(all_img_2D[0][first:last,:]))))
for nimage in xrange(first,last):
for nlayer in xrange(1,NLAYERS):
if (nlayer == 1):
# for some reason this produces a memory leak in combination with imshow:
#img_2D = Layer[nlayer].get_weights()[nimage,:]
img_2D = all_img_2D[nlayer-1][nimage,:]
index = np.argsort(img_2D)[::-1][0:20]
np.set_printoptions(threshold=np.nan)
width = 0.8 # bar width
ax = plt.subplot(gs[nimage*(NLAYERS-1)-first+nlayer-1])
xTickMarks = vocabulary[index]
ax.set_xticks(np.arange(index.shape[0])+0.5*width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=90, fontsize=16)
ax.set_yticks([])
# plt.ylim([0, ymax])
figure_title = label_names[np.argmax(all_img_2D[nlayer][:,nimage])] \
+ '\np(k|c) = ' + str(np.round(np.max(all_img_2D[nlayer][:,nimage])/np.sum(all_img_2D[nlayer][:,nimage])*100.)) + '%'\
+ '\np(c|k) = ' + str(np.round(np.max(all_img_2D[nlayer][:,nimage])*100.)) + '%'
plt.title(figure_title, y=0.65, fontsize=20)
plt.axis('on')
ax.bar(np.arange(index.shape[0])
import numpy as np
from matplotlib import pyplot as plt
from iminuit import cost, Minuit
from scipy.constants import k, hbar
from scipy.special import kv, iv
class ResonanceKid():
"""
Class to automate and simplify the handling of resonance data and fitting.
Current main functionalities:
plotting
fitting
"""
def __init__(
self,
filename,
norm = True,
shift = True
):
self._fit_result = None
self._readfile(filename, norm, shift)
def model(val_x, val_a, val_b, val_c, val_d, val_q, val_qc, val_phi):
pol = val_a + val_b*val_x + val_c*val_x**2 + val_d*val_x**3
quad = 1 - (val_q/val_qc)*np.exp(1j*val_phi)/(1-2j*val_q*val_x)
return pol*abs(quad)
self._fit_function = model
@property
def freqs(self):
return self._freqs
@property
def amps(self):
return self._amps
@property
def err_amps(self):
return self._err_amps
@property
def min_freq(self):
return self._min_freq
@property
def amp_max(self):
return self._amp_max
@property
def chi2(self):
if self.fit_result is None:
print("No fit found: doing it now")
_ = self.fit()
return self.fit_result.fval / (len(self.amps) - self.fit_result.npar)
@property
def fit_result(self):
return self._fit_result
@fit_result.setter
def fit_result(self, value):
self._fit_result = value
def _readfile(self, filename, norm, shift):
freqs = []
amps = []
with open(filename, encoding='utf-8') as file:
for line in file:
splitted = [float(x) for x in line.split('\t')]
freqs.append(splitted[0])
amps.append(np.sqrt(splitted[1]**2 + splitted[2]**2))
self._min_freq = freqs[amps.index(min(amps))]
self._amp_max = amps[0]
err_amps = [0.01]*len(amps)
self._freqs = np.array(freqs)
self._amps = np.array(amps)
self._err_amps = np.array(err_amps)
if norm:
self._amps = self.amps / self.amp_max
if shift:
self._freqs = (self.freqs - self.min_freq)/self.min_freq
def fit(self, init_parameters = None):
if init_parameters is None:
init_parameters = [1, 1e-10, 1e-15, 1e-20, 1e3, 1e3, 0.15]
cost_func = cost.LeastSquares(self.freqs, self.amps, self.err_amps, self._fit_function)
m_obj = Minuit(cost_func, *init_parameters)
self.fit_result = m_obj
return m_obj.migrad()
def plot_fit(self):
plt.scatter(self.freqs, self.amps, s=0.5)
if self.fit_result is None:
print("No fit found: doing it now")
_ = self.fit()
plt.plot(self.freqs, self._fit_function(self.freqs, *self.fit_result.values),
color='red', label='fit')
plt.show()
def plot_amp(self):
plt.scatter(self.freqs, self.amps, s=0.5)
plt.show()
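# Hedged usage sketch (illustration only): assuming a tab-separated data file whose columns
# are frequency, I and Q, as expected by _readfile above, the class might be used as:
#
#     res = ResonanceKid('resonance_scan.txt')
#     res.fit()
#     print(res.chi2)
#     res.plot_fit()
#
# 'resonance_scan.txt' is a hypothetical file name used only for illustration.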
class GapFinder():
def __init__(
self,
filename,
omega = 3.03*1e9,
inv_q_0 = 4.791014e-5,
alpha = 0.66
):
self._fit_result = None
self.omega = omega
self.inv_q_0 = inv_q_0
self.alpha = alpha
self._readfile(filename)
def model(val_t, delta0):
val_t = val_t * 1e-3
omega = self.omega
xi = hbar * omega / (2 * k * val_t)
ourk = 1.380649
sigma1 = 4*np.exp(-delta0/(ourk*val_t))*np.sinh(xi)*kv(0, xi)
sigma2 = np.pi*(1-2*np.exp(-delta0/(ourk*val_t))*np.exp(-xi)
import numpy as np
def CPSC2019_challenge(result):
pos=np.argwhere(result>0.5).flatten()
rpos = []
pre = 0
last = len(pos)
for j in np.where(np.diff(pos)>2)[0]:
if j-pre>2:
rpos.append((pos[pre]+pos[j])*4)
pre = j+1
rpos.append((pos[pre]+pos[last-1])*4)
qrs = np.array(rpos)
qrs_diff = np.diff(qrs)
check = True
while check:
    # repeat until no pair of detections closer than 100 samples remains
    check = False
    qrs_diff = np.diff(qrs)
    for r in range(len(qrs_diff)):
        if qrs_diff[r] < 100:
            # drop the detection with the lower network output and re-scan
            if result[int(qrs[r]/8)] > result[int(qrs[r+1]/8)]:
                qrs = np.delete(qrs, r+1)
            else:
                qrs = np.delete(qrs, r)
            check = True
            break
hr = np.array([loc for loc in qrs if (loc > 2750 and loc < 4750)])
if len(hr)>1:
hr = round( 60 * 500 / np.mean(np.diff(hr)))
import math
from src import constants
from src.plot_functions.plot_agentEstimator import AgentEstimatorPloter
from src.plot_functions.plot_controller import ControllerPlot, plot_scenario, plot_scenario_last
from src.plot_functions.plot_targetEstimator import Analyser_Target_TargetEstimator_FormatCSV
import matplotlib.pyplot as plt
import numpy as np
constants.ResultsPath.folder = "../../to_share"
constants.ResultsPath.name_simulation = "controller-repulsive-mode"
#constants.ResultsPath.folder = "../../results"
#constants.ResultsPath.name_simulation = "My_new_map"
target_ploter = analyser_simulated_data = Analyser_Target_TargetEstimator_FormatCSV("",
constants.ResultsPath.SAVE_LOAD_DATA_REFERENCE,
constants.ResultsPath.SAVE_LAOD_PLOT_FOLDER)
user_plotter = Analyser_Target_TargetEstimator_FormatCSV(100, constants.ResultsPath.SAVE_LOAD_DATA_MEMORY_AGENT,
constants.ResultsPath.SAVE_LOAD_PLOT_MEMORY_AGENT)
controller_ploter_0 = ControllerPlot(0)
fig = plt.figure(figsize=(12, 8),tight_layout = True)
fig50 = plt.figure(figsize=(12, 8),tight_layout = True)
ax = fig.add_subplot(1, 1, 1)
ax50 = fig50.add_subplot(1, 1, 1)
colors = ["green","orangered"]
for element,color in zip(target_ploter.simulated_data_sort_by_target,colors):
sc = ax.scatter(element.data_list[7], element.data_list[8], c=np.array(element.data_list[0]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 15:00:09 2019
@author: sebastian and Casper
modified November 2019 by Melisa
modified January 2020 by Morgane
"""
import numpy as np
import src.configuration
import matplotlib.pyplot as plt
from caiman.source_extraction.cnmf.cnmf import load_CNMF
from src.Database.database_connection import database
mycursor = database.cursor()
from src.Steps.component_evaluation import run_component_evaluation as main_component_evaluation
#%% Settings
mouse_number = 56165
session = 1
init_trial = 6
end_trial = 11
is_rest = 1
sql = "SELECT * FROM Analysis WHERE mouse=%s AND session= %s AND is_rest=%s AND cropping_v =%s AND motion_correction_v =%s AND source_extraction_v=%s AND alignment_v=%s ORDER BY trial > 5"
val = [mouse_number, session, is_rest, 1, 1, 1, 0]
mycursor.execute(sql, val)
myresult = mycursor.fetchone()
for x in myresult:
selected_rows = x
corr_mean_array = []
pnr_mean_array = []
corr_std_array = []
pnr_std_array = []
trial_name_array = []
corr_mean_array_is_rest = []
pnr_mean_array_is_rest = []
corr_std_array_is_rest = []
pnr_std_array_is_rest = []
trial_name_array_is_rest = []
for i in range(len(selected_rows)):
row = selected_rows.iloc[2 * i]
# Get the index from the row
index = row.name
source_extraction_output = eval(row.loc['source_extraction_output'])
cn_filter = np.load(source_extraction_output['meta']['corr']['main'])
pnr = np.load(source_extraction_output['meta']['pnr']['main'])
corr_mean_array = np.append(corr_mean_array, np.mean(cn_filter))
pnr_mean_array = np.append(pnr_mean_array, np.mean(pnr))
corr_std_array = np.append(corr_std_array, np.std(cn_filter))
pnr_std_array = np.append(pnr_std_array, np.std(pnr))
trial_name_array = np.append(trial_name_array, db.get_trial_name(index[2], index[3]))
row = selected_rows.iloc[2 * i + 1]
# Get the index from the row
index = row.name
source_extraction_output = eval(row.loc['source_extraction_output'])
cn_filter = np.load(source_extraction_output['meta']['corr']['main'])
pnr = np.load(source_extraction_output['meta']['pnr']['main'])
corr_mean_array_is_rest = np.append(corr_mean_array_is_rest, np.mean(cn_filter))
pnr_mean_array_is_rest = np.append(pnr_mean_array_is_rest, np.mean(pnr))
corr_std_array_is_rest = np.append(corr_std_array_is_rest, np.std(cn_filter))
pnr_std_array_is_rest = np.append(pnr_std_array_is_rest, np.std(pnr))
trial_name_array_is_rest = np.append(trial_name_array_is_rest, db.get_trial_name(index[2], index[3]))
# %% Plot correlation and peak to noise measures with error bars
vlines = [1, 6, ] # 11,16,22,27,32,37,43]
vlines_session = [1] # ,21,43]
corr_y_min = 0.2
corr_y_max = 0.7
pnr_y_min = 0
pnr_y_max = 10
linestyle = '--'
fig, axes = plt.subplots(2, 2, sharex=True)
fig.set_size_inches(15, 10)
N = len(corr_mean_array)
axes[0][0].vlines(vlines, ymin=corr_y_min, ymax=corr_y_max, color='red', linestyle=linestyle)
axes[0][0].vlines(vlines_session, ymin=corr_y_min, ymax=corr_y_max, color='blue', linestyle=linestyle)
axes[0][0].errorbar(np.arange(1, N + 1), corr_mean_array, corr_std_array)
axes[0][0].legend(('TrialDays', 'Testing', 'Corr'))
# axes[0].set_xticks(np.arange(0,N)[::2])
# axes[0].set_xticklabels(trial_name_array[::2])
# axes[0].set_xlabel('trial')
axes[0][0].set_title('correlation')
axes[0][0].set_ylabel('mean')
axes[0][0].set_ylim(corr_y_min, corr_y_max)
axes[1][0].vlines(vlines, ymin=pnr_y_min, ymax=pnr_y_max, color='red', linestyle=linestyle)
axes[1][0].vlines(vlines_session, ymin=pnr_y_min, ymax=pnr_y_max, color='blue', linestyle=linestyle)
axes[1][0].errorbar(np.arange(1, N + 1), pnr_mean_array, pnr_std_array, c='orange')
axes[1][0].legend(('TrialDays', 'Testing', 'pnr'))
# axes[1].set_xticks(np.arange(0, N)[::2])
# axes[1].set_xticklabels(trial_name_array[::2])
axes[1][0].set_title('pnr')
axes[1][0].set_ylabel('mean')
axes[1][0].set_xlabel('trial')
axes[1][0].set_ylim(pnr_y_min, pnr_y_max)
axes[0][1].vlines(vlines, ymin=corr_y_min, ymax=corr_y_max, color='red', linestyle=linestyle)
axes[0][1].vlines(vlines_session, ymin=corr_y_min, ymax=corr_y_max, color='blue', linestyle=linestyle)
axes[0][1].errorbar(np.arange(1, N + 1), corr_mean_array_is_rest, corr_std_array_is_rest)
# axes[0].set_xticks(np.arange(0,N)[::2])
# axes[0].set_xticklabels(trial_name_array[::2])
# axes[0].set_xlabel('trial')
axes[0][1].set_title('correlation_R')
axes[0][1].set_ylabel('mean')
axes[0][1].set_ylim(corr_y_min, corr_y_max)
axes[1][1].vlines(vlines, ymin=pnr_y_min, ymax=pnr_y_max, color='red', linestyle=linestyle)
axes[1][1].vlines(vlines_session, ymin=pnr_y_min, ymax=pnr_y_max, color='blue', linestyle=linestyle)
# axes[1].vlines(vlines, ymin=4.3, ymax=5.5, color='red', linestyle=linestyle)
axes[1][1].errorbar(np.arange(1, N + 1), pnr_mean_array_is_rest, pnr_std_array_is_rest, c='orange')
# axes[1].set_xticks(np.arange(0, N)[::2])
# axes[1].set_xticklabels(trial_name_array[::2])
axes[1][1].set_title('pnr_is_rest')
axes[1][1].set_ylabel('mean')
axes[1][1].set_xlabel('trial')
axes[1][1].set_ylim(pnr_y_min, pnr_y_max)
# axes[1].set_ylim(4.3, 8.2)
plt.subplots_adjust()
fig.savefig('data/interim/source_extraction/trial_wise/meta/figures/fig:corrpnrphotobleaching' + str(56165) + '.png')
# %% Plot mean correlation
linestyle = '--'
fig, axes = plt.subplots(2, 2, sharex=True)
fig.set_size_inches(15, 10)
N = len(corr_mean_array)
axes[0][0].vlines(vlines, ymin=corr_y_min, ymax=corr_y_max, color='red', linestyle=linestyle)
axes[0][0].vlines(vlines_session, ymin=corr_y_min, ymax=corr_y_max, color='blue', linestyle=linestyle)
axes[0][0].plot(np.arange(1, N + 1), corr_mean_array)
axes[0][0].legend(('TrialDays', 'Testing', 'Corr'))
# axes[0].set_xticks(np.arange(0,N)[::2])
# axes[0].set_xticklabels(trial_name_array[::2])
# axes[0].set_xlabel('trial')
axes[0][0].set_title('correlation')
axes[0][0].set_ylabel('mean')
axes[0][0].set_ylim(0.4, 0.7)
axes[1][0].vlines(vlines, ymin=pnr_y_min, ymax=pnr_y_max, color='red', linestyle=linestyle)
axes[1][0].vlines(vlines_session, ymin=pnr_y_min, ymax=pnr_y_max, color='blue', linestyle=linestyle)
axes[1][0].plot(np.arange(1, N + 1), pnr_mean_array, c='orange')
axes[1][0].legend(('TrialDays', 'Testing', 'pnr'))
# axes[1].set_xticks(np.arange(0, N)[::2])
# axes[1].set_xticklabels(trial_name_array[::2])
axes[1][0].set_title('pnr')
axes[1][0].set_ylabel('mean')
axes[1][0].set_xlabel('trial')
axes[1][0].set_ylim(0, 10)
N = len(corr_mean_array_is_rest)
axes[0][1].plot(np.arange(1, N + 1), corr_mean_array_is_rest)
axes[0][1].vlines(vlines, ymin=corr_y_min, ymax=corr_y_max, color='red', linestyle=linestyle)
axes[0][1].vlines(vlines_session, ymin=corr_y_min, ymax=corr_y_max, color='blue', linestyle=linestyle)
# axes[0].set_xticks(np.arange(0,N)[::2])
# axes[0].set_xticklabels(trial_name_array[::2])
# axes[0].set_xlabel('trial')
axes[0][1].set_title('correlation_R')
axes[0][1].set_ylabel('mean')
axes[0][1].set_ylim(0.4, 0.7)
# axes[1].vlines(vlines, ymin=4.3, ymax=5.5, color='red', linestyle=linestyle)
axes[1][1].plot(np.arange(1, N + 1), pnr_mean_array_is_rest, c='orange')
# axes[1].set_xticks(np.arange(0, N)[::2])
# axes[1].set_xticklabels(trial_name_array[::2])
axes[1][1].vlines(vlines, ymin=pnr_y_min, ymax=pnr_y_max, color='red', linestyle=linestyle)
axes[1][1].vlines(vlines_session, ymin=pnr_y_min, ymax=pnr_y_max, color='blue', linestyle=linestyle)
axes[1][1].set_title('pnr_is_rest')
axes[1][1].set_ylabel('mean')
axes[1][1].set_xlabel('trial')
axes[1][1].set_ylim(1, 10)
# axes[1].set_ylim(4.3, 8.2)
plt.subplots_adjust()
fig.savefig(
'data/interim/source_extraction/trial_wise/meta/figures/fig:corrpnrphotobleaching' + str(56165) + '_mean.png')
# %% Plot number of detected cells and number of accepted cells using diferent criteria for acceptance
pcc_iteration = [0.95, 0.99]
min_SNR_iteration = [2, 3, 4, 5] # adaptive way to set threshold on the transient size
N_trials = 25
number_cells = np.zeros(
(len(pcc_iteration), len(min_SNR_iteration), N_trials)) ## matrix for saving number of detected components
number_cells_ac = np.zeros(number_cells.shape) ## matrix for saving number of accepted components
number_cells_R = np.zeros(number_cells.shape) ## matrix for saving number of detected components
number_cells_ac_R = np.zeros(number_cells.shape) ## matrix for saving number of accepted components
i = 0
for r_values_min in pcc_iteration:
j = 0
for min_SNR in min_SNR_iteration:
for trial in range(15, 25):
row = selected_rows.iloc[2 * trial]
# Get the index from the row
index = row.name
parameters_component_evaluation = {'min_SNR': min_SNR,
'rval_thr': r_values_min,
'use_cnn': False}
main_component_evaluation(index, row, parameters_component_evaluation)
component_evaluation_output = eval(row.loc['component_evaluation_output'])
input_hdf5_file_path = component_evaluation_output['main']
cnm = load_CNMF(input_hdf5_file_path)
number_cells[i][j][trial] = len(cnm.estimates.C)
number_cells_ac[i][j][trial] = len(cnm.estimates.idx_components)
row = selected_rows.iloc[2 * trial + 1]
# Get the index from the row
index = row.name
main_component_evaluation(index, row, parameters_component_evaluation)
component_evaluation_output = eval(row.loc['component_evaluation_output'])
input_hdf5_file_path = component_evaluation_output['main']
cnm = load_CNMF(input_hdf5_file_path)
number_cells_R[i][j][trial] = len(cnm.estimates.C)
number_cells_ac_R[i][j][trial] = len(cnm.estimates.idx_components)
j = j + 1
i = i + 1
# %% Ploting
N_trials = N_trials + 1
vlines = [1, 6, 11, 16, 22, 27]
vlines_session = [21]
ymin = 5
fig, axes = plt.subplots(2, 4, sharex=True)
fig.set_size_inches(12, 10)
linestyle = '-'
axes[0][0].plot(np.arange(1, N_trials), number_cells[0][0][:], linestyle=linestyle)
linestyle = ':'
axes[0][0].plot(np.arange(1, N_trials), number_cells_ac[0][0][:].T, linestyle=linestyle)
axes[0][0].plot(np.arange(1, N_trials), number_cells_ac[1][0][:].T, linestyle=linestyle)
axes[0][0].vlines(vlines, ymin=ymin, ymax=300, color='red', linestyle=linestyle)
axes[0][0].vlines(vlines_session, ymin=ymin, ymax=300, color='blue', linestyle=linestyle)
axes[0][0].set_title('min_SNR = 2')
axes[0][0].set_ylabel('#cells')
axes[0][0].set_xlabel('trial')
axes[0][0].legend(('DetectedCells', 'pcc=0.95', 'pcc=0.99'))
axes[0][0].set_ylim(ymin, 300)
axes[0][1].vlines(vlines, ymin=ymin, ymax=300, color='red', linestyle=linestyle)
axes[0][1].vlines(vlines_session, ymin=ymin, ymax=300, color='blue', linestyle=linestyle)
linestyle = '-'
axes[0][1].plot(np.arange(1, N_trials), number_cells[0][1][:], linestyle=linestyle)
linestyle = ':'
axes[0][1].plot(np.arange(1, N_trials), number_cells_ac[0][1][:].T, linestyle=linestyle)
axes[0][1].plot(np.arange(1, N_trials), number_cells_ac[1][1][:].T, linestyle=linestyle)
axes[0][1].set_title('min_SNR = 3')
axes[0][1].set_ylabel('#cells')
axes[0][1].set_xlabel('trial')
axes[0][1].set_ylim(ymin, 300)
axes[0][2].vlines(vlines, ymin=ymin, ymax=300, color='red', linestyle=linestyle)
axes[0][2].vlines(vlines_session, ymin=ymin, ymax=300, color='blue', linestyle=linestyle)
linestyle = '-'
axes[0][2].plot(np.arange(1, N_trials), number_cells[0][2][:], linestyle=linestyle)
linestyle = ':'
axes[0][2].plot(np.arange(1, N_trials), number_cells_ac[0][2][:].T, linestyle=linestyle)
axes[0][2].plot(np.arange(1, N_trials), number_cells_ac[1][2][:].T, linestyle=linestyle)
axes[0][2].set_title('min_SNR = 4')
axes[0][2].set_ylabel('#cells')
axes[0][2].set_xlabel('trial')
axes[0][2].set_ylim(ymin, 300)
axes[0][3].vlines(vlines, ymin=ymin, ymax=300, color='red', linestyle=linestyle)
axes[0][3].vlines(vlines_session, ymin=ymin, ymax=300, color='blue', linestyle=linestyle)
linestyle = '-'
axes[0][3].plot(np.arange(1, N_trials)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.units import Quantity, Unit
from astropy.coordinates import Angle
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from ..extern.validator import validate_physical_type
from ..utils.array import array_stats_str
from ..utils.energy import Energy, EnergyBounds
from ..utils.scripts import make_path
from ..utils.gauss import MultiGauss2D
from .psf_3d import PSF3D
from . import EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""
Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_lo : `~astropy.units.Quantity`
Lower energy boundary of the energy bin.
energy_hi : `~astropy.units.Quantity`
Upper energy boundary of the energy bin.
theta : `~astropy.units.Quantity`
Center values of the theta bins.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
energy_thresh_lo : `~astropy.units.Quantity`
Lower safe energy threshold of the psf.
energy_thresh_hi : `~astropy.units.Quantity`
Upper safe energy threshold of the psf.
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/tests/unbundled/irfs/psf.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68, show_safe_energy=False)
plt.show()
"""
def __init__(
self,
energy_lo,
energy_hi,
theta,
sigmas,
norms,
energy_thresh_lo=Quantity(0.1, "TeV"),
energy_thresh_hi=Quantity(100, "TeV"),
):
# Validate input
validate_physical_type("energy_lo", energy_lo, "energy")
validate_physical_type("energy_hi", energy_hi, "energy")
validate_physical_type("theta", theta, "angle")
validate_physical_type("energy_thresh_lo", energy_thresh_lo, "energy")
validate_physical_type("energy_thresh_hi", energy_thresh_hi, "energy")
# Set attributes
self.energy_lo = energy_lo.to("TeV")
self.energy_hi = energy_hi.to("TeV")
ebounds = EnergyBounds.from_lower_and_upper_bounds(
self.energy_lo, self.energy_hi
)
self.energy = ebounds.log_centers
self.theta = theta.to("deg")
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.energy_thresh_lo = energy_thresh_lo.to("TeV")
self.energy_thresh_hi = energy_thresh_hi.to("TeV")
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
filename = make_path(filename)
with fits.open(str(filename), memmap=False) as hdulist:
psf = cls.from_fits(hdulist[hdu])
return psf
@classmethod
def from_fits(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BintableHDU`
HDU
"""
energy_lo = Quantity(hdu.data["ENERG_LO"][0], "TeV")
energy_hi = Quantity(hdu.data["ENERG_HI"][0], "TeV")
theta = Angle(hdu.data["THETA_LO"][0], "deg")
# Get sigmas
shape = (len(theta), len(energy_hi))
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
opts = {}
try:
opts["energy_thresh_lo"] = Quantity(hdu.header["LO_THRES"], "TeV")
opts["energy_thresh_hi"] = Quantity(hdu.header["HI_THRES"], "TeV")
except KeyError:
pass
return cls(energy_lo, energy_hi, theta, sigmas, norms, **opts)
def to_fits(self):
"""
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"ENERG_LO",
"ENERG_HI",
"THETA_LO",
"THETA_HI",
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["TeV", "TeV", "deg", "deg", "", "deg", "", "deg", "", "deg"]
data = [
self.energy_lo,
self.energy_hi,
self.theta,
self.theta,
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
table = Table()
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header["LO_THRES"] = self.energy_thresh_lo.value
hdu.header["HI_THRES"] = self.energy_thresh_hi.value
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_fits().writeto(filename, *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.image.models.MultiGauss2D` model for given energy and theta.
No interpolation is used.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.morphology.MultiGauss2D`
Multigauss PSF object.
"""
energy = Energy(energy)
theta = Angle(theta)
# Find nearest energy value
i = np.argmin(np.abs(self.energy - energy))
j = np.argmin(np.abs(self.theta - theta))
# TODO: Use some kind of interpolation to get PSF
# parameters for every energy and theta
# Select correct gauss parameters for given energy and theta
sigmas = [_[j][i] for _ in self.sigmas]
norms = [_[j][i] for _ in self.norms]
pars = {}
pars["scale"], pars["A_2"], pars["A_3"] = norms
pars["sigma_1"], pars["sigma_2"], pars["sigma_3"] = sigmas
psf = HESSMultiGaussPSF(pars)
return psf.to_MultiGauss2D(normalize=True)
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energy = Energy(energy).flatten() # pylint:disable=assignment-from-no-return
theta = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx_energy in range(len(energy)):
for idx_theta in range(len(theta)):
try:
psf = self.psf_at_energy_and_theta(
energy[idx_energy], theta[idx_theta]
)
radius[idx_theta, idx_energy] = psf.containment_radius(fraction)
except ValueError:
log.debug(
"Computing containment failed for E = {:.2f}"
" and Theta={:.2f}".format(energy[idx_energy], theta[idx_theta])
)
log.debug("Sigmas: {} Norms: {}".format(psf.sigmas, psf.norms))
radius[idx_theta, idx_energy] = np.nan
return Angle(radius, "deg")
def plot_containment(
self, fraction=0.68, ax=None, show_safe_energy=False, add_cbar=True, **kwargs
):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_hi
offset = self.theta
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colobar
ax.semilogx()
ax.set_ylabel("Offset ({unit})".format(unit=offset.unit))
ax.set_xlabel("Energy ({unit})".format(unit=energy.unit))
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
if show_safe_energy:
self._plot_safe_energy_range(ax)
if add_cbar:
label = "Containment radius R{:.0f} ({})".format(
100 * fraction, containment.unit
)
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
# self.theta holds the offset angles; the class defines no self.offset attribute
omin = self.theta.value.min()
omax = self.theta.value.max()
ax.hlines(y=esafe.value, xmin=omin, xmax=omax)
label = "Safe energy threshold: {:3.2f}".format(esafe)
ax.text(x=0.1, y=0.9 * esafe.value, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = Energy.equal_log_spacing(self.energy_lo[0], self.energy_hi[-1], 100)
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
label = "{} deg, {:.1f}%".format(theta, 100 * fraction)
ax.plot(energy.value, radius.value, label=label)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=Quantity([1.0, 10.0], "TeV"),
thetas=Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
# Summarise data members
ss += array_stats_str(self.theta.to("deg"), "Theta")
ss += array_stats_str(self.energy_hi, "Energy hi")
ss += array_stats_str(self.energy_lo, "Energy lo")
ss += "Safe energy threshold lo: {:6.3f}\n".format(self.energy_thresh_lo)
ss += "Safe energy threshold hi: {:6.3f}\n".format(self.energy_thresh_hi)
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""
Convert the triple Gaussian PSF to a table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
import numpy as np
class NaiveBayes():
def __init__(self,train_data,label_data):
        '''
        Initialize the NaiveBayes classifier from the given training data and labels.
        Parameters
        ----------
        train_data : numpy.ndarray
        label_data : numpy.ndarray
        '''
self.train_data = train_data
self.label_data = label_data
        self.feature_count = len(self.train_data[0])
self.classes = np.unique(self.label_data)
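        # One prior per class; get_prior (not shown in this excerpt) is assumed to return
        # the empirical frequency of that class in label_data.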
self.priors = {i:self.get_prior(i) for i in
|
np.unique(self.label_data)
|
numpy.unique
|
"""
Feedback Magnitude Pruning (FMP)
Author: <NAME> (<EMAIL>)
For the ITU AI/ML in 5G Challenge: Lightning-Fast Modulation Classification with Hardware-Efficient Neural Networks
"""
## Import Packages ##
import torch, math, copy, h5py, os.path, time
import numpy as np
import matplotlib.pyplot as plt
from torch import nn
import brevitas.nn as qnn
from brevitas.quant import IntBias
from brevitas.inject.enum import ScalingImplType
from brevitas.inject.defaults import Int8ActPerTensorFloatMinMaxInit
import torch.nn.utils.prune as prune
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score
## Name experiment, make directory ##
expt_name = '4_bit_fmp'
print('Experiment Name: ' + expt_name)
os.mkdir(expt_name)
pruning_types = ['Unstructured']#['Structured','Unstructured']#
## Adjustable hyperparameters for base model ##
input_bits = 4 # Originally 8
a_bits = 4 # Originally 8
w_bits = 4 # Originally 8
filters_conv = 64 # Originally 64
filters_dense = 128 # Originally 128
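# Sketch (assumption, not code from the original script): these widths would typically
# parameterize Brevitas quantized layers in the base classifier, e.g.
#   qnn.QuantConv1d(2, filters_conv, 3, weight_bit_width=w_bits, bias=False)
#   qnn.QuantReLU(bit_width=a_bits)
#   qnn.QuantLinear(filters_conv, filters_dense, weight_bit_width=w_bits, bias=False)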
## Pruning parameters ##
# define the initial pruning amount, p (will not go below 0.05; for structured pruning, not below 0.01)
p = .2
p_copy = copy.deepcopy(p) # copy is used when changing pruning type
# define the factor by which p will be reduced by
n = 2
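# Illustrative sketch of the feedback schedule these parameters drive (an assumption based
# on the comments above, not code from the original script): prune by p, retrain, and if
# accuracy degrades revert the step and shrink p by the factor n until it falls below its floor.
def next_pruning_amount(p, accuracy_dropped, n=n, floor=0.05):
    """Hypothetical helper: return the pruning amount to use for the next FMP step."""
    if accuracy_dropped:
        p = p / n  # back off when feedback says the last pruning step hurt accuracy
    return p if p >= floor else 0.0  # stop pruning once p drops below its floor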
## Select which GPU to use (if available) ##
gpu = 0
if torch.cuda.is_available():
    torch.cuda.set_device(gpu)
print("Using GPU %d" % gpu)
else:
gpu = None
print("Using CPU only")
## Load Data ##
dataset_path = path_to_data + "/GOLD_XYZ_OSC.0001_1024.hdf5"  # path_to_data must point to the RadioML 2018 dataset directory
assert os.path.isfile(dataset_path), "Dataset file not found: " + dataset_path
class radioml_18_dataset(Dataset):
def __init__(self, dataset_path, load_into_ram=False):
super(radioml_18_dataset, self).__init__()
h5_file = h5py.File(dataset_path,'r')
if load_into_ram:
self.data = h5_file['X'][:]
else:
self.data = h5_file['X']
self.mod = np.argmax(h5_file['Y'], axis=1) # comes in one-hot encoding
self.snr = h5_file['Z'][:,0]
self.len = self.data.shape[0]
self.mod_classes = ['OOK','4ASK','8ASK','BPSK','QPSK','8PSK','16PSK','32PSK',
'16APSK','32APSK','64APSK','128APSK','16QAM','32QAM','64QAM','128QAM','256QAM',
'AM-SSB-WC','AM-SSB-SC','AM-DSB-WC','AM-DSB-SC','FM','GMSK','OQPSK']
self.snr_classes = np.arange(-20., 32., 2) # -20dB to 30dB
# do not touch this seed to ensure the prescribed train/test split!
np.random.seed(2018)
train_indices = []
test_indices = []
for mod in range(0, 24): # all modulations (0 to 23)
for snr_idx in range(0, 26): # all SNRs (0 to 25 = -20dB to +30dB)
# 'X' holds frames strictly ordered by modulation and SNR
start_idx = 26*4096*mod + 4096*snr_idx
indices_subclass = list(range(start_idx, start_idx+4096))
# 90%/10% training/test split, applied evenly for each mod-SNR pair
split = int(np.ceil(0.1 * 4096))
np.random.shuffle(indices_subclass)
train_indices_subclass = indices_subclass[split:]
test_indices_subclass = indices_subclass[:split]
# you could train on a subset of the data, e.g. based on the SNR
# here we use all available training samples
if snr_idx >= 0:
train_indices.extend(train_indices_subclass)
test_indices.extend(test_indices_subclass)
self.train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)
self.test_sampler = torch.utils.data.SubsetRandomSampler(test_indices)
def __getitem__(self, idx):
# transpose frame into Pytorch channels-first format (NCL = -1,2,1024)
return self.data[idx].transpose(), self.mod[idx], self.snr[idx]
def __len__(self):
return self.len
dataset = radioml_18_dataset(dataset_path, load_into_ram=True)
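# Example (not in the original excerpt; batch size is an assumption): the samplers defined
# above plug directly into PyTorch DataLoaders for the prescribed 90%/10% split.
train_loader = DataLoader(dataset, batch_size=1024, sampler=dataset.train_sampler)
test_loader = DataLoader(dataset, batch_size=1024, sampler=dataset.test_sampler)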
# Setting seeds for reproducibility
torch.manual_seed(0)
|
np.random.seed(0)
|
numpy.random.seed
|
import sapyen_robot
from .base_robot_env import BaseRobotEnv
from robot.python.env.physx_utils import mat2transform, transform2mat
import transforms3d
import numpy as np
import os
from .path_utils import get_assets_path
RGBD_CAMERA_THRESHOLD = 10
CAMERA_TO_LINK = np.zeros([4, 4])
CAMERA_TO_LINK[[0, 1, 2, 3], [2, 0, 1, 3]] = [1, -1, -1, 1]
class MOVOEnv(BaseRobotEnv):
def __init__(self):
"""
Sapien Kinova MOVO base class.
        To use it with a Sapien object environment, call _init_robot instead of __init__ (the common case).
        To load the robot on its own, use __init__ and not _init_robot.
"""
urdf_path = os.path.join(get_assets_path(), "robot/all_robot.urdf")
gripper_material = self.sim.create_material(3.0, 2.0, 0.01)
BaseRobotEnv.__init__(self, urdf_path, gripper_material)
print("Initiate MOVO Environment in stand alone version")
def _init_robot(self) -> None:
"""
Load the robot and controllers
"""
gripper_material = self.sim.create_material(3.0, 2.0, 0.01)
self._load_robot('../assets/robot/single_gripper.urdf', gripper_material)
def _load_controller(self) -> None:
"""
Create controllers, set pd and force limit to each joint with fine tuned value
"""
controllable_wrapper = self.sim.create_controllable_articulation(self.robot)
self._head_joint = ["pan_joint", "tilt_joint"]
self._gripper_joint = ["right_gripper_finger1_joint", "right_gripper_finger2_joint",
"right_gripper_finger3_joint"]
self._body_joint = ["linear_joint"]
self.manger = sapyen_robot.ControllerManger("movo", controllable_wrapper)
self.head_controller = self.manger.create_joint_velocity_controller(self._head_joint, "head")
self.gripper_controller = self.manger.create_joint_velocity_controller(self._gripper_joint, "gripper")
self.body_controller = self.manger.create_joint_velocity_controller(self._body_joint, "body")
# Add joint state publisher to keep in synchronization with ROS
# You must use it if you want to do cartesian control
self.manger.add_joint_state_publisher(60)
self.manger.add_group_trajectory_controller("right_arm")
self.arm_planner = self.manger.create_group_planner("right_arm")
# Cache gripper limit for execute high level action
joint_limit = self.robot.get_qlimits()
gripper_index = self.robot_joint_names.index(self._gripper_joint[0])
self.__gripper_limit = joint_limit[gripper_index, :]
# Cache robot pose
self.root_theta = 0
        self.root_pos = np.array([0, 0], dtype=np.float64)
self.init_qpos = [0, 0, 0, 0.25, -1.9347, 0, -1.5318, 0, 0.9512, -2.24, 0.34, 0.64, -1.413, 0, 0, 0]
# Tune PD controller
self.robot.set_pd(20000, 3000, 2000, np.arange(4))
self.robot.set_drive_qpos(self.init_qpos)
self.robot.set_qpos(self.init_qpos)
self.sim.step()
def close_gripper(self, velocity: float = 2) -> None:
"""
Close gripper with given velocity
:param velocity: Velocity of gripper joint
"""
time_step = self.__gripper_limit[1] / velocity * self.simulation_hz
        for _ in range(int(time_step)):
self.gripper_controller.move_joint(self._gripper_joint, velocity)
def open_gripper(self, velocity: float = 2) -> None:
"""
Open gripper with given velocity
:param velocity: Velocity of gripper joint
"""
time_step = self.__gripper_limit[1] / velocity * self.simulation_hz
        for _ in range(int(time_step)):
self.gripper_controller.move_joint(self._gripper_joint, -velocity)
def move_robot_to_target_place(self, target_pose):
end_pose = np.eye(4)
end_pose[0:2, 3] = target_pose[0:2, 3]
end_pose[0:2, 0:2] = target_pose[0:2, 0:2]
current_pose = np.eye(4)
current_pose[0:2, 3] = self.root_pos
new_theta = transforms3d.euler.mat2euler(end_pose)[2]
x_axis = end_pose[0:3, 3] - current_pose[0:3, 3]
x_axis /= np.linalg.norm(x_axis)
z_axis = np.array([0, 0, 1])
y_axis = np.cross(z_axis, x_axis)
forward_pose = np.stack([x_axis, y_axis, z_axis], axis=1)
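        # Rotation taking the current base orientation to the forward-facing frame,
        # obtained by left-multiplying with the inverse of the current rotation block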
relative_pose =
|
np.linalg.inv(current_pose[0:3, 0:3])
|
numpy.linalg.inv
|
import colorsys
import pickle
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.decomposition import PCA
import ModelFit.config
from uavga.uavgeat import UAVProblem
from uavga.uavgen import UAVGA
class LGFuzzer(object):
def __init__(self, ga_params, model_file, model_trans, model_csv):
"""
:param ga_params:需要参加fuzzing的param
:param model_file: lstm模型文件
:param model_trans: lstm模型的归一化文件
:param model_csv: 数据文件
"""
        # parameters that take part in the GA
self.ga = UAVGA(ga_params)
self.ga.set_trans(model_trans)
self.ga.set_model(model_file)
# read csv
data = pd.read_csv(model_csv, header=0, index_col=0)
self.csv_data = data
    def random_choice_meanshift(self, segment_csv, rate=0.25):
data_class = segment_csv.reshape(
(-1, segment_csv.shape[1] * segment_csv.shape[2]))
bandwidth = estimate_bandwidth(data_class, quantile=rate)
clf = MeanShift(bandwidth=bandwidth, bin_seeding=True)
clf.fit(data_class)
predicted = clf.labels_
print(f'Meanshift class: {max(predicted)}')
# -------------
c = list(map(lambda x: color(tuple(x)), ncolors(max(predicted) + 1)))
#c = np.random.rand(max(predicted) + 1, 1)
#c = list(map(lambda x: color(tuple(x)), ncolors(max(predicted) + 1)))
colors = [c[i] for i in predicted]
pca = PCA(n_components=2, svd_solver='arpack')
show = pca.fit_transform(data_class)
fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(show[:, 0], show[:, 1], show[:, 2], c=colors, s=5)
plt.scatter(show[:, 0], show[:, 1], c=colors, s=5)
plt.show()
# -------------------------
out = []
        for i in range(max(predicted) + 1):  # cluster labels run from 0 to max(predicted)
index = np.where(predicted == i)[0]
col_index = np.random.choice(index, min(index.shape[0], 10))
select = segment_csv[col_index]
out.extend(select)
out = np.array(out)
return out
def run(self, num=0, meanshift=False):
"""
开始fuzzing搜索
:param num: 返回的候选的个数
:return:
"""
segment_csv = self.split_segment()
if num != 0 and not meanshift:
index = np.random.choice(np.arange(segment_csv.shape[0]), num)
segment_csv = segment_csv[index, :, :]
elif meanshift:
            segment_csv = self.random_choice_meanshift(segment_csv)
        obj_population = []  # population
for i, context in enumerate(segment_csv):
self.ga.uavproblem.context_value = context
self.ga.run()
obj_population.append(self.ga.population)
print(f'------------------- {i+1} / {segment_csv.shape[0]} -----------------')
with open('result/pop.pkl','wb') as f:
pickle.dump(obj_population, f)
def split_segment(self):
tmp = self.csv_data.to_numpy()[:, :ModelFit.config.CONTEXT_LEN]
return np.array(np.array_split(tmp, tmp.shape[0] // (ModelFit.config.INPUT_LEN + 1), axis=0))
@staticmethod
def return_best_n_gen(n=1):
candidate_vars = []
candidate_objs = []
with open('result/pop.pkl','rb') as f:
obj_populations = pickle.load(f)
for pop in obj_populations:
pop_v = pop.ObjV
pop_p = pop.Phen
candidate_var_index = np.unique(pop_p, axis=0, return_index=True)[1]
pop_v = pop_v[candidate_var_index]
pop_p = pop_p[candidate_var_index]
            candidate = -1 * pop_v  # negate objective values so argsort gives descending order
            candidate_index = np.argsort(candidate.reshape(-1))
pop_v = pop_v[candidate_index].reshape((-1,1))
pop_p = pop_p[candidate_index].reshape((-1,20))
if n != 0:
candidate_var = pop_v[:min(n, len(pop_v))]
candidate_obj = pop_p[:min(n, len(pop_p))]
candidate_obj = UAVProblem.reasonable_range_static(candidate_obj)
candidate_vars.extend(candidate_var)
candidate_objs.extend(candidate_obj)
return candidate_vars, candidate_objs
@staticmethod
def return_random_n_gen(n=1):
candidate_vars = []
candidate_objs = []
with open('result/pop.pkl', 'rb') as f:
obj_populations = pickle.load(f)
for pop in obj_populations:
pop_v = pop.ObjV
pop_p = pop.Phen
candidate_var_index =
|
np.unique(pop_p, axis=0, return_index=True)
|
numpy.unique
|
import numpy as np
import matplotlib.pyplot as plt
import _pickle as pkl
import pandas as pd
import os,sys,glob, h5py
from neuropixels import generalephys as ephys
from neuropixels.generalephys import placeAxesOnGrid, cleanAxes
from neuropixels import utils_pipeline as utils
from neuropixels import rf_analysis
from neuropixels import psth_and_raster as psth
from neuropixels import reliability_and_precision as rp
from neuropixels.continuous_traces import gaussian_filter1d
from scipy.signal import find_peaks
from scipy.stats import pearsonr, spearmanr, zscore
from itertools import combinations
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import warnings
###################################
def synchphys(software_start,frame_timestamps,df_reaches,samplerate = 30000):
    # synch the ephys with the behavior to find reachmax times
    # (software_start is given in samples, e.g. software_start = 56549737; dividing by the sample rate converts it to seconds)
    st = software_start/samplerate  # convert software start time in samples to seconds
ts_sec = frame_timestamps/samplerate # convert frame timestamps to seconds
ts_z = ts_sec - st #subtract software start from frames
reachmax = np.array(df_reaches.reachMax)
reachmax = reachmax[~np.isnan(reachmax)]
reachmax = reachmax.astype('int')
reach_times = ts_z[reachmax]
return(reach_times)
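# Worked example (values assumed for illustration): a software start of 56549737 samples at
# 30 kHz is ~1885 s; frame timestamps are shifted to that origin so that indexing them with
# the integer reachMax frame numbers yields reach times in seconds on the ephys clock.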
def batch_synch_phys(df,df_start,df_reaches,df_timestamps,mouseid,inputs=False):
    sst = int(df_start[df_start.Mouse_number==mouseid].Proccessor_Start_time)
ind = df_timestamps[df_timestamps.mouse==mouseid].index
ts = np.array(df_timestamps[df_timestamps.mouse==mouseid].timestamps[ind])
ts = np.array(ts[0])
df_reaches = df_reaches[df_reaches.mouse==mouseid]
print('mouse: '+str(mouseid))
print('number of timestamps: '+str(np.shape(ts)))
# if np.shape(ts)[0] > 1000000:
bad_align = []
if inputs==True:
need_ts = input("remove odd indices? (yes or no) ")
if need_ts == 'yes':
print('removed odd indices')
ts_lst = ts.tolist()
del ts_lst[1::2] #remove odd indices
ts = np.array(ts_lst)
else:
ts = ts
try:
reach_times = synchphys(sst,ts,df_reaches)
df_reaches['rMax_t']= reach_times
print('aligned')
except:
print('FRAME TIMESTAMPS DO NOT ALIGN WITH BEHAVIOR '+str(mouseid) + '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
pass
print('plotting heatmap to confirm')
trial_ave_heatmap(df,df_reaches,mouseid,binsize=0.020, start = -3.0,end = 3.0)
print('heatmap complete')
check_heatmap = input("is this correct? (yes or no) ")
bad_align = []
if check_heatmap == 'no':
print('ALIGNING FAILED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! MOUSEID APPENDED TO BAD_ALIGN OUTPUT LIST')
bad_align.append(mouseid)
else:
return(df_reaches)
else:
if np.shape(ts)[0] > 872236:
print('removed odd indices')
ts_lst = ts.tolist()
del ts_lst[1::2] #remove odd indices
ts = np.array(ts_lst)
else:
ts = ts
try:
reach_times = synchphys(sst,ts,df_reaches)
df_reaches['rMax_t']= reach_times
print('aligned')
return(df_reaches)
except:
print('FRAME TIMESTAMPS DO NOT ALIGN WITH BEHAVIOR '+str(mouseid) + '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
bad_align.append(mouseid)
pass
def reach_info(df,df_reaches,mouseid):
print('computing reach epoch info for mouse ' +str(mouseid))
df = df[df.mouse==mouseid].reset_index()
df_reaches = df_reaches[df_reaches.mouse==mouseid].reset_index()
    df_align = pd.DataFrame(df,columns = ['times','depth','layer','group','mouse','cohort','cell','overall_rate','waveform_class','baseline_fr','reach_fr', 'mod_up', 'mod_down', 'peakfr_up', 'peakfr_down', 'max_epochfr', 'min_epochfr'])
try:
reaches = epochfr(df,df_reaches,start = -0.5,end = 0.5,binsize=0.020)
df_align['reach_fr'] = reaches
baseline = epochfr_baseline(df,df_reaches,start = -1.0,end = -0.5,binsize=0.020)
df_align['baseline_fr'] = baseline
ave_reach,normedbins,ave_reach_ = reachave_tensor(df,df_reaches)
df_align['bin_ave'] = ave_reach
df_align['norm_bin_ave'] = normedbins
mod_up, mod_down, maxfr_ts, minfr_ts, maxfr, minfr = movement_mod(df,df_reaches,startb = -1.0,endb = -0.5,starte = -0.5,ende = 0.5,binsize=0.001)
df_align['mod_up'] = mod_up
df_align['mod_down'] = mod_down
df_align['peakfr_up'] = maxfr_ts
df_align['peakfr_down'] = minfr_ts
df_align['max_epochfr'] = maxfr
df_align['min_epochfr'] = minfr
except:
print(str(mouseid) + ' not aligned properly .. check heatmap and timestamps')
return df_align
################# COMPLETE #####################
def trial_ave_heatmap(df,df_reaches,id_,binsize=0.020, start = -4.0,end = 2.0):
#START IS TIME BEFORE REACHMAX
#END IS TIME AFTER REACHMAX -- IF BEFORE REACHMAX THEN END MUST BE NEGATIVE
#collapses mean fr along bins
#initialize
df = df[df.mouse==id_].reset_index()
df_reaches=df_reaches[df_reaches.mouse==id_].reset_index()
print('creating heatmap for ' + str(id_) + '.....')
try:
mod_up, mod_down, maxfr_ts, minfr_ts = movement_mod_heatmap(df,df_reaches)
df['mod_up'] = mod_up
df['mod_down'] = mod_down
df['peak_up'] = maxfr_ts
df['peak_down'] = minfr_ts
print('movement modulated units found')
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0]-1 #number of bins
byreach=np.zeros((len(df_reaches.rMax_t),num_bins))
ave_reach_=np.zeros((len(df),num_bins)) #for tensor purposes
ave_reach = []
normedbins = []
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
edges=np.arange(tmax+start,tmax+end,binsize) #designated bins around this iteration of reachmax
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
byreach[j,:] = hist
meanbinfr = np.mean(byreach,axis=0)
ave_reach.append(meanbinfr)
ave_reach_[i,:] = meanbinfr #for tensor purposes (neural trajectories)
normbins = (meanbinfr-min(meanbinfr))/max(meanbinfr) #per dailey
normedbins.append(normbins)
print('plotting heatmap')
df['binz'] = normedbins
df_s= df.sort_values(by=['peak_up'])
df_bins_ = df_s.binz
df_heatmap = list(filter(any, df_bins_))
fig, ax = plt.subplots(figsize=(20, 10))
fig = sns.heatmap(df_heatmap)
plt.title(str(id_))
plt.show()
plt.close()
except:
print('movement modulation units not found mouse ' +str(id_))
pass
################ EDITING ########################
def reach_psth(df,df_reaches,binsize=0.020,start=-3,end=8):
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0] #number of bins
byreach=np.zeros((np.shape(df)[0],num_bins))
alltrials = []
outcomes = []
units = []
reach = []
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0] #number of bins
byreach=np.zeros((np.shape(df_reaches)[0],num_bins))
alltrials = []
trajs = []
evs = []
outcomes = []
units = []
reach = []
units_=[]
reachs=[]
hists=[]
hist_rastor = []
for i,times in enumerate(df.times): #compare that unit's spike times to each reach max
t = np.array(times) #for reach unit create an array of that unit's spike times
byreach=np.zeros((np.shape(df_reaches)[0],num_bins))
for j,tmax in enumerate(df_reaches.rMax_t): #for each unit
a = tmax+start
b = tmax+end
unit = df.index[i]
units.append(unit)
rea = df_reaches.behaviors[j]
reach.append(rea)
try:
rd = np.array(t[(t >= a) & (t <= b)]) #find if that unit spiked within designated timeframes around reachmax
edges=np.arange(a,b,binsize) #designated bins around this iteration of reachmax
num_bins=edges.shape[0]-1 #number of bins
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
byreach[j,:] = hist
                hists.append(hist)  # list of per-trial histograms
                byreach = center(byreach)
            except:
                pass
alltrials.append(byreach) # tensor rastor - every trial response for every neuron
psth = np.sum(alltrials) #
reachs.append(reach) # which reach
units_.append(units) # unit id
hist_rastor.append(hists) #list rastor
return alltrials,psth,reachs,units_,hist_rastor
################ TO EDIT #########################
def epochfr(df,df_reaches,start,end,binsize=0.020):
## if epoch is BEFORE reachmax (such as baseline) then end variable must be input as negative as per code (i.e. end = -0.5)
byreach=np.zeros((len(df),1))
rd = []
frs = []
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
#rd = np.array(t[(t >= tmax-start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
#edges=np.arange(tmax-start,tmax+end,binsize) #designated bins around specific iteration of reachmax
edges=np.arange(tmax+start,tmax+end,binsize)
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
fr = sum(hist)/abs(end-start) # in Hz
frs.extend([fr])
meanfr = np.mean(frs)
frs = []
byreach[i,:] = meanfr
return byreach
def epochfr_baseline(df,df_reaches,start,end,binsize=0.020):
## if epoch is BEFORE reachmax (such as baseline) then end variable must be input as negative as per code (i.e. end = -0.5)
byreach=np.zeros((len(df),1))
rd = []
frs = []
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
#rd = np.array(t[(t >= tmax-start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
#edges=np.arange(tmax-start,tmax+end,binsize) #designated bins around specific iteration of reachmax
edges=np.arange(tmax+start,tmax+end,binsize)
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
            fr = sum(hist)/abs(end-start)  # in Hz (same expression as in epochfr)
frs.extend([fr])
meanfr = np.mean(frs)
frs = []
byreach[i,:] = meanfr
return byreach
def reachave_bins(df,df_reaches,start,end,binsize=0.020): #DO NOT USE
#collapses mean fr along bins and normalizes
#start and end variables are negative if before reachmax (i.e. baseline)
    raise NotImplementedError('reachave_bins is deprecated -- DO NOT USE')
#initialize
ave_reach = []
normedbins = []
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0]-1 #number of bins
ave_reach_tens=np.zeros((len(df),num_bins)) #for tensor
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
edges=np.arange(tmax+start,tmax+end,binsize) #designated bins around this iteration of reachmax
num_bins=edges.shape[0]-1 #number of bins
byreach=np.zeros((len(df_reaches.rMax_t),num_bins)) #initialize or empty byreach
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
byreach[j,:] = hist
meanbinfr = np.mean(byreach,axis=0)
if sum(meanbinfr) > 0:
ave_reach.append(meanbinfr)
ave_reach_tens[i,:] = meanbinfr #for tensor purposes (neural trajectories)
normbins = (meanbinfr-min(meanbinfr))/max(meanbinfr) #per dailey
normedbins.append(normbins)
return ave_reach, normedbins, ave_reach_tens
def reachave_tensor(df,df_reaches,binsize=0.020, start = -4.0,end = 2.0):
#START IS TIME BEFORE REACHMAX
#END IS TIME AFTER REACHMAX -- IF BEFORE REACHMAX THEN END MUST BE NEGATIVE
#collapses mean fr along bins
#initialize
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0]-1 #number of bins
byreach=np.zeros((len(df_reaches.rMax_t),num_bins))
ave_reach_=np.zeros((len(df),num_bins)) #for tensor purposes
ave_reach = []
normedbins = []
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
edges=np.arange(tmax+start,tmax+end,binsize) #designated bins around this iteration of reachmax
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
byreach[j,:] = hist
meanbinfr = np.mean(byreach,axis=0)
ave_reach.append(meanbinfr)
ave_reach_[i,:] = meanbinfr #for tensor purposes (neural trajectories)
normbins = (meanbinfr-min(meanbinfr))/max(meanbinfr) #per dailey
normedbins.append(normbins)
byreach=np.zeros((len(df_reaches.rMax_t),num_bins))
#df_align['bin_ave'] = ave_reach
#df_align['norm_bin_ave'] = normedbins
return ave_reach, normedbins, ave_reach_
def trial_ave_heatmap_old(df,df_reaches,mouseid,binsize=0.020, start = -4.0,end = 2.0):
#START IS TIME BEFORE REACHMAX
#END IS TIME AFTER REACHMAX -- IF BEFORE REACHMAX THEN END MUST BE NEGATIVE
#collapses mean fr along bins
#initialize
df = df[df.mouse==mouseid]
df_reaches=df_reaches[df_reaches.mouse==mouseid]
print('creating heatmap for ' + str(mouseid) + '.....')
try:
mod_up, mod_down, maxfr_ts, minfr_ts = movement_mod(df,df_reaches)
df['mod_up'] = mod_up
df['mod_down'] = mod_down
df['peak_up'] = maxfr_ts
df['peak_down'] = minfr_ts
print('movement modulated units found')
edges=np.arange(start,end,binsize)
num_bins=edges.shape[0]-1 #number of bins
byreach=np.zeros((len(df_reaches.rMax_t),num_bins))
ave_reach_=np.zeros((len(df),num_bins)) #for tensor purposes
ave_reach = []
normedbins = []
for i,times in enumerate(df.times): #for each unit
t = np.array(times) #for reach unit create an array of that unit's spike times
for j,tmax in enumerate(df_reaches.rMax_t): #compare that unit's spike times to each reach max
rd = np.array(t[(t >= tmax+start) & (t <= tmax+end)]) #find if that unit spiked within designated timeframes around reachmax
edges=np.arange(tmax+start,tmax+end,binsize) #designated bins around this iteration of reachmax
hist=np.histogram(rd,edges)[0] #bin spikes into timeframe
byreach[j,:] = hist
meanbinfr = np.mean(byreach,axis=0)
ave_reach.append(meanbinfr)
ave_reach_[i,:] = meanbinfr #for tensor purposes (neural trajectories)
normbins = (meanbinfr-min(meanbinfr))/max(meanbinfr) #per dailey
normedbins.append(normbins)
print('plotting heatmap')
df['binz'] = normedbins
df_s= df.sort_values(by=['peak_up'])
df_bins_ = df_s.binz
df_heatmap = list(filter(any, df_bins_))
fig, ax = plt.subplots(figsize=(20, 10))
fig = sns.heatmap(df_heatmap)
plt.title(str(mouseid))
plt.show()
plt.close()
except:
print('movement modulation units not found mouse ' +str(mouseid))
pass
def countlist(lst): #to count consecutive numbers in movement-related code
streak_count = []
counter = 1
for i in range(len(lst)):
if i != (len(lst) - 1):
diff = lst[i+1] - lst[i]
if diff == 1:
counter += 1
else:
streak_count.append(counter)
counter = 1
else:
streak_count.append(counter)
return(streak_count)
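# e.g. countlist([1, 2, 3, 7, 8]) -> [3, 2]: lengths of the runs of consecutive indices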
def movement_mod(df,df_reaches,startb = -1.0,endb = -0.5,starte = -0.5,ende = 0.5,binsize=0.001): #binsize 1 ms
edgesb=np.arange(startb,endb,binsize)
num_binsb=edgesb.shape[0]-1 #number of bins
edgese=np.arange(starte,ende,binsize)
num_binse=edgese.shape[0]-1 #number of bins
byreach_b=np.zeros((len(df_reaches.rMax_t),num_binsb))
byreach_e=np.zeros((len(df_reaches.rMax_t),num_binse))
mod_up = []
mod_down = []
maxfr_ts = []
minfr_ts = []
maxfrs = []
minfrs = []
for i,times in enumerate(df.times): #for each unit
t =
|
np.array(times)
|
numpy.array
|
import numpy as np
import numba
import scipy.optimize as sopt
import json
sin = np.sin
cos = np.cos
atan2 = np.arctan2
sqrt = np.sqrt
class cigre_eu_mv_ode_class:
def __init__(self):
self.t_end = 10.000000
self.Dt = 0.0010000
self.decimation = 10.000000
self.itol = 1e-6
self.Dt_max = 0.001000
self.Dt_min = 0.001000
self.solvern = 5
self.imax = 100
self.N_x = 42
self.N_y = 1
self.N_z = 1
self.N_store = 10000
self.params_list = ['R_0102', 'L_0102', 'C_0102', 'R_0203', 'L_0203', 'C_0203', 'R_0304', 'L_0304', 'C_0304', 'R_0308', 'L_0308', 'C_0308', 'R_0405', 'L_0405', 'C_0405', 'R_0506', 'L_0506', 'C_0506', 'R_0607', 'L_0607', 'C_0607', 'R_0708', 'L_0708', 'C_0708', 'R_0809', 'L_0809', 'C_0809', 'R_0910', 'L_0910', 'C_0910', 'R_1011', 'L_1011', 'C_1011', 'i_02_D', 'i_02_Q', 'i_03_D', 'i_03_Q', 'i_04_D', 'i_04_Q', 'i_05_D', 'i_05_Q', 'i_06_D', 'i_06_Q', 'i_07_D', 'i_07_Q', 'i_08_D', 'i_08_Q', 'i_09_D', 'i_09_Q', 'i_10_D', 'i_10_Q', 'i_11_D', 'i_11_Q', 'omega']
self.params_values_list = [1.41282, 0.0064270585739141526, 4.2631325817165496e-07, 2.21442, 0.01007361663003566, 6.681931209640832e-07, 0.30561, 0.001390250258896324, 9.22166976896133e-08, 0.6513, 0.0029628284205987236, 1.96527388518848e-07, 0.28056000000000003, 0.0012762953196425273, 8.465795197734993e-08, 0.77154, 0.0035098121290169496, 2.3280936793771228e-07, 0.12024, 0.0005469837084182259, 3.628197941886425e-08, 0.8366699999999999, 0.0038060949710768213, 2.5246210678959706e-07, 0.16032, 0.0007293116112243012, 4.837597255848566e-08, 0.38577, 0.0017549060645084748, 1.1640468396885614e-07, 0.16533, 0.0007521025990750605, 4.988772170093834e-08, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 314.1592653589793]
self.inputs_ini_list = ['v_01_D', 'v_01_Q', 'u_dummy']
self.inputs_ini_values_list = [0.0, 16329.931618554521, 1.0]
self.inputs_run_list = ['v_01_D', 'v_01_Q', 'u_dummy']
self.inputs_run_values_list = [0.0, 16329.931618554521, 1.0]
self.outputs_list = ['v_01_D']
self.x_list = ['i_l_0102_D', 'i_l_0102_Q', 'i_l_0203_D', 'i_l_0203_Q', 'i_l_0304_D', 'i_l_0304_Q', 'i_l_0308_D', 'i_l_0308_Q', 'i_l_0405_D', 'i_l_0405_Q', 'i_l_0506_D', 'i_l_0506_Q', 'i_l_0607_D', 'i_l_0607_Q', 'i_l_0708_D', 'i_l_0708_Q', 'i_l_0809_D', 'i_l_0809_Q', 'i_l_0910_D', 'i_l_0910_Q', 'i_l_1011_D', 'i_l_1011_Q', 'v_02_D', 'v_02_Q', 'v_03_D', 'v_03_Q', 'v_04_D', 'v_04_Q', 'v_05_D', 'v_05_Q', 'v_06_D', 'v_06_Q', 'v_07_D', 'v_07_Q', 'v_08_D', 'v_08_Q', 'v_09_D', 'v_09_Q', 'v_10_D', 'v_10_Q', 'v_11_D', 'v_11_Q']
self.y_run_list = ['y_dummy']
self.xy_list = self.x_list + self.y_run_list
self.y_ini_list = ['y_dummy']
self.xy_ini_list = self.x_list + self.y_ini_list
self.t = 0.0
self.it = 0
self.it_store = 0
self.xy_prev = np.zeros((self.N_x+self.N_y,1))
self.initialization_tol = 1e-6
self.N_u = len(self.inputs_run_list)
self.sopt_root_method='hybr'
self.sopt_root_jac=True
self.u_ini_list = self.inputs_ini_list
self.u_ini_values_list = self.inputs_ini_values_list
self.u_run_list = self.inputs_run_list
self.u_run_values_list = self.inputs_run_values_list
self.update()
def update(self):
self.N_steps = int(np.ceil(self.t_end/self.Dt))
dt = [
('t_end', np.float64),
('Dt', np.float64),
('decimation', np.float64),
('itol', np.float64),
('Dt_max', np.float64),
('Dt_min', np.float64),
('solvern', np.int64),
('imax', np.int64),
('N_steps', np.int64),
('N_store', np.int64),
('N_x', np.int64),
('N_y', np.int64),
('N_z', np.int64),
('t', np.float64),
('it', np.int64),
('it_store', np.int64),
('idx', np.int64),
('idy', np.int64),
('f', np.float64, (self.N_x,1)),
('x', np.float64, (self.N_x,1)),
('x_0', np.float64, (self.N_x,1)),
('g', np.float64, (self.N_y,1)),
('y_run', np.float64, (self.N_y,1)),
('y_ini', np.float64, (self.N_y,1)),
('y_0', np.float64, (self.N_y,1)),
('h', np.float64, (self.N_z,1)),
('Fx', np.float64, (self.N_x,self.N_x)),
('Fy', np.float64, (self.N_x,self.N_y)),
('Gx', np.float64, (self.N_y,self.N_x)),
('Gy', np.float64, (self.N_y,self.N_y)),
('Fu', np.float64, (self.N_x,self.N_u)),
('Gu', np.float64, (self.N_y,self.N_u)),
('Hx', np.float64, (self.N_z,self.N_x)),
('Hy', np.float64, (self.N_z,self.N_y)),
('Hu', np.float64, (self.N_z,self.N_u)),
('Fx_ini', np.float64, (self.N_x,self.N_x)),
('Fy_ini', np.float64, (self.N_x,self.N_y)),
('Gx_ini', np.float64, (self.N_y,self.N_x)),
('Gy_ini', np.float64, (self.N_y,self.N_y)),
('T', np.float64, (self.N_store+1,1)),
('X', np.float64, (self.N_store+1,self.N_x)),
('Y', np.float64, (self.N_store+1,self.N_y)),
('Z', np.float64, (self.N_store+1,self.N_z)),
('iters', np.float64, (self.N_store+1,1)),
]
values = [
self.t_end,
self.Dt,
self.decimation,
self.itol,
self.Dt_max,
self.Dt_min,
self.solvern,
self.imax,
self.N_steps,
self.N_store,
self.N_x,
self.N_y,
self.N_z,
self.t,
self.it,
self.it_store,
0, # idx
0, # idy
np.zeros((self.N_x,1)), # f
np.zeros((self.N_x,1)), # x
np.zeros((self.N_x,1)), # x_0
np.zeros((self.N_y,1)), # g
np.zeros((self.N_y,1)), # y_run
np.zeros((self.N_y,1)), # y_ini
np.zeros((self.N_y,1)), # y_0
np.zeros((self.N_z,1)), # h
np.zeros((self.N_x,self.N_x)), # Fx
np.zeros((self.N_x,self.N_y)), # Fy
np.zeros((self.N_y,self.N_x)), # Gx
            np.zeros((self.N_y,self.N_y)), # Gy
np.zeros((self.N_x,self.N_u)), # Fu
np.zeros((self.N_y,self.N_u)), # Gu
np.zeros((self.N_z,self.N_x)), # Hx
np.zeros((self.N_z,self.N_y)), # Hy
np.zeros((self.N_z,self.N_u)), # Hu
np.zeros((self.N_x,self.N_x)), # Fx_ini
np.zeros((self.N_x,self.N_y)), # Fy_ini
np.zeros((self.N_y,self.N_x)), # Gx_ini
            np.zeros((self.N_y,self.N_y)), # Gy_ini
np.zeros((self.N_store+1,1)), # T
np.zeros((self.N_store+1,self.N_x)), # X
np.zeros((self.N_store+1,self.N_y)), # Y
np.zeros((self.N_store+1,self.N_z)), # Z
np.zeros((self.N_store+1,1)), # iters
]
dt += [(item,np.float64) for item in self.params_list]
values += [item for item in self.params_values_list]
for item_id,item_val in zip(self.inputs_ini_list,self.inputs_ini_values_list):
if item_id in self.inputs_run_list: continue
dt += [(item_id,np.float64)]
values += [item_val]
dt += [(item,np.float64) for item in self.inputs_run_list]
values += [item for item in self.inputs_run_values_list]
self.struct = np.rec.array([tuple(values)], dtype=np.dtype(dt))
def load_params(self,data_input):
if type(data_input) == str:
json_file = data_input
self.json_file = json_file
self.json_data = open(json_file).read().replace("'",'"')
data = json.loads(self.json_data)
elif type(data_input) == dict:
data = data_input
self.data = data
for item in self.data:
self.struct[0][item] = self.data[item]
self.params_values_list[self.params_list.index(item)] = self.data[item]
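    # Example (assumption): model.load_params({'R_0102': 1.5, 'omega': 2*np.pi*50})
    # updates both the record array used by the numba kernels and params_values_list.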
def ini_problem(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini(self.struct,2)
ini(self.struct,3)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_problem(self,x):
t = self.struct[0].t
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(t,self.struct,2)
run(t,self.struct,3)
run(t,self.struct,10)
run(t,self.struct,11)
run(t,self.struct,12)
run(t,self.struct,13)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,13)
A_c = np.block([[self.struct[0].Fx,self.struct[0].Fy],
[self.struct[0].Gx,self.struct[0].Gy]])
return A_c
def eval_jacobians(self):
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
return 1
def ini_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini(self.struct,10)
ini(self.struct,11)
A_c = np.block([[self.struct[0].Fx_ini,self.struct[0].Fy_ini],
[self.struct[0].Gx_ini,self.struct[0].Gy_ini]])
return A_c
def f_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_odeint(self,x,t):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_ivp(self,t,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def Fx_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,10)
return self.struct[0].Fx
def eval_A(self):
Fx = self.struct[0].Fx
Fy = self.struct[0].Fy
Gx = self.struct[0].Gx
Gy = self.struct[0].Gy
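        # Reduce the DAE to an ODE state matrix by eliminating the algebraic variables:
        # A = Fx - Fy * Gy^-1 * Gx (Schur complement of Gy)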
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
self.A = A
return A
def eval_A_ini(self):
Fx = self.struct[0].Fx_ini
Fy = self.struct[0].Fy_ini
Gx = self.struct[0].Gx_ini
Gy = self.struct[0].Gy_ini
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
return A
def reset(self):
for param,param_value in zip(self.params_list,self.params_values_list):
self.struct[0][param] = param_value
for input_name,input_value in zip(self.inputs_ini_list,self.inputs_ini_values_list):
self.struct[0][input_name] = input_value
for input_name,input_value in zip(self.inputs_run_list,self.inputs_run_values_list):
self.struct[0][input_name] = input_value
def simulate(self,events,xy0=0):
# initialize both the ini and the run system
self.initialize(events,xy0=xy0)
## solve
#daesolver(self.struct) # run until first event
# simulation run
for event in events[1:]:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
T,X,Y,Z = self.post()
return T,X,Y,Z
def run(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
return 1
def post(self):
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
def initialize(self,events,xy0=0):
'''
Parameters
----------
events : dictionary
Dictionary with at least 't_end' and all inputs and parameters
that need to be changed.
xy0 : float or string, optional
0 means all states should be zero as initial guess.
If not zero all the states initial guess are the given input.
If 'prev' it uses the last known initialization result as initial guess.
Returns
-------
        T : numpy.ndarray
            Stored simulation times.
        X : numpy.ndarray
            Stored dynamical states (columns follow x_list).
        Y : numpy.ndarray
            Stored algebraic states (columns follow y_run_list).
        Z : numpy.ndarray
            Stored outputs (columns follow outputs_list).
'''
# simulation parameters
self.struct[0].it = 0 # set time step to zero
self.struct[0].it_store = 0 # set storage to zero
self.struct[0].t = 0.0 # set time to zero
# initialization
it_event = 0
event = events[it_event]
for item in event:
self.struct[0][item] = event[item]
## compute initial conditions using x and y_ini
if xy0 == 0:
xy0 = np.zeros(self.N_x+self.N_y)
elif xy0 == 1:
xy0 = np.ones(self.N_x+self.N_y)
elif xy0 == 'prev':
xy0 = self.xy_prev
else:
xy0 = xy0*np.ones(self.N_x+self.N_y)
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.ini_problem, xy0,
jac=self.ini_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.ini_problem, xy0, method=self.sopt_root_method)
self.initialization_ok = True
if sol.success == False:
print('initialization not found!')
self.initialization_ok = False
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
if self.initialization_ok:
xy = sol.x
self.xy_prev = xy
self.struct[0].x[:,0] = xy[0:self.N_x]
self.struct[0].y_run[:,0] = xy[self.N_x:]
## y_ini to u_run
for item in self.inputs_run_list:
if item in self.y_ini_list:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.inputs_ini_list:
if item in self.y_run_list:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.run_problem, xy0,
jac=self.run_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.run_problem, xy0, method=self.sopt_root_method)
# evaluate f and g
run(0.0,self.struct,2)
run(0.0,self.struct,3)
# evaluate run jacobians
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,14)
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
def get_value(self,name):
if name in self.inputs_run_list:
value = self.struct[0][name]
if name in self.x_list:
idx = self.x_list.index(name)
value = self.struct[0].x[idx,0]
if name in self.y_run_list:
idy = self.y_run_list.index(name)
value = self.struct[0].y_run[idy,0]
if name in self.params_list:
value = self.struct[0][name]
if name in self.outputs_list:
value = self.struct[0].h[self.outputs_list.index(name),0]
return value
def get_values(self,name):
if name in self.x_list:
values = self.X[:,self.x_list.index(name)]
if name in self.y_run_list:
values = self.Y[:,self.y_run_list.index(name)]
if name in self.outputs_list:
values = self.Z[:,self.outputs_list.index(name)]
return values
def set_value(self,name,value):
if name in self.inputs_run_list:
self.struct[0][name] = value
if name in self.params_list:
self.struct[0][name] = value
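# Usage sketch (assumption: the companion daesolver() called by simulate()/run() is defined
# alongside this generated class):
#   model = cigre_eu_mv_ode_class()
#   events = [{'t_end': 1.0}, {'t_end': 10.0, 'v_01_Q': 0.95 * 16329.93}]
#   T, X, Y, Z = model.simulate(events)
#   v_11_D = model.get_values('v_11_D')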
@numba.njit(cache=True)
def run(t,struct,mode):
# Parameters:
R_0102 = struct[0].R_0102
L_0102 = struct[0].L_0102
C_0102 = struct[0].C_0102
R_0203 = struct[0].R_0203
L_0203 = struct[0].L_0203
C_0203 = struct[0].C_0203
R_0304 = struct[0].R_0304
L_0304 = struct[0].L_0304
C_0304 = struct[0].C_0304
R_0308 = struct[0].R_0308
L_0308 = struct[0].L_0308
C_0308 = struct[0].C_0308
R_0405 = struct[0].R_0405
L_0405 = struct[0].L_0405
C_0405 = struct[0].C_0405
R_0506 = struct[0].R_0506
L_0506 = struct[0].L_0506
C_0506 = struct[0].C_0506
R_0607 = struct[0].R_0607
L_0607 = struct[0].L_0607
C_0607 = struct[0].C_0607
R_0708 = struct[0].R_0708
L_0708 = struct[0].L_0708
C_0708 = struct[0].C_0708
R_0809 = struct[0].R_0809
L_0809 = struct[0].L_0809
C_0809 = struct[0].C_0809
R_0910 = struct[0].R_0910
L_0910 = struct[0].L_0910
C_0910 = struct[0].C_0910
R_1011 = struct[0].R_1011
L_1011 = struct[0].L_1011
C_1011 = struct[0].C_1011
i_02_D = struct[0].i_02_D
i_02_Q = struct[0].i_02_Q
i_03_D = struct[0].i_03_D
i_03_Q = struct[0].i_03_Q
i_04_D = struct[0].i_04_D
i_04_Q = struct[0].i_04_Q
i_05_D = struct[0].i_05_D
i_05_Q = struct[0].i_05_Q
i_06_D = struct[0].i_06_D
i_06_Q = struct[0].i_06_Q
i_07_D = struct[0].i_07_D
i_07_Q = struct[0].i_07_Q
i_08_D = struct[0].i_08_D
i_08_Q = struct[0].i_08_Q
i_09_D = struct[0].i_09_D
i_09_Q = struct[0].i_09_Q
i_10_D = struct[0].i_10_D
i_10_Q = struct[0].i_10_Q
i_11_D = struct[0].i_11_D
i_11_Q = struct[0].i_11_Q
omega = struct[0].omega
# Inputs:
v_01_D = struct[0].v_01_D
v_01_Q = struct[0].v_01_Q
u_dummy = struct[0].u_dummy
# Dynamical states:
i_l_0102_D = struct[0].x[0,0]
i_l_0102_Q = struct[0].x[1,0]
i_l_0203_D = struct[0].x[2,0]
i_l_0203_Q = struct[0].x[3,0]
i_l_0304_D = struct[0].x[4,0]
i_l_0304_Q = struct[0].x[5,0]
i_l_0308_D = struct[0].x[6,0]
i_l_0308_Q = struct[0].x[7,0]
i_l_0405_D = struct[0].x[8,0]
i_l_0405_Q = struct[0].x[9,0]
i_l_0506_D = struct[0].x[10,0]
i_l_0506_Q = struct[0].x[11,0]
i_l_0607_D = struct[0].x[12,0]
i_l_0607_Q = struct[0].x[13,0]
i_l_0708_D = struct[0].x[14,0]
i_l_0708_Q = struct[0].x[15,0]
i_l_0809_D = struct[0].x[16,0]
i_l_0809_Q = struct[0].x[17,0]
i_l_0910_D = struct[0].x[18,0]
i_l_0910_Q = struct[0].x[19,0]
i_l_1011_D = struct[0].x[20,0]
i_l_1011_Q = struct[0].x[21,0]
v_02_D = struct[0].x[22,0]
v_02_Q = struct[0].x[23,0]
v_03_D = struct[0].x[24,0]
v_03_Q = struct[0].x[25,0]
v_04_D = struct[0].x[26,0]
v_04_Q = struct[0].x[27,0]
v_05_D = struct[0].x[28,0]
v_05_Q = struct[0].x[29,0]
v_06_D = struct[0].x[30,0]
v_06_Q = struct[0].x[31,0]
v_07_D = struct[0].x[32,0]
v_07_Q = struct[0].x[33,0]
v_08_D = struct[0].x[34,0]
v_08_Q = struct[0].x[35,0]
v_09_D = struct[0].x[36,0]
v_09_Q = struct[0].x[37,0]
v_10_D = struct[0].x[38,0]
v_10_Q = struct[0].x[39,0]
v_11_D = struct[0].x[40,0]
v_11_Q = struct[0].x[41,0]
# Algebraic states:
y_dummy = struct[0].y_run[0,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (L_0102*i_l_0102_Q*omega - R_0102*i_l_0102_D + v_01_D - v_02_D)/L_0102
struct[0].f[1,0] = (-L_0102*i_l_0102_D*omega - R_0102*i_l_0102_Q + v_01_Q - v_02_Q)/L_0102
struct[0].f[2,0] = (L_0203*i_l_0203_Q*omega - R_0203*i_l_0203_D + v_02_D - v_03_D)/L_0203
struct[0].f[3,0] = (-L_0203*i_l_0203_D*omega - R_0203*i_l_0203_Q + v_02_Q - v_03_Q)/L_0203
struct[0].f[4,0] = (L_0304*i_l_0304_Q*omega - R_0304*i_l_0304_D + v_03_D - v_04_D)/L_0304
struct[0].f[5,0] = (-L_0304*i_l_0304_D*omega - R_0304*i_l_0304_Q + v_03_Q - v_04_Q)/L_0304
struct[0].f[6,0] = (L_0308*i_l_0308_Q*omega - R_0308*i_l_0308_D + v_03_D - v_08_D)/L_0308
struct[0].f[7,0] = (-L_0308*i_l_0308_D*omega - R_0308*i_l_0308_Q + v_03_Q - v_08_Q)/L_0308
struct[0].f[8,0] = (L_0405*i_l_0405_Q*omega - R_0405*i_l_0405_D + v_04_D - v_05_D)/L_0405
struct[0].f[9,0] = (-L_0405*i_l_0405_D*omega - R_0405*i_l_0405_Q + v_04_Q - v_05_Q)/L_0405
struct[0].f[10,0] = (L_0506*i_l_0506_Q*omega - R_0506*i_l_0506_D + v_05_D - v_06_D)/L_0506
struct[0].f[11,0] = (-L_0506*i_l_0506_D*omega - R_0506*i_l_0506_Q + v_05_Q - v_06_Q)/L_0506
struct[0].f[12,0] = (L_0607*i_l_0607_Q*omega - R_0607*i_l_0607_D + v_06_D - v_07_D)/L_0607
struct[0].f[13,0] = (-L_0607*i_l_0607_D*omega - R_0607*i_l_0607_Q + v_06_Q - v_07_Q)/L_0607
struct[0].f[14,0] = (L_0708*i_l_0708_Q*omega - R_0708*i_l_0708_D + v_07_D - v_08_D)/L_0708
struct[0].f[15,0] = (-L_0708*i_l_0708_D*omega - R_0708*i_l_0708_Q + v_07_Q - v_08_Q)/L_0708
struct[0].f[16,0] = (L_0809*i_l_0809_Q*omega - R_0809*i_l_0809_D + v_08_D - v_09_D)/L_0809
struct[0].f[17,0] = (-L_0809*i_l_0809_D*omega - R_0809*i_l_0809_Q + v_08_Q - v_09_Q)/L_0809
struct[0].f[18,0] = (L_0910*i_l_0910_Q*omega - R_0910*i_l_0910_D + v_09_D - v_10_D)/L_0910
struct[0].f[19,0] = (-L_0910*i_l_0910_D*omega - R_0910*i_l_0910_Q + v_09_Q - v_10_Q)/L_0910
struct[0].f[20,0] = (L_1011*i_l_1011_Q*omega - R_1011*i_l_1011_D + v_10_D - v_11_D)/L_1011
struct[0].f[21,0] = (-L_1011*i_l_1011_D*omega - R_1011*i_l_1011_Q + v_10_Q - v_11_Q)/L_1011
struct[0].f[22,0] = (i_02_D + i_l_0102_D - i_l_0203_D + omega*v_02_Q*(C_0102/2 + C_0203/2))/(C_0102/2 + C_0203/2)
struct[0].f[23,0] = (i_02_Q + i_l_0102_Q - i_l_0203_Q - omega*v_02_D*(C_0102/2 + C_0203/2))/(C_0102/2 + C_0203/2)
struct[0].f[24,0] = (i_03_D + i_l_0203_D - i_l_0304_D - i_l_0308_D + omega*v_03_Q*(C_0203/2 + C_0304/2 + C_0308/2))/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].f[25,0] = (i_03_Q + i_l_0203_Q - i_l_0304_Q - i_l_0308_Q - omega*v_03_D*(C_0203/2 + C_0304/2 + C_0308/2))/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].f[26,0] = (i_04_D + i_l_0304_D - i_l_0405_D + omega*v_04_Q*(C_0304/2 + C_0405/2))/(C_0304/2 + C_0405/2)
struct[0].f[27,0] = (i_04_Q + i_l_0304_Q - i_l_0405_Q - omega*v_04_D*(C_0304/2 + C_0405/2))/(C_0304/2 + C_0405/2)
struct[0].f[28,0] = (i_05_D + i_l_0405_D - i_l_0506_D + omega*v_05_Q*(C_0405/2 + C_0506/2))/(C_0405/2 + C_0506/2)
struct[0].f[29,0] = (i_05_Q + i_l_0405_Q - i_l_0506_Q - omega*v_05_D*(C_0405/2 + C_0506/2))/(C_0405/2 + C_0506/2)
struct[0].f[30,0] = (i_06_D + i_l_0506_D - i_l_0607_D + omega*v_06_Q*(C_0506/2 + C_0607/2))/(C_0506/2 + C_0607/2)
struct[0].f[31,0] = (i_06_Q + i_l_0506_Q - i_l_0607_Q - omega*v_06_D*(C_0506/2 + C_0607/2))/(C_0506/2 + C_0607/2)
struct[0].f[32,0] = (i_07_D + i_l_0607_D - i_l_0708_D + omega*v_07_Q*(C_0607/2 + C_0708/2))/(C_0607/2 + C_0708/2)
struct[0].f[33,0] = (i_07_Q + i_l_0607_Q - i_l_0708_Q - omega*v_07_D*(C_0607/2 + C_0708/2))/(C_0607/2 + C_0708/2)
struct[0].f[34,0] = (i_08_D + i_l_0308_D + i_l_0708_D - i_l_0809_D + omega*v_08_Q*(C_0308/2 + C_0708/2 + C_0809/2))/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].f[35,0] = (i_08_Q + i_l_0308_Q + i_l_0708_Q - i_l_0809_Q - omega*v_08_D*(C_0308/2 + C_0708/2 + C_0809/2))/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].f[36,0] = (i_09_D + i_l_0809_D - i_l_0910_D + omega*v_09_Q*(C_0809/2 + C_0910/2))/(C_0809/2 + C_0910/2)
struct[0].f[37,0] = (i_09_Q + i_l_0809_Q - i_l_0910_Q - omega*v_09_D*(C_0809/2 + C_0910/2))/(C_0809/2 + C_0910/2)
struct[0].f[38,0] = (i_10_D + i_l_0910_D - i_l_1011_D + omega*v_10_Q*(C_0910/2 + C_1011/2))/(C_0910/2 + C_1011/2)
struct[0].f[39,0] = (i_10_Q + i_l_0910_Q - i_l_1011_Q - omega*v_10_D*(C_0910/2 + C_1011/2))/(C_0910/2 + C_1011/2)
struct[0].f[40,0] = 2*(C_1011*omega*v_11_Q/2 + i_11_D + i_l_1011_D)/C_1011
struct[0].f[41,0] = 2*(-C_1011*omega*v_11_D/2 + i_11_Q + i_l_1011_Q)/C_1011
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = u_dummy - y_dummy
# Outputs:
if mode == 3:
struct[0].h[0,0] = v_01_D
if mode == 10:
struct[0].Fx[0,0] = -R_0102/L_0102
struct[0].Fx[0,1] = omega
struct[0].Fx[0,22] = -1/L_0102
struct[0].Fx[1,0] = -omega
struct[0].Fx[1,1] = -R_0102/L_0102
struct[0].Fx[1,23] = -1/L_0102
struct[0].Fx[2,2] = -R_0203/L_0203
struct[0].Fx[2,3] = omega
struct[0].Fx[2,22] = 1/L_0203
struct[0].Fx[2,24] = -1/L_0203
struct[0].Fx[3,2] = -omega
struct[0].Fx[3,3] = -R_0203/L_0203
struct[0].Fx[3,23] = 1/L_0203
struct[0].Fx[3,25] = -1/L_0203
struct[0].Fx[4,4] = -R_0304/L_0304
struct[0].Fx[4,5] = omega
struct[0].Fx[4,24] = 1/L_0304
struct[0].Fx[4,26] = -1/L_0304
struct[0].Fx[5,4] = -omega
struct[0].Fx[5,5] = -R_0304/L_0304
struct[0].Fx[5,25] = 1/L_0304
struct[0].Fx[5,27] = -1/L_0304
struct[0].Fx[6,6] = -R_0308/L_0308
struct[0].Fx[6,7] = omega
struct[0].Fx[6,24] = 1/L_0308
struct[0].Fx[6,34] = -1/L_0308
struct[0].Fx[7,6] = -omega
struct[0].Fx[7,7] = -R_0308/L_0308
struct[0].Fx[7,25] = 1/L_0308
struct[0].Fx[7,35] = -1/L_0308
struct[0].Fx[8,8] = -R_0405/L_0405
struct[0].Fx[8,9] = omega
struct[0].Fx[8,26] = 1/L_0405
struct[0].Fx[8,28] = -1/L_0405
struct[0].Fx[9,8] = -omega
struct[0].Fx[9,9] = -R_0405/L_0405
struct[0].Fx[9,27] = 1/L_0405
struct[0].Fx[9,29] = -1/L_0405
struct[0].Fx[10,10] = -R_0506/L_0506
struct[0].Fx[10,11] = omega
struct[0].Fx[10,28] = 1/L_0506
struct[0].Fx[10,30] = -1/L_0506
struct[0].Fx[11,10] = -omega
struct[0].Fx[11,11] = -R_0506/L_0506
struct[0].Fx[11,29] = 1/L_0506
struct[0].Fx[11,31] = -1/L_0506
struct[0].Fx[12,12] = -R_0607/L_0607
struct[0].Fx[12,13] = omega
struct[0].Fx[12,30] = 1/L_0607
struct[0].Fx[12,32] = -1/L_0607
struct[0].Fx[13,12] = -omega
struct[0].Fx[13,13] = -R_0607/L_0607
struct[0].Fx[13,31] = 1/L_0607
struct[0].Fx[13,33] = -1/L_0607
struct[0].Fx[14,14] = -R_0708/L_0708
struct[0].Fx[14,15] = omega
struct[0].Fx[14,32] = 1/L_0708
struct[0].Fx[14,34] = -1/L_0708
struct[0].Fx[15,14] = -omega
struct[0].Fx[15,15] = -R_0708/L_0708
struct[0].Fx[15,33] = 1/L_0708
struct[0].Fx[15,35] = -1/L_0708
struct[0].Fx[16,16] = -R_0809/L_0809
struct[0].Fx[16,17] = omega
struct[0].Fx[16,34] = 1/L_0809
struct[0].Fx[16,36] = -1/L_0809
struct[0].Fx[17,16] = -omega
struct[0].Fx[17,17] = -R_0809/L_0809
struct[0].Fx[17,35] = 1/L_0809
struct[0].Fx[17,37] = -1/L_0809
struct[0].Fx[18,18] = -R_0910/L_0910
struct[0].Fx[18,19] = omega
struct[0].Fx[18,36] = 1/L_0910
struct[0].Fx[18,38] = -1/L_0910
struct[0].Fx[19,18] = -omega
struct[0].Fx[19,19] = -R_0910/L_0910
struct[0].Fx[19,37] = 1/L_0910
struct[0].Fx[19,39] = -1/L_0910
struct[0].Fx[20,20] = -R_1011/L_1011
struct[0].Fx[20,21] = omega
struct[0].Fx[20,38] = 1/L_1011
struct[0].Fx[20,40] = -1/L_1011
struct[0].Fx[21,20] = -omega
struct[0].Fx[21,21] = -R_1011/L_1011
struct[0].Fx[21,39] = 1/L_1011
struct[0].Fx[21,41] = -1/L_1011
struct[0].Fx[22,0] = 1/(C_0102/2 + C_0203/2)
struct[0].Fx[22,2] = -1/(C_0102/2 + C_0203/2)
struct[0].Fx[22,23] = omega
struct[0].Fx[23,1] = 1/(C_0102/2 + C_0203/2)
struct[0].Fx[23,3] = -1/(C_0102/2 + C_0203/2)
struct[0].Fx[23,22] = -omega
struct[0].Fx[24,2] = 1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[24,4] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[24,6] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[24,25] = omega
struct[0].Fx[25,3] = 1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[25,5] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[25,7] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx[25,24] = -omega
struct[0].Fx[26,4] = 1/(C_0304/2 + C_0405/2)
struct[0].Fx[26,8] = -1/(C_0304/2 + C_0405/2)
struct[0].Fx[26,27] = omega
struct[0].Fx[27,5] = 1/(C_0304/2 + C_0405/2)
struct[0].Fx[27,9] = -1/(C_0304/2 + C_0405/2)
struct[0].Fx[27,26] = -omega
struct[0].Fx[28,8] = 1/(C_0405/2 + C_0506/2)
struct[0].Fx[28,10] = -1/(C_0405/2 + C_0506/2)
struct[0].Fx[28,29] = omega
struct[0].Fx[29,9] = 1/(C_0405/2 + C_0506/2)
struct[0].Fx[29,11] = -1/(C_0405/2 + C_0506/2)
struct[0].Fx[29,28] = -omega
struct[0].Fx[30,10] = 1/(C_0506/2 + C_0607/2)
struct[0].Fx[30,12] = -1/(C_0506/2 + C_0607/2)
struct[0].Fx[30,31] = omega
struct[0].Fx[31,11] = 1/(C_0506/2 + C_0607/2)
struct[0].Fx[31,13] = -1/(C_0506/2 + C_0607/2)
struct[0].Fx[31,30] = -omega
struct[0].Fx[32,12] = 1/(C_0607/2 + C_0708/2)
struct[0].Fx[32,14] = -1/(C_0607/2 + C_0708/2)
struct[0].Fx[32,33] = omega
struct[0].Fx[33,13] = 1/(C_0607/2 + C_0708/2)
struct[0].Fx[33,15] = -1/(C_0607/2 + C_0708/2)
struct[0].Fx[33,32] = -omega
struct[0].Fx[34,6] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[34,14] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[34,16] = -1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[34,35] = omega
struct[0].Fx[35,7] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[35,15] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[35,17] = -1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx[35,34] = -omega
struct[0].Fx[36,16] = 1/(C_0809/2 + C_0910/2)
struct[0].Fx[36,18] = -1/(C_0809/2 + C_0910/2)
struct[0].Fx[36,37] = omega
struct[0].Fx[37,17] = 1/(C_0809/2 + C_0910/2)
struct[0].Fx[37,19] = -1/(C_0809/2 + C_0910/2)
struct[0].Fx[37,36] = -omega
struct[0].Fx[38,18] = 1/(C_0910/2 + C_1011/2)
struct[0].Fx[38,20] = -1/(C_0910/2 + C_1011/2)
struct[0].Fx[38,39] = omega
struct[0].Fx[39,19] = 1/(C_0910/2 + C_1011/2)
struct[0].Fx[39,21] = -1/(C_0910/2 + C_1011/2)
struct[0].Fx[39,38] = -omega
struct[0].Fx[40,20] = 2/C_1011
struct[0].Fx[40,41] = omega
struct[0].Fx[41,21] = 2/C_1011
struct[0].Fx[41,40] = -omega
if mode == 11:
struct[0].Gy[0,0] = -1
if mode > 12:
struct[0].Gu[0,2] = 1
@numba.njit(cache=True)
def ini(struct,mode):
# Parameters:
R_0102 = struct[0].R_0102
L_0102 = struct[0].L_0102
C_0102 = struct[0].C_0102
R_0203 = struct[0].R_0203
L_0203 = struct[0].L_0203
C_0203 = struct[0].C_0203
R_0304 = struct[0].R_0304
L_0304 = struct[0].L_0304
C_0304 = struct[0].C_0304
R_0308 = struct[0].R_0308
L_0308 = struct[0].L_0308
C_0308 = struct[0].C_0308
R_0405 = struct[0].R_0405
L_0405 = struct[0].L_0405
C_0405 = struct[0].C_0405
R_0506 = struct[0].R_0506
L_0506 = struct[0].L_0506
C_0506 = struct[0].C_0506
R_0607 = struct[0].R_0607
L_0607 = struct[0].L_0607
C_0607 = struct[0].C_0607
R_0708 = struct[0].R_0708
L_0708 = struct[0].L_0708
C_0708 = struct[0].C_0708
R_0809 = struct[0].R_0809
L_0809 = struct[0].L_0809
C_0809 = struct[0].C_0809
R_0910 = struct[0].R_0910
L_0910 = struct[0].L_0910
C_0910 = struct[0].C_0910
R_1011 = struct[0].R_1011
L_1011 = struct[0].L_1011
C_1011 = struct[0].C_1011
i_02_D = struct[0].i_02_D
i_02_Q = struct[0].i_02_Q
i_03_D = struct[0].i_03_D
i_03_Q = struct[0].i_03_Q
i_04_D = struct[0].i_04_D
i_04_Q = struct[0].i_04_Q
i_05_D = struct[0].i_05_D
i_05_Q = struct[0].i_05_Q
i_06_D = struct[0].i_06_D
i_06_Q = struct[0].i_06_Q
i_07_D = struct[0].i_07_D
i_07_Q = struct[0].i_07_Q
i_08_D = struct[0].i_08_D
i_08_Q = struct[0].i_08_Q
i_09_D = struct[0].i_09_D
i_09_Q = struct[0].i_09_Q
i_10_D = struct[0].i_10_D
i_10_Q = struct[0].i_10_Q
i_11_D = struct[0].i_11_D
i_11_Q = struct[0].i_11_Q
omega = struct[0].omega
# Inputs:
v_01_D = struct[0].v_01_D
v_01_Q = struct[0].v_01_Q
u_dummy = struct[0].u_dummy
# Dynamical states:
i_l_0102_D = struct[0].x[0,0]
i_l_0102_Q = struct[0].x[1,0]
i_l_0203_D = struct[0].x[2,0]
i_l_0203_Q = struct[0].x[3,0]
i_l_0304_D = struct[0].x[4,0]
i_l_0304_Q = struct[0].x[5,0]
i_l_0308_D = struct[0].x[6,0]
i_l_0308_Q = struct[0].x[7,0]
i_l_0405_D = struct[0].x[8,0]
i_l_0405_Q = struct[0].x[9,0]
i_l_0506_D = struct[0].x[10,0]
i_l_0506_Q = struct[0].x[11,0]
i_l_0607_D = struct[0].x[12,0]
i_l_0607_Q = struct[0].x[13,0]
i_l_0708_D = struct[0].x[14,0]
i_l_0708_Q = struct[0].x[15,0]
i_l_0809_D = struct[0].x[16,0]
i_l_0809_Q = struct[0].x[17,0]
i_l_0910_D = struct[0].x[18,0]
i_l_0910_Q = struct[0].x[19,0]
i_l_1011_D = struct[0].x[20,0]
i_l_1011_Q = struct[0].x[21,0]
v_02_D = struct[0].x[22,0]
v_02_Q = struct[0].x[23,0]
v_03_D = struct[0].x[24,0]
v_03_Q = struct[0].x[25,0]
v_04_D = struct[0].x[26,0]
v_04_Q = struct[0].x[27,0]
v_05_D = struct[0].x[28,0]
v_05_Q = struct[0].x[29,0]
v_06_D = struct[0].x[30,0]
v_06_Q = struct[0].x[31,0]
v_07_D = struct[0].x[32,0]
v_07_Q = struct[0].x[33,0]
v_08_D = struct[0].x[34,0]
v_08_Q = struct[0].x[35,0]
v_09_D = struct[0].x[36,0]
v_09_Q = struct[0].x[37,0]
v_10_D = struct[0].x[38,0]
v_10_Q = struct[0].x[39,0]
v_11_D = struct[0].x[40,0]
v_11_Q = struct[0].x[41,0]
# Algebraic states:
y_dummy = struct[0].y_ini[0,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (L_0102*i_l_0102_Q*omega - R_0102*i_l_0102_D + v_01_D - v_02_D)/L_0102
struct[0].f[1,0] = (-L_0102*i_l_0102_D*omega - R_0102*i_l_0102_Q + v_01_Q - v_02_Q)/L_0102
struct[0].f[2,0] = (L_0203*i_l_0203_Q*omega - R_0203*i_l_0203_D + v_02_D - v_03_D)/L_0203
struct[0].f[3,0] = (-L_0203*i_l_0203_D*omega - R_0203*i_l_0203_Q + v_02_Q - v_03_Q)/L_0203
struct[0].f[4,0] = (L_0304*i_l_0304_Q*omega - R_0304*i_l_0304_D + v_03_D - v_04_D)/L_0304
struct[0].f[5,0] = (-L_0304*i_l_0304_D*omega - R_0304*i_l_0304_Q + v_03_Q - v_04_Q)/L_0304
struct[0].f[6,0] = (L_0308*i_l_0308_Q*omega - R_0308*i_l_0308_D + v_03_D - v_08_D)/L_0308
struct[0].f[7,0] = (-L_0308*i_l_0308_D*omega - R_0308*i_l_0308_Q + v_03_Q - v_08_Q)/L_0308
struct[0].f[8,0] = (L_0405*i_l_0405_Q*omega - R_0405*i_l_0405_D + v_04_D - v_05_D)/L_0405
struct[0].f[9,0] = (-L_0405*i_l_0405_D*omega - R_0405*i_l_0405_Q + v_04_Q - v_05_Q)/L_0405
struct[0].f[10,0] = (L_0506*i_l_0506_Q*omega - R_0506*i_l_0506_D + v_05_D - v_06_D)/L_0506
struct[0].f[11,0] = (-L_0506*i_l_0506_D*omega - R_0506*i_l_0506_Q + v_05_Q - v_06_Q)/L_0506
struct[0].f[12,0] = (L_0607*i_l_0607_Q*omega - R_0607*i_l_0607_D + v_06_D - v_07_D)/L_0607
struct[0].f[13,0] = (-L_0607*i_l_0607_D*omega - R_0607*i_l_0607_Q + v_06_Q - v_07_Q)/L_0607
struct[0].f[14,0] = (L_0708*i_l_0708_Q*omega - R_0708*i_l_0708_D + v_07_D - v_08_D)/L_0708
struct[0].f[15,0] = (-L_0708*i_l_0708_D*omega - R_0708*i_l_0708_Q + v_07_Q - v_08_Q)/L_0708
struct[0].f[16,0] = (L_0809*i_l_0809_Q*omega - R_0809*i_l_0809_D + v_08_D - v_09_D)/L_0809
struct[0].f[17,0] = (-L_0809*i_l_0809_D*omega - R_0809*i_l_0809_Q + v_08_Q - v_09_Q)/L_0809
struct[0].f[18,0] = (L_0910*i_l_0910_Q*omega - R_0910*i_l_0910_D + v_09_D - v_10_D)/L_0910
struct[0].f[19,0] = (-L_0910*i_l_0910_D*omega - R_0910*i_l_0910_Q + v_09_Q - v_10_Q)/L_0910
struct[0].f[20,0] = (L_1011*i_l_1011_Q*omega - R_1011*i_l_1011_D + v_10_D - v_11_D)/L_1011
struct[0].f[21,0] = (-L_1011*i_l_1011_D*omega - R_1011*i_l_1011_Q + v_10_Q - v_11_Q)/L_1011
struct[0].f[22,0] = (i_02_D + i_l_0102_D - i_l_0203_D + omega*v_02_Q*(C_0102/2 + C_0203/2))/(C_0102/2 + C_0203/2)
struct[0].f[23,0] = (i_02_Q + i_l_0102_Q - i_l_0203_Q - omega*v_02_D*(C_0102/2 + C_0203/2))/(C_0102/2 + C_0203/2)
struct[0].f[24,0] = (i_03_D + i_l_0203_D - i_l_0304_D - i_l_0308_D + omega*v_03_Q*(C_0203/2 + C_0304/2 + C_0308/2))/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].f[25,0] = (i_03_Q + i_l_0203_Q - i_l_0304_Q - i_l_0308_Q - omega*v_03_D*(C_0203/2 + C_0304/2 + C_0308/2))/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].f[26,0] = (i_04_D + i_l_0304_D - i_l_0405_D + omega*v_04_Q*(C_0304/2 + C_0405/2))/(C_0304/2 + C_0405/2)
struct[0].f[27,0] = (i_04_Q + i_l_0304_Q - i_l_0405_Q - omega*v_04_D*(C_0304/2 + C_0405/2))/(C_0304/2 + C_0405/2)
struct[0].f[28,0] = (i_05_D + i_l_0405_D - i_l_0506_D + omega*v_05_Q*(C_0405/2 + C_0506/2))/(C_0405/2 + C_0506/2)
struct[0].f[29,0] = (i_05_Q + i_l_0405_Q - i_l_0506_Q - omega*v_05_D*(C_0405/2 + C_0506/2))/(C_0405/2 + C_0506/2)
struct[0].f[30,0] = (i_06_D + i_l_0506_D - i_l_0607_D + omega*v_06_Q*(C_0506/2 + C_0607/2))/(C_0506/2 + C_0607/2)
struct[0].f[31,0] = (i_06_Q + i_l_0506_Q - i_l_0607_Q - omega*v_06_D*(C_0506/2 + C_0607/2))/(C_0506/2 + C_0607/2)
struct[0].f[32,0] = (i_07_D + i_l_0607_D - i_l_0708_D + omega*v_07_Q*(C_0607/2 + C_0708/2))/(C_0607/2 + C_0708/2)
struct[0].f[33,0] = (i_07_Q + i_l_0607_Q - i_l_0708_Q - omega*v_07_D*(C_0607/2 + C_0708/2))/(C_0607/2 + C_0708/2)
struct[0].f[34,0] = (i_08_D + i_l_0308_D + i_l_0708_D - i_l_0809_D + omega*v_08_Q*(C_0308/2 + C_0708/2 + C_0809/2))/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].f[35,0] = (i_08_Q + i_l_0308_Q + i_l_0708_Q - i_l_0809_Q - omega*v_08_D*(C_0308/2 + C_0708/2 + C_0809/2))/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].f[36,0] = (i_09_D + i_l_0809_D - i_l_0910_D + omega*v_09_Q*(C_0809/2 + C_0910/2))/(C_0809/2 + C_0910/2)
struct[0].f[37,0] = (i_09_Q + i_l_0809_Q - i_l_0910_Q - omega*v_09_D*(C_0809/2 + C_0910/2))/(C_0809/2 + C_0910/2)
struct[0].f[38,0] = (i_10_D + i_l_0910_D - i_l_1011_D + omega*v_10_Q*(C_0910/2 + C_1011/2))/(C_0910/2 + C_1011/2)
struct[0].f[39,0] = (i_10_Q + i_l_0910_Q - i_l_1011_Q - omega*v_10_D*(C_0910/2 + C_1011/2))/(C_0910/2 + C_1011/2)
struct[0].f[40,0] = 2*(C_1011*omega*v_11_Q/2 + i_11_D + i_l_1011_D)/C_1011
struct[0].f[41,0] = 2*(-C_1011*omega*v_11_D/2 + i_11_Q + i_l_1011_Q)/C_1011
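# Reading guide (illustrative annotation, not generated output): each pair of
# rows above encodes either a series R-L branch or a shunt-capacitance node in
# the synchronous dq reference frame,
#     L di_D/dt = -R i_D + omega L i_Q + v_from_D - v_to_D
#     L di_Q/dt = -R i_Q - omega L i_D + v_from_Q - v_to_Q
#     C_n dv_D/dt =  omega C_n v_Q + i_inj_D + sum(i_l_in_D) - sum(i_l_out_D)
#     C_n dv_Q/dt = -omega C_n v_D + i_inj_Q + sum(i_l_in_Q) - sum(i_l_out_Q)
# where C_n is half the capacitance of every line connected to the node
# (pi-section line model), as in the denominators above.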
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = u_dummy - y_dummy
# Outputs:
if mode == 3:
struct[0].h[0,0] = v_01_D
if mode == 10:
struct[0].Fx_ini[0,0] = -R_0102/L_0102
struct[0].Fx_ini[0,1] = omega
struct[0].Fx_ini[0,22] = -1/L_0102
struct[0].Fx_ini[1,0] = -omega
struct[0].Fx_ini[1,1] = -R_0102/L_0102
struct[0].Fx_ini[1,23] = -1/L_0102
struct[0].Fx_ini[2,2] = -R_0203/L_0203
struct[0].Fx_ini[2,3] = omega
struct[0].Fx_ini[2,22] = 1/L_0203
struct[0].Fx_ini[2,24] = -1/L_0203
struct[0].Fx_ini[3,2] = -omega
struct[0].Fx_ini[3,3] = -R_0203/L_0203
struct[0].Fx_ini[3,23] = 1/L_0203
struct[0].Fx_ini[3,25] = -1/L_0203
struct[0].Fx_ini[4,4] = -R_0304/L_0304
struct[0].Fx_ini[4,5] = omega
struct[0].Fx_ini[4,24] = 1/L_0304
struct[0].Fx_ini[4,26] = -1/L_0304
struct[0].Fx_ini[5,4] = -omega
struct[0].Fx_ini[5,5] = -R_0304/L_0304
struct[0].Fx_ini[5,25] = 1/L_0304
struct[0].Fx_ini[5,27] = -1/L_0304
struct[0].Fx_ini[6,6] = -R_0308/L_0308
struct[0].Fx_ini[6,7] = omega
struct[0].Fx_ini[6,24] = 1/L_0308
struct[0].Fx_ini[6,34] = -1/L_0308
struct[0].Fx_ini[7,6] = -omega
struct[0].Fx_ini[7,7] = -R_0308/L_0308
struct[0].Fx_ini[7,25] = 1/L_0308
struct[0].Fx_ini[7,35] = -1/L_0308
struct[0].Fx_ini[8,8] = -R_0405/L_0405
struct[0].Fx_ini[8,9] = omega
struct[0].Fx_ini[8,26] = 1/L_0405
struct[0].Fx_ini[8,28] = -1/L_0405
struct[0].Fx_ini[9,8] = -omega
struct[0].Fx_ini[9,9] = -R_0405/L_0405
struct[0].Fx_ini[9,27] = 1/L_0405
struct[0].Fx_ini[9,29] = -1/L_0405
struct[0].Fx_ini[10,10] = -R_0506/L_0506
struct[0].Fx_ini[10,11] = omega
struct[0].Fx_ini[10,28] = 1/L_0506
struct[0].Fx_ini[10,30] = -1/L_0506
struct[0].Fx_ini[11,10] = -omega
struct[0].Fx_ini[11,11] = -R_0506/L_0506
struct[0].Fx_ini[11,29] = 1/L_0506
struct[0].Fx_ini[11,31] = -1/L_0506
struct[0].Fx_ini[12,12] = -R_0607/L_0607
struct[0].Fx_ini[12,13] = omega
struct[0].Fx_ini[12,30] = 1/L_0607
struct[0].Fx_ini[12,32] = -1/L_0607
struct[0].Fx_ini[13,12] = -omega
struct[0].Fx_ini[13,13] = -R_0607/L_0607
struct[0].Fx_ini[13,31] = 1/L_0607
struct[0].Fx_ini[13,33] = -1/L_0607
struct[0].Fx_ini[14,14] = -R_0708/L_0708
struct[0].Fx_ini[14,15] = omega
struct[0].Fx_ini[14,32] = 1/L_0708
struct[0].Fx_ini[14,34] = -1/L_0708
struct[0].Fx_ini[15,14] = -omega
struct[0].Fx_ini[15,15] = -R_0708/L_0708
struct[0].Fx_ini[15,33] = 1/L_0708
struct[0].Fx_ini[15,35] = -1/L_0708
struct[0].Fx_ini[16,16] = -R_0809/L_0809
struct[0].Fx_ini[16,17] = omega
struct[0].Fx_ini[16,34] = 1/L_0809
struct[0].Fx_ini[16,36] = -1/L_0809
struct[0].Fx_ini[17,16] = -omega
struct[0].Fx_ini[17,17] = -R_0809/L_0809
struct[0].Fx_ini[17,35] = 1/L_0809
struct[0].Fx_ini[17,37] = -1/L_0809
struct[0].Fx_ini[18,18] = -R_0910/L_0910
struct[0].Fx_ini[18,19] = omega
struct[0].Fx_ini[18,36] = 1/L_0910
struct[0].Fx_ini[18,38] = -1/L_0910
struct[0].Fx_ini[19,18] = -omega
struct[0].Fx_ini[19,19] = -R_0910/L_0910
struct[0].Fx_ini[19,37] = 1/L_0910
struct[0].Fx_ini[19,39] = -1/L_0910
struct[0].Fx_ini[20,20] = -R_1011/L_1011
struct[0].Fx_ini[20,21] = omega
struct[0].Fx_ini[20,38] = 1/L_1011
struct[0].Fx_ini[20,40] = -1/L_1011
struct[0].Fx_ini[21,20] = -omega
struct[0].Fx_ini[21,21] = -R_1011/L_1011
struct[0].Fx_ini[21,39] = 1/L_1011
struct[0].Fx_ini[21,41] = -1/L_1011
struct[0].Fx_ini[22,0] = 1/(C_0102/2 + C_0203/2)
struct[0].Fx_ini[22,2] = -1/(C_0102/2 + C_0203/2)
struct[0].Fx_ini[22,23] = omega
struct[0].Fx_ini[23,1] = 1/(C_0102/2 + C_0203/2)
struct[0].Fx_ini[23,3] = -1/(C_0102/2 + C_0203/2)
struct[0].Fx_ini[23,22] = -omega
struct[0].Fx_ini[24,2] = 1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[24,4] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[24,6] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[24,25] = omega
struct[0].Fx_ini[25,3] = 1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[25,5] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[25,7] = -1/(C_0203/2 + C_0304/2 + C_0308/2)
struct[0].Fx_ini[25,24] = -omega
struct[0].Fx_ini[26,4] = 1/(C_0304/2 + C_0405/2)
struct[0].Fx_ini[26,8] = -1/(C_0304/2 + C_0405/2)
struct[0].Fx_ini[26,27] = omega
struct[0].Fx_ini[27,5] = 1/(C_0304/2 + C_0405/2)
struct[0].Fx_ini[27,9] = -1/(C_0304/2 + C_0405/2)
struct[0].Fx_ini[27,26] = -omega
struct[0].Fx_ini[28,8] = 1/(C_0405/2 + C_0506/2)
struct[0].Fx_ini[28,10] = -1/(C_0405/2 + C_0506/2)
struct[0].Fx_ini[28,29] = omega
struct[0].Fx_ini[29,9] = 1/(C_0405/2 + C_0506/2)
struct[0].Fx_ini[29,11] = -1/(C_0405/2 + C_0506/2)
struct[0].Fx_ini[29,28] = -omega
struct[0].Fx_ini[30,10] = 1/(C_0506/2 + C_0607/2)
struct[0].Fx_ini[30,12] = -1/(C_0506/2 + C_0607/2)
struct[0].Fx_ini[30,31] = omega
struct[0].Fx_ini[31,11] = 1/(C_0506/2 + C_0607/2)
struct[0].Fx_ini[31,13] = -1/(C_0506/2 + C_0607/2)
struct[0].Fx_ini[31,30] = -omega
struct[0].Fx_ini[32,12] = 1/(C_0607/2 + C_0708/2)
struct[0].Fx_ini[32,14] = -1/(C_0607/2 + C_0708/2)
struct[0].Fx_ini[32,33] = omega
struct[0].Fx_ini[33,13] = 1/(C_0607/2 + C_0708/2)
struct[0].Fx_ini[33,15] = -1/(C_0607/2 + C_0708/2)
struct[0].Fx_ini[33,32] = -omega
struct[0].Fx_ini[34,6] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[34,14] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[34,16] = -1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[34,35] = omega
struct[0].Fx_ini[35,7] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[35,15] = 1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[35,17] = -1/(C_0308/2 + C_0708/2 + C_0809/2)
struct[0].Fx_ini[35,34] = -omega
struct[0].Fx_ini[36,16] = 1/(C_0809/2 + C_0910/2)
struct[0].Fx_ini[36,18] = -1/(C_0809/2 + C_0910/2)
struct[0].Fx_ini[36,37] = omega
struct[0].Fx_ini[37,17] = 1/(C_0809/2 + C_0910/2)
struct[0].Fx_ini[37,19] = -1/(C_0809/2 + C_0910/2)
struct[0].Fx_ini[37,36] = -omega
struct[0].Fx_ini[38,18] = 1/(C_0910/2 + C_1011/2)
struct[0].Fx_ini[38,20] = -1/(C_0910/2 + C_1011/2)
struct[0].Fx_ini[38,39] = omega
struct[0].Fx_ini[39,19] = 1/(C_0910/2 + C_1011/2)
struct[0].Fx_ini[39,21] = -1/(C_0910/2 + C_1011/2)
struct[0].Fx_ini[39,38] = -omega
struct[0].Fx_ini[40,20] = 2/C_1011
struct[0].Fx_ini[40,41] = omega
struct[0].Fx_ini[41,21] = 2/C_1011
struct[0].Fx_ini[41,40] = -omega
if mode == 11:
struct[0].Gy_ini[0,0] = -1
@numba.njit(cache=True)
def Piecewise(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def ITE(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def Abs(x):
return np.abs(x)
@numba.njit(cache=True)
def daesolver(struct):
sin = np.sin
cos = np.cos
sqrt = np.sqrt
i = 0
Dt = struct[i].Dt
N_x = struct[i].N_x
N_y = struct[i].N_y
N_z = struct[i].N_z
decimation = struct[i].decimation
eye = np.eye(N_x)
t = struct[i].t
t_end = struct[i].t_end
if struct[i].it == 0:
run(t,struct, 1)
struct[i].it_store = 0
struct[i]['T'][0] = t
struct[i].X[0,:] = struct[i].x[:,0]
struct[i].Y[0,:] = struct[i].y_run[:,0]
struct[i].Z[0,:] = struct[i].h[:,0]
solver = struct[i].solvern
while t<t_end:
struct[i].it += 1
struct[i].t += Dt
t = struct[i].t
if solver == 5: # Trapezoidal DAE as in Milano's book
run(t,struct, 2)
run(t,struct, 3)
x = np.copy(struct[i].x[:])
y = np.copy(struct[i].y_run[:])
f = np.copy(struct[i].f[:])
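# Sketch of the implicit trapezoidal step this branch implements (standard
# form, as in Milano's book; the remainder of the generated solver is not
# reproduced in this excerpt):
#     0 = x_{n+1} - x_n - Dt/2 * (f(x_n, y_n) + f(x_{n+1}, y_{n+1}))
#     0 = g(x_{n+1}, y_{n+1})
# solved for (x_{n+1}, y_{n+1}) with Newton iterations whose Jacobian is
#     [[eye - Dt/2 * Fx, -Dt/2 * Fy],
#      [Gx,              Gy        ]]
# built from the Jacobian blocks (Fx, Gy, ...) that run() assembles in its
# higher modes.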
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import six
import os
from madminer.utils.interfaces.hdf5 import load_madminer_settings, madminer_event_loader
from madminer.utils.analysis import get_theta_benchmark_matrix, get_dtheta_benchmark_matrix
from madminer.morphing import Morpher
from madminer.utils.various import general_init, format_benchmark, math_commands, weighted_quantile, sanitize_array
from madminer.ml import MLForge, EnsembleForge
def project_information(fisher_information, remaining_components):
"""
Calculates projections of a Fisher information matrix, that is, "deletes" the rows and columns corresponding to
some parameters not of interest.
Parameters
----------
fisher_information : ndarray
Original n x n Fisher information.
remaining_components : list of int
List with m entries, each an int with 0 <= remaining_components[i] < n. Denotes which parameters are kept, and
their new order. All other parameters are projected out.
Returns
-------
projected_fisher_information : ndarray
Projected m x m Fisher information, where the `i`-th row or column corresponds to the
`remaining_components[i]`-th row or column of fisher_information.
"""
n_new = len(remaining_components)
fisher_information_new = np.zeros([n_new, n_new])
for xnew, xold in enumerate(remaining_components):
for ynew, yold in enumerate(remaining_components):
fisher_information_new[xnew, ynew] = fisher_information[xold, yold]
return fisher_information_new
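# --- Illustrative sketch (helper added for exposition; the name is made up) ---
# project_information simply copies the selected rows/columns, so keeping
# parameters 0 and 2 of a hypothetical 3x3 matrix gives the 2x2 sub-matrix of
# those rows and columns.
def _demo_project_information():
    info = np.array([[4.0, 1.0, 0.5],
                     [1.0, 3.0, 0.2],
                     [0.5, 0.2, 2.0]])
    projected = project_information(info, [0, 2])
    # projected == [[4.0, 0.5],
    #               [0.5, 2.0]]
    return projected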
def profile_information(fisher_information, remaining_components):
"""
Calculates the profiled Fisher information matrix as defined in Appendix A.4 of arXiv:1612.05261.
Parameters
----------
fisher_information : ndarray
Original n x n Fisher information.
remaining_components : list of int
List with m entries, each an int with 0 <= remaining_components[i] < n. Denotes which parameters are kept, and
their new order. All other parameters are projected out.
Returns
-------
profiled_fisher_information : ndarray
Profiled m x m Fisher information, where the `i`-th row or column corresponds to the
`remaining_components[i]`-th row or column of fisher_information.
"""
# Group components
n_components = len(fisher_information)
remaining_components_checked = []
profiled_components = []
for i in range(n_components):
if i in remaining_components:
remaining_components_checked.append(i)
else:
profiled_components.append(i)
new_index_order = remaining_components + profiled_components
assert len(remaining_components) == len(remaining_components_checked), "Inconsistent input"
# Sort Fisher information such that the remaining components are at the beginning and the profiled at the end
profiled_fisher_information = np.copy(fisher_information[new_index_order, :])
profiled_fisher_information = profiled_fisher_information[:, new_index_order]
# Profile over one component at a time
for c in range(n_components - 1, len(remaining_components) - 1, -1):
profiled_fisher_information = (
profiled_fisher_information[:c, :c]
- np.outer(profiled_fisher_information[c, :c], profiled_fisher_information[c, :c])
/ profiled_fisher_information[c, c]
)
return profiled_fisher_information
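# --- Illustrative sketch (helper added for exposition; the name is made up) ---
# Each profiling step above is a Schur complement: with A = I[:c, :c],
# b = I[c, :c] and d = I[c, c], the profiled matrix is A - outer(b, b) / d.
# For a 2x2 example with the second parameter profiled out:
def _demo_profile_information():
    info = np.array([[4.0, 1.0],
                     [1.0, 2.0]])
    profiled = profile_information(info, [0])
    # profiled == [[4.0 - 1.0 * 1.0 / 2.0]] == [[3.5]]
    return profiled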
class FisherInformation:
"""
Functions to calculate expected Fisher information matrices.
After initializing a `FisherInformation` instance with the filename of a MadMiner file, different information matrices
can be calculated:
* `FisherInformation.calculate_fisher_information_full_truth()` calculates the full truth-level Fisher information.
This is the information in an idealized measurement where all parton-level particles with their charges, flavours,
and four-momenta can be accessed with perfect accuracy.
* `FisherInformation.calculate_fisher_information_full_detector()` calculates the full Fisher information in
realistic detector-level observations, estimated with neural networks. In addition to the MadMiner file, this
requires a trained SALLY or SALLINO estimator as well as an unweighted evaluation sample.
* `FisherInformation.calculate_fisher_information_rate()` calculates the Fisher information in the total cross
section.
* `FisherInformation.calculate_fisher_information_hist1d()` calculates the Fisher information in the histogram of
one (parton-level or detector-level) observable.
* `FisherInformation.calculate_fisher_information_hist2d()` calculates the Fisher information in a two-dimensional
histogram of two (parton-level or detector-level) observables.
* `FisherInformation.histogram_of_fisher_information()` calculates the full truth-level Fisher information in
different slices of one observable (the "distribution of the Fisher information").
Parameters
----------
filename : str
Path to MadMiner file (for instance the output of `madminer.delphes.DelphesProcessor.save()`).
debug : bool, optional
If True, additional detailed debugging output is printed. Default value: False.
"""
def __init__(self, filename, debug=False):
general_init(debug=debug)
self.debug = debug
self.madminer_filename = filename
logging.info("Loading data from %s", filename)
# Load data
(
self.parameters,
self.benchmarks,
self.morphing_components,
self.morphing_matrix,
self.observables,
self.n_samples,
) = load_madminer_settings(filename)
self.n_parameters = len(self.parameters)
self.n_benchmarks = len(self.benchmarks)
logging.info("Found %s parameters:", len(self.parameters))
for key, values in six.iteritems(self.parameters):
logging.info(
" %s (LHA: %s %s, maximal power in squared ME: %s, range: %s)",
key,
values[0],
values[1],
values[2],
values[3],
)
logging.info("Found %s benchmarks:", len(self.benchmarks))
for key, values in six.iteritems(self.benchmarks):
logging.info(" %s: %s", key, format_benchmark(values))
logging.info("Found %s observables: %s", len(self.observables), ", ".join(self.observables))
logging.info("Found %s events", self.n_samples)
# Morphing
self.morpher = None
if self.morphing_matrix is not None and self.morphing_components is not None:
self.morpher = Morpher(self.parameters)
self.morpher.set_components(self.morphing_components)
self.morpher.set_basis(self.benchmarks, morphing_matrix=self.morphing_matrix)
logging.info("Found morphing setup with %s components", len(self.morphing_components))
else:
raise RuntimeError("Did not find morphing setup.")
def calculate_fisher_information_full_truth(self, theta, luminosity=300000.0, cuts=None, efficiency_functions=None):
"""
Calculates the full Fisher information at parton / truth level. This is the information in an idealized
measurement where all parton-level particles with their charges, flavours, and four-momenta can be accessed with
perfect accuracy, i.e. the latent variables `z_parton` can be measured directly.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
fisher_information : ndarray
Expected full truth-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Loop over batches
fisher_info = np.zeros((self.n_parameters, self.n_parameters))
covariance = np.zeros((self.n_parameters, self.n_parameters, self.n_parameters, self.n_parameters))
for observations, weights in madminer_event_loader(self.madminer_filename):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher information
this_fisher_info, this_covariance = self._calculate_fisher_information(
theta, weights, luminosity, sum_events=True, calculate_uncertainty=True
)
fisher_info += this_fisher_info
covariance += this_covariance
return fisher_info, covariance
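# Hedged usage sketch: cuts and efficiency_functions are plain Python
# expressions evaluated per event; the observable name "pt_j1" below is
# hypothetical and depends on how the MadMiner file was produced.
#     info, cov = fisher.calculate_fisher_information_full_truth(
#         theta=np.array([0.0, 0.0]),
#         luminosity=300000.0,
#         cuts=["pt_j1 > 30."],
#         efficiency_functions=["0.9"],
#     )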
def calculate_fisher_information_full_detector(
self,
theta,
model_file,
unweighted_x_sample_file=None,
luminosity=300000.0,
include_xsec_info=True,
mode="information",
uncertainty="ensemble",
ensemble_vote_expectation_weight=None,
batch_size=100000,
test_split=0.5,
):
"""
Calculates the full Fisher information in realistic detector-level observations, estimated with neural networks.
In addition to the MadMiner file, this requires a trained SALLY or SALLINO estimator.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
model_file : str
Filename of a trained local score regression model that was trained on samples from `theta` (see
`madminer.ml.MLForge`).
unweighted_x_sample_file : str or None
Filename of an unweighted x sample that is sampled according to theta and obeys the cuts
(see `madminer.sampling.SampleAugmenter.extract_samples_train_local()`). If None, the Fisher information
is instead calculated on the full, weighted samples (the data in the MadMiner file). Default value: None.
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
include_xsec_info : bool, optional
Whether the rate information is included in the returned Fisher information. Default value: True.
mode : {"score", "information"}, optional
How the ensemble uncertainty on the kinematic Fisher information is calculated. If mode is "information",
the Fisher information for each estimator is calculated individually and only then
are the sample mean and covariance calculated. If mode is "score", the sample mean and covariance are
calculated for the score for each event, and the covariance is then propagated through to the final Fisher
information uncertainty (neglecting the correlation between events). Default value: "information".
uncertainty : {"ensemble", "expectation", "sum"}, optional
How the covariance matrix of the Fisher information estimate is calculated. With "ensemble", the ensemble
covariance is used. With "expectation", the expectation of the score is used as a measure of the uncertainty
of the score estimator, and this uncertainty is propagated through to the covariance matrix. With "sum",
both terms are summed. Default value: "ensemble".
ensemble_vote_expectation_weight : float or list of float or None, optional
For ensemble models, the factor that determines how much more weight is given to those estimators with small
expectation value. If a list is given, results are returned for each element in the list. If None, or if
`EnsembleForge.calculate_expectation()` has not been called, all estimators are treated equal. Default
value: None.
batch_size : int, optional
Batch size. Default value: 100000.
test_split : float or None, optional
If unweighted_x_sample_file is None, this determines the fraction of weighted events used for evaluation.
If None, all events are used (this will probably include events used during training!). Default value: 0.5.
Returns
-------
fisher_information : ndarray or list of ndarray
Estimated expected full detector-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
If more than one value of ensemble_vote_expectation_weight is given, this is a list with results for all
entries in ensemble_vote_expectation_weight.
fisher_information_uncertainty : ndarray or list of ndarray or None
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`. If more than one value of
ensemble_vote_expectation_weight is given, this is a list with results for all entries in
ensemble_vote_expectation_weight.
"""
# Check input
if mode not in ["score", "information"]:
raise ValueError("Unknown mode {}, has to be 'score' or 'information'!".format(mode))
# Total xsec
total_xsec = self._calculate_xsec(theta=theta)
# Rate part of Fisher information
fisher_info_rate = 0.0
rate_covariance = 0.0
if include_xsec_info:
logging.info("Evaluating rate Fisher information")
fisher_info_rate, rate_covariance = self.calculate_fisher_information_rate(
theta=theta, luminosity=luminosity
)
# Load SALLY model
if os.path.isdir(model_file):
model_is_ensemble = True
model = EnsembleForge(debug=self.debug)
model.load(model_file)
else:
model_is_ensemble = False
model = MLForge(debug=self.debug)
model.load(model_file)
# Evaluation from weighted events
if unweighted_x_sample_file is None:
# Which events to sum over
if test_split is None or test_split <= 0.0 or test_split >= 1.0:
start_event = 0
else:
start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1
if start_event > 0:
total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
else:
total_sum_weights_theta = total_xsec
# Theta morphing matrix
theta_matrix = get_theta_benchmark_matrix("morphing", theta, self.benchmarks, self.morpher)
# Prepare output
fisher_info_kin = None
covariance = None
n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
for i_batch, (observations, weights_benchmarks) in enumerate(
madminer_event_loader(self.madminer_filename, batch_size=batch_size, start=start_event)
):
logging.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
weights_theta = theta_matrix.dot(weights_benchmarks.T)
# Calculate Fisher info on this batch
if model_is_ensemble:
this_fisher_info, this_covariance = model.calculate_fisher_information(
x=observations,
obs_weights=weights_theta,
n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
vote_expectation_weight=ensemble_vote_expectation_weight,
mode=mode,
uncertainty=uncertainty,
)
else:
this_fisher_info = model.calculate_fisher_information(
x=observations, weights=weights_theta, n_events=luminosity * np.sum(weights_theta)
)
this_covariance = None
# Sum up results
if fisher_info_kin is None:
fisher_info_kin = this_fisher_info
elif isinstance(fisher_info_kin, list):
for i in range(len(fisher_info_kin)):
fisher_info_kin[i] += this_fisher_info[i]
else:
fisher_info_kin += this_fisher_info
if this_covariance is not None:
if covariance is None:
covariance = this_covariance
elif isinstance(covariance, list):
for i in range(len(covariance)):
covariance[i] += this_covariance[i]
else:
covariance += this_covariance
# Evaluation from unweighted event sample
else:
if model_is_ensemble:
fisher_info_kin, covariance = model.calculate_fisher_information(
unweighted_x_sample_file,
n_events=luminosity * total_xsec,
vote_expectation_weight=ensemble_vote_expectation_weight,
mode=mode,
uncertainty=uncertainty,
)
else:
fisher_info_kin = model.calculate_fisher_information(
unweighted_x_sample_file, n_events=luminosity * total_xsec
)
covariance = None
# Returns
if model_is_ensemble:
if isinstance(ensemble_vote_expectation_weight, list) and len(ensemble_vote_expectation_weight) > 1:
fisher_info_results = [
fisher_info_rate + this_fisher_info_kin for this_fisher_info_kin in fisher_info_kin
]
covariance_results = [rate_covariance + this_covariance for this_covariance in covariance]
return fisher_info_results, covariance_results
else:
return fisher_info_rate + fisher_info_kin, rate_covariance + covariance
return fisher_info_rate + fisher_info_kin, rate_covariance
def calculate_fisher_information_rate(self, theta, luminosity, cuts=None, efficiency_functions=None):
"""
Calculates the Fisher information in a measurement of the total cross section (without any kinematic
information).
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the total cross section with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Get weights at benchmarks
weights_benchmarks, weights_benchmark_uncertainties = self._calculate_xsec(
cuts=cuts, efficiency_functions=efficiency_functions, return_benchmark_xsecs=True, return_error=True
)
weights_benchmarks = weights_benchmarks.reshape((1, -1))
weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape((1, -1))
# Get Fisher information
fisher_info, covariance = self._calculate_fisher_information(
theta=theta,
weights_benchmarks=weights_benchmarks,
luminosity=luminosity,
sum_events=True,
calculate_uncertainty=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
)
return fisher_info, covariance
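# Note (standard result, added for context): for a single Poisson counting
# experiment with expected count n(theta) = luminosity * sigma(theta), the
# Fisher information reduces to
#     I_ij = luminosity * (d_i sigma) * (d_j sigma) / sigma
# which is, in effect, what the single-bin call above evaluates through the
# morphing derivatives of the benchmark weights.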
def calculate_fisher_information_hist1d(
self,
theta,
luminosity,
observable,
nbins,
histrange=None,
cuts=None,
efficiency_functions=None,
n_events_dynamic_binning=100000,
):
"""
Calculates the Fisher information in the one-dimensional histogram of an (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observable.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable : str
Expression for the observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the histogram, excluding overflow bins.
histrange : tuple of float or None
Minimum and maximum value of the histogram in the form `(min, max)`. Overflow bins are always added. If
None, variable-width bins with equal cross section are constructed automatically.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Automatic dynamic binning
dynamic_binning = histrange is None
if dynamic_binning:
n_bins_total = nbins
# Quantile values
quantile_values = np.linspace(0.0, 1.0, nbins + 1)
# Get data
x_pilot, weights_pilot = next(
madminer_event_loader(self.madminer_filename, batch_size=n_events_dynamic_binning)
)
# Cuts
cut_filter = [self._pass_cuts(x, cuts) for x in x_pilot]
x_pilot = x_pilot[cut_filter]
weights_pilot = weights_pilot[cut_filter]
# Efficiencies
efficiencies = np.array([self._eval_efficiency(x, efficiency_functions) for x in x_pilot])
weights_pilot *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables_pilot = np.asarray([self._eval_observable(x, observable) for x in x_pilot])
# Weights at theta
theta_matrix = get_theta_benchmark_matrix("morphing", theta, self.benchmarks, self.morpher)
weight_theta_pilot = theta_matrix.dot(weights_pilot.T)
# Bin boundaries
bin_boundaries = weighted_quantile(histo_observables_pilot, quantile_values, weight_theta_pilot)
bin_boundaries = bin_boundaries[1:-1]
logging.debug("Automatic dynamic binning: bin boundaries %s", bin_boundaries)
# Manual binning
else:
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Loop over batches
weights_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
weights_squared_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
for observations, weights in madminer_event_loader(self.madminer_filename):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks[i] += np.sum(weights[bins == i], axis=0)
weights_squared_benchmarks[i] += np.sum(weights[bins == i] ** 2, axis=0)
weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5
# Calculate Fisher information in histogram
fisher_info, covariance = self._calculate_fisher_information(
theta,
weights_benchmarks,
luminosity,
sum_events=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
calculate_uncertainty=True,
)
return fisher_info, covariance
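# Note (standard result, added for context): with Poisson-distributed bin
# counts, the histogram information computed above corresponds to
#     I_ij = luminosity * sum_over_bins (d_i sigma_b) * (d_j sigma_b) / sigma_b
# in the large-sample limit; refining the binning cannot decrease this
# quantity, but it does increase the statistical uncertainty on each sigma_b.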
def calculate_fisher_information_hist2d(
self,
theta,
luminosity,
observable1,
nbins1,
histrange1,
observable2,
nbins2,
histrange2,
cuts=None,
efficiency_functions=None,
):
"""
Calculates the Fisher information in a two-dimensional histogram of two (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observables.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable1 : str
Expression for the first observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins1 : int
Number of bins along the first axis in the histogram, excluding overflow bins.
histrange1 : tuple of float
Minimum and maximum value of the first axis of the histogram in the form `(min, max)`. Overflow bins are
always added.
observable2 : str
Expression for the second observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins2 : int
Number of bins along the second axis in the histogram, excluding overflow bins.
histrange2 : tuple of float
Minimum and maximum value of the second axis of the histogram in the form `(min, max)`. Overflow bins are
always added.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Number of bins
n_bins1_total = nbins1 + 2
bin1_boundaries = np.linspace(histrange1[0], histrange1[1], num=nbins1 + 1)
n_bins2_total = nbins2 + 2
bin2_boundaries = np.linspace(histrange2[0], histrange2[1], num=nbins2 + 1)
# Loop over batches
weights_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
for observations, weights in madminer_event_loader(self.madminer_filename):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo1_observables = np.asarray(
[self._eval_observable(obs_event, observable1) for obs_event in observations]
)
histo2_observables = np.asarray(
[self._eval_observable(obs_event, observable2) for obs_event in observations]
)
# Find bins
bins1 = np.searchsorted(bin1_boundaries, histo1_observables)
bins2 = np.searchsorted(bin2_boundaries, histo2_observables)
assert ((0 <= bins1) & (bins1 < n_bins1_total)).all(), "Wrong bin {}".format(bins1)
assert ((0 <= bins2) & (bins2 < n_bins2_total)).all(), "Wrong bin {}".format(bins2)
# Add up
for i in range(n_bins1_total):
for j in range(n_bins2_total):
if len(weights[(bins1 == i) & (bins2 == j)]) > 0:
weights_benchmarks[i, j] += np.sum(weights[(bins1 == i) & (bins2 == j)], axis=0)
# Calculate Fisher information in histogram
weights_benchmarks = weights_benchmarks.reshape(-1, self.n_benchmarks)
fisher_info = self._calculate_fisher_information(theta, weights_benchmarks, luminosity, sum_events=True)
return fisher_info
def histogram_of_fisher_information(
self, theta, luminosity, observable, nbins, histrange, cuts=None, efficiency_functions=None
):
"""
Calculates the full and rate-only Fisher information in slices of one observable.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable : str
Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the slicing, excluding overflow bins.
histrange : tuple of float
Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
bin_boundaries : ndarray
Observable slice boundaries.
sigma_bins : ndarray
Cross section in pb in each of the slices.
rate_fisher_infos : ndarray
Expected rate-only Fisher information for each slice. Has shape `(n_slices, n_parameters, n_parameters)`.
full_fisher_infos_truth : ndarray
Expected full truth-level Fisher information for each slice. Has shape
`(n_slices, n_parameters, n_parameters)`.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Number of bins
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Loop over batches
weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
fisher_info_full_bins = np.zeros((n_bins_total, self.n_parameters, self.n_parameters))
for observations, weights in madminer_event_loader(self.madminer_filename):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher info per event
fisher_info_events = self._calculate_fisher_information(theta, weights, luminosity, sum_events=False)
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)
# Calculate xsecs in bins
theta_matrix = get_theta_benchmark_matrix("morphing", theta, self.benchmarks, self.morpher)
sigma_bins = theta_matrix.dot(weights_benchmarks_bins.T) # (n_bins,)
# Calculate rate-only Fisher informations in bins
fisher_info_rate_bins = self._calculate_fisher_information(
theta, weights_benchmarks_bins, luminosity, sum_events=False
)
return bin_boundaries, sigma_bins, fisher_info_rate_bins, fisher_info_full_bins
def extract_raw_data(self, theta=None):
"""
Returns all events together with the benchmark weights (if theta is None) or weights for a given theta.
Parameters
----------
theta : None or ndarray, optional
If None, the function returns the benchmark weights. Otherwise it uses morphing to calculate the weights for
this value of theta. Default value: None.
Returns
-------
x : ndarray
Observables with shape `(n_unweighted_samples, n_observables)`.
weights : ndarray
If theta is None, benchmark weights with shape `(n_unweighted_samples, n_benchmarks)` in pb. Otherwise,
weights for the given parameter theta with shape `(n_unweighted_samples,)` in pb.
"""
x, weights_benchmarks = next(madminer_event_loader(self.madminer_filename, batch_size=None))
if theta is not None:
theta_matrix = get_theta_benchmark_matrix("morphing", theta, self.benchmarks, self.morpher)
weights_theta = theta_matrix.dot(weights_benchmarks.T)
return x, weights_theta
return x, weights_benchmarks
def extract_observables_and_weights(self, thetas):
"""
Extracts observables and weights for given parameter points.
Parameters
----------
thetas : ndarray
Parameter points, with shape `(n_thetas, n_parameters)`.
Returns
-------
x : ndarray
Observations `x` with shape `(n_events, n_observables)`.
weights : ndarray
Weights `dsigma(x|theta)` in pb with shape `(n_thetas, n_events)`.
"""
x, weights_benchmarks = next(madminer_event_loader(self.madminer_filename, batch_size=None))
weights_thetas = []
for theta in thetas:
theta_matrix = get_theta_benchmark_matrix("morphing", theta, self.benchmarks, self.morpher)
weights_thetas.append(theta_matrix.dot(weights_benchmarks.T))
weights_thetas = np.array(weights_thetas)
return x, weights_thetas
from __future__ import division, print_function
# Turn off plotting imports for production
if False:
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import argparse
import sys
import numpy as np
from scipy.stats import sigmaclip
import fitsio
from astropy.io import fits as fits_astropy
from astropy.table import Table, vstack
from photutils import CircularAperture, aperture_photometry
from astrometry.util.file import trymakedirs
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.util.util import wcs_pv2sip_hdr
from astrometry.util.ttime import Time
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
from tractor.splinesky import SplineSky
import legacypipe
from legacypipe.ps1cat import ps1cat
from legacypipe.gaiacat import GaiaCatalog
from legacypipe.survey import radec_at_mjd, get_git_version
from legacypipe.image import validate_procdate_plver
CAMERAS=['decam','mosaic','90prime','megaprime']
def ptime(text,t0):
tnow=Time()
print('TIMING:%s ' % text,tnow-t0)
return tnow
def read_lines(fn):
fin=open(fn,'r')
lines=fin.readlines()
fin.close()
if len(lines) < 1: raise ValueError('lines not read properly from %s' % fn)
return np.array( list(np.char.strip(lines)) )
def astropy_to_astrometry_table(t):
T = fits_table()
for c in t.colnames:
T.set(c, t[c])
return T
def _ccds_table(camera='decam'):
'''Initialize the CCDs table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
max_camera_length = max([len(c) for c in CAMERAS])
if max_camera_length > 9:
print('Warning! Increase camera length header card to S{}'.format(max_camera_length))
cols = [
('err_message', 'S30'),
('image_filename', 'S120'),
('image_hdu', 'i2'),
('camera', 'S9'),
('expnum', 'i8'),
('plver', 'S8'),
('procdate', 'S19'),
('plprocid', 'S7'),
('ccdname', 'S4'),
('ccdnum', 'i2'),
('expid', 'S17'),
('object', 'S35'),
('propid', 'S10'),
('filter', 'S1'),
('exptime', 'f4'),
('date_obs', 'S26'),
('mjd_obs', 'f8'),
('ut', 'S15'),
('ha', 'S13'),
('airmass', 'f4'),
('fwhm', 'f4'),
('fwhm_cp', 'f4'),
('gain', 'f4'),
('width', 'i2'),
('height', 'i2'),
('ra_bore', 'f8'),
('dec_bore', 'f8'),
('crpix1', 'f4'),
('crpix2', 'f4'),
('crval1', 'f8'),
('crval2', 'f8'),
('cd1_1', 'f4'),
('cd1_2', 'f4'),
('cd2_1', 'f4'),
('cd2_2', 'f4'),
('pixscale', 'f4'),
('zptavg', 'f4'),
('yshift', 'bool'),
# -- CCD-level quantities --
('ra', 'f8'),
('dec', 'f8'),
('skysb', 'f4'),
('skycounts', 'f4'),
('skyrms', 'f4'),
('sig1', 'f4'),
('nstars_photom', 'i2'),
('nstars_astrom', 'i2'),
('goodps1', 'i2'),
('goodps1_wbadpix5', 'i2'),
('phoff', 'f4'),
('phrms', 'f4'),
('zpt', 'f4'),
('zpt_wbadpix5', 'f4'),
('transp', 'f4'),
('raoff', 'f4'),
('decoff', 'f4'),
('rarms', 'f4'),
('decrms', 'f4'),
('rastddev', 'f4'),
('decstddev', 'f4'),
]
ccds = Table(np.zeros(1, dtype=cols))
return ccds
def _stars_table(nstars=1):
'''Initialize the stars table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
cols = [('image_filename', 'S100'),('image_hdu', 'i2'),
('expid', 'S16'), ('filter', 'S1'),('nstars', 'i2'),
('x', 'f4'), ('y', 'f4'), ('expnum', 'i8'),
('plver', 'S8'), ('procdate', 'S19'), ('plprocid', 'S7'),
('gain', 'f4'),
('ra', 'f8'), ('dec', 'f8'), ('apmag', 'f4'),('apflux', 'f4'),('apskyflux', 'f4'),('apskyflux_perpix', 'f4'),
('radiff', 'f8'), ('decdiff', 'f8'),
('ps1_mag', 'f4'),
('gaia_g','f8'),('ps1_g','f8'),('ps1_r','f8'),('ps1_i','f8'),('ps1_z','f8'),
('exptime', 'f4')]
stars = Table(np.zeros(nstars, dtype=cols))
return stars
def get_pixscale(camera):
return {'decam':0.262,
'mosaic':0.262,
'90prime':0.455,
'megaprime':0.185}[camera]
def cols_for_survey_table(which='all'):
"""Return list of -survey.fits table colums
Args:
which: all, numeric,
nonzero_diff (numeric and expect non-zero diff with reference
when compute it)
"""
assert(which in ['all','numeric','nonzero_diff'])
martins_keys = ['airmass', '<KEY>']
gods_keys = ['plver', 'procdate', 'plprocid', 'ccdnastrom', 'ccdnphotom']
if which == 'all':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'image_filename','image_hdu','expnum','ccdname','object',
'filter','exptime','camera','width','height','propid',
'mjd_obs',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'ccdrarms', 'ccddecrms', 'ccdskycounts',
'ccdphrms',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms', 'sig1', 'yshift']
elif which == 'numeric':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'expnum',
'exptime','width','height',
'mjd_obs',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms']
elif which == 'nonzero_diff':
need_arjuns_keys= ['ra','dec',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff']
dustins_keys= ['skyrms']
return need_arjuns_keys + dustins_keys + martins_keys + gods_keys
def write_survey_table(T, surveyfn, camera=None, bad_expid=None):
from legacyzpts.psfzpt_cuts import add_psfzpt_cuts
assert(camera in CAMERAS)
need_keys = cols_for_survey_table(which='all')
# Rename
rename_keys= [('zpt','ccdzpt'),
('zptavg','zpt'),
('raoff','ccdraoff'),
('decoff','ccddecoff'),
('skycounts', 'ccdskycounts'),
('skysb', 'ccdskysb'),
('rarms', 'ccdrarms'),
('decrms', 'ccddecrms'),
('phrms', 'ccdphrms'),
('nstars_astrom','ccdnastrom'),
('nstars_photom','ccdnphotom')]
for old,new in rename_keys:
T.rename(old,new)
# Delete
del_keys= list( set(T.get_columns()).difference(set(need_keys)) )
for key in del_keys:
T.delete_column(key)
# precision
T.width = T.width.astype(np.int16)
T.height = T.height.astype(np.int16)
T.cd1_1 = T.cd1_1.astype(np.float32)
T.cd1_2 = T.cd1_2.astype(np.float32)
T.cd2_1 = T.cd2_1.astype(np.float32)
T.cd2_2 = T.cd2_2.astype(np.float32)
# add_psfzpt_cuts(T, camera, bad_expid)
# We now run this as a separate step at the end via
# update_ccd_cuts.
# replace with placeholder that masks everything until this is run.
from legacyzpts import psfzpt_cuts
T.ccd_cuts = np.zeros(len(T), np.int16) + psfzpt_cuts.CCD_CUT_BITS['err_legacyzpts']
writeto_via_temp(surveyfn, T)
print('Wrote %s' % surveyfn)
def create_annotated_table(leg_fn, ann_fn, camera, survey, mp):
from legacyzpts.annotate_ccds import annotate, init_annotations
T = fits_table(leg_fn)
T = survey.cleanup_ccds_table(T)
init_annotations(T)
annotate(T, survey, mp=mp, mzls=(camera == 'mosaic'), bass=(camera == '90prime'),
normalizePsf=True, carryOn=True)
writeto_via_temp(ann_fn, T)
print('Wrote %s' % ann_fn)
def getrms(x):
return np.sqrt( np.mean( np.power(x,2) ) )
class Measurer(object):
"""Main image processing functions for all cameras.
Args:
match_radius: arcsec matching to gaia/ps1
sn_min,sn_max: if not None then the {min,max} S/N will be enforced from
aperture photometry, where S/N = apflux/sqrt(skyflux)
"""
def __init__(self, fn, image_dir='images',
calibrate=False, quiet=False,
**kwargs):
self.quiet = quiet
# Set extra kwargs
self.zptsfile= kwargs.get('zptsfile')
self.prefix= kwargs.get('prefix')
self.verboseplots= kwargs.get('verboseplots')
self.fn = os.path.join(image_dir, fn)
self.fn_base = fn
self.debug= kwargs.get('debug')
self.outdir= kwargs.get('outdir')
self.calibdir = kwargs.get('calibdir')
self.calibrate = calibrate
self.primhdr = self.read_primary_header()
self.band = self.get_band()
# CP WCS succeed?
self.goodWcs = self.good_wcs(self.primhdr)
# Camera-agnostic primary header cards
try:
self.propid = self.primhdr['PROPID']
except KeyError:
self.propid = self.primhdr.get('DTPROPID')
self.exptime = self.primhdr['EXPTIME']
self.date_obs = self.primhdr['DATE-OBS']
self.mjd_obs = self.primhdr['MJD-OBS']
self.ut = self.get_ut(self.primhdr)
# Add more attributes.
namechange = dict(date='procdate')
for key in ['AIRMASS','HA', 'DATE', 'PLVER', 'PLPROCID']:
val = self.primhdr.get(key)
if type(val) == str:
val = val.strip()
if len(val) == 0:
raise ValueError('Empty header card: %s' % key)
attrkey = namechange.get(key.lower(), key.lower())
setattr(self, attrkey, val)
self.ra_bore,self.dec_bore = self.get_radec_bore(self.primhdr)
if self.airmass is None:
# Recompute it
site = self.get_site()
if site is None:
print('AIRMASS missing and site not defined.')
else:
from astropy.time import Time
from astropy.coordinates import SkyCoord, AltAz
time = Time(self.mjd_obs, format='mjd')
coords = SkyCoord(self.ra_bore, self.dec_bore, unit='deg')
altaz = coords.transform_to(AltAz(obstime=time, location=site))
self.airmass = altaz.secz
self.expnum = self.get_expnum(self.primhdr)
if not quiet:
print('CP Header: EXPNUM = ',self.expnum)
print('CP Header: PROCDATE = ',self.procdate)
print('CP Header: PLVER = ',self.plver)
print('CP Header: PLPROCID = ',self.plprocid)
self.obj = self.primhdr['OBJECT']
def get_extension_list(self, fn, debug=False):
raise RuntimeError('get_extension_list not implemented in type ' + str(type(self)))
def good_wcs(self, primhdr):
return primhdr.get('WCSCAL', '').strip().lower().startswith('success')
def get_site(self):
return None
def read_primary_header(self):
try:
primhdr = read_primary_header(self.fn)
except ValueError:
# astropy can handle it
tmp= fits_astropy.open(self.fn)
primhdr= tmp[0].header
tmp.close()
del tmp
return primhdr
def get_radec_bore(self, primhdr):
# {RA,DEC}: center of exposure, TEL{RA,DEC}: boresight of telescope
# In some DECam exposures, RA,DEC are floating-point, but RA is in *decimal hours*.
# In others, RA does not exist (eg CP/V4.8.2a/CP20160824/c4d_160825_062109_ooi_g_ls9.fits.fz)
# Fall back to TELRA in that case.
ra_bore = dec_bore = None
if 'RA' in primhdr.keys():
try:
ra_bore = hmsstring2ra(primhdr['RA'])
dec_bore = dmsstring2dec(primhdr['DEC'])
except:
pass
if dec_bore is None and 'TELRA' in primhdr.keys():
ra_bore = hmsstring2ra(primhdr['TELRA'])
dec_bore = dmsstring2dec(primhdr['TELDEC'])
if dec_bore is None:
raise ValueError('Failed to parse RA or TELRA in primary header to get telescope boresight')
return ra_bore, dec_bore
def get_good_image_subregion(self):
'''
Returns x0,x1,y0,y1 of the good region of this chip,
or None if no cut should be applied to that edge; returns
(None,None,None,None) if the whole chip is good.
This cut is applied in addition to any masking in the mask or
invvar map.
'''
return None,None,None,None
def get_expnum(self, primhdr):
return primhdr['EXPNUM']
def get_ut(self, primhdr):
return primhdr['TIME-OBS']
def zeropoint(self, band):
return self.zp0[band]
def extinction(self, band):
return self.k_ext[band]
def read_header(self, ext):
return fitsio.read_header(self.fn, ext=ext)
def set_hdu(self,ext):
self.ext = ext.strip()
self.ccdname= ext.strip()
self.expid = '{:08d}-{}'.format(self.expnum, self.ccdname)
hdulist= fitsio.FITS(self.fn)
self.image_hdu= hdulist[ext].get_extnum() #NOT ccdnum in header!
# use header
self.hdr = self.read_header(ext)
# Sanity check
assert(self.ccdname.upper() == self.hdr['EXTNAME'].strip().upper())
self.ccdnum = int(self.hdr.get('CCDNUM', 0))
self.gain= self.get_gain(self.hdr)
# WCS
self.wcs = self.get_wcs()
# Pixscale is assumed CONSTANT! per camera
self.height, self.width = self.get_hdu_shape(self.hdr, hdulist[ext])
self.fwhm_cp = self.get_fwhm(self.hdr, hdulist[ext])
x0,x1,y0,y1 = self.get_good_image_subregion()
if x0 is None and x1 is None and y0 is None and y1 is None:
slc = None
else:
x0 = x0 or 0
x1 = x1 or self.width
y0 = y0 or 0
y1 = y1 or self.height
slc = slice(y0,y1),slice(x0,x1)
self.slc = slc
def get_hdu_shape(self, hdr, hdu):
h,w = hdu.get_info()['dims']
return int(h), int(w)
def get_fwhm(self, hdr, hdu):
return hdr['FWHM']
def read_bitmask(self):
dqfn = self.get_bitmask_fn(self.fn)
if self.slc is not None:
mask = fitsio.FITS(dqfn)[self.ext][self.slc]
else:
mask = fitsio.read(dqfn, ext=self.ext)
mask = self.remap_bitmask(mask)
return mask
def get_bitmask_fn(self, imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','ood')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','ood')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
def get_weight_fn(self, imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','oow')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','oow')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
def remap_bitmask(self, mask):
return mask
def read_weight(self, clip=True, clipThresh=0.1, scale=True, bitmask=None):
fn = self.get_weight_fn(self.fn)
if self.slc is not None:
wt = fitsio.FITS(fn)[self.ext][self.slc]
else:
wt = fitsio.read(fn, ext=self.ext)
if bitmask is not None:
# Set all masked pixels to have weight zero.
# bitmask value 1 = bad
wt[bitmask > 0] = 0.
if clip and np.any(wt > 0):
fixed = False
try:
from legacypipe.image import fix_weight_quantization
fixed = fix_weight_quantization(wt, fn, self.ext, self.slc)
except:
import traceback
traceback.print_exc()
if not fixed:
# Clamp near-zero (incl negative!) weight to zero,
# which arise due to fpack.
if clipThresh > 0.:
thresh = clipThresh * np.median(wt[wt > 0])
else:
thresh = 0.
wt[wt < thresh] = 0
if scale:
wt = self.scale_weight(wt)
#assert(np.all(wt >= 0.))
wt[np.where(wt<0.0)] = 0.0
assert(np.all(np.isfinite(wt)))
return wt
def read_image(self):
'''Read the image and header; scale the image.'''
f = fitsio.FITS(self.fn)[self.ext]
if self.slc is not None:
img = f[self.slc]
else:
img = f.read()
hdr = f.read_header()
img = self.scale_image(img)
return img, hdr
def scale_image(self, img):
return img
def scale_weight(self, img):
return img
def remap_invvar(self, invvar, primhdr, img, dq):
# By default, *do not* remap
return invvar
# A function that can be called by a subclasser's remap_invvar() method
def remap_invvar_shotnoise(self, invvar, primhdr, img, dq):
#
# All three cameras scale the image and weight to units of electrons.
# (actually, not DECam any more! But DECamMeasurer doesn't use this
# function.)
#
print('Remapping weight map for', self.fn)
const_sky = primhdr['SKYADU'] # e/s, Recommended sky level keyword from Frank
expt = primhdr['EXPTIME'] # s
with np.errstate(divide='ignore'):
var_SR = 1./invvar # e**2
print('median img:', np.median(img), 'vs sky estimate * exptime', const_sky*expt)
var_Astro = np.abs(img - const_sky * expt) # img in electrons; Poisson process so variance = mean
wt = 1./(var_SR + var_Astro) # 1/(e**2)
# Zero out NaNs and masked pixels
wt[np.isfinite(wt) == False] = 0.
wt[dq != 0] = 0.
return wt
from django.shortcuts import render
from urllib.request import urlopen
from recommender.models import Anime, Genre
import json
from sklearn import linear_model
import numpy as np
def index(request):
return render(request, 'recommender/index.html')
def credits(request):
return render(request, 'recommender/credits.html')
def recommender(request):
try:
with urlopen('https://myanimelist.net/animelist/' + str(request.POST.get('username')) + '/load.json?status=7&offset=0') as url:
anime_list = json.loads(url.read().decode())
except:
err = 'Invalid username!'
return render(request, 'recommender/list.html', {'userid': request.POST['username'], 'error': err})
data_from_users = []
genre_list = []
user_score_list = []
list_watched = []
for ani in anime_list:
if ani['score'] != '0':
try:
obj = Anime.objects.get(aid=int(ani['anime_id']))
except Anime.DoesNotExist:
continue
list_watched.append(obj.aid)
genre_one_hot = [0] * 43
for g in obj.genre.all():
genre_one_hot[g.gid - 1] = 1
genre_list.append(genre_one_hot)
data_from_users.append([float(obj.rating), obj.members])
user_score_list.append(float(ani['score']))
elif ani['num_watched_episodes'] != '0':
list_watched.append(int(ani['anime_id']))
if len(user_score_list) == 0:
err = 'No recommendations can be generated since you haven\'t rated any anime.'
return render(request, 'recommender/list.html', {'userid': request.POST['username'], 'error': err})
data_from_users = np.array(data_from_users, dtype=float)
genre_list = np.array(genre_list, dtype=float)
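# The view is truncated at this point; the snippet below is only a hypothetical
# sketch of how the features collected above (average rating, member count and
# the one-hot genre vector) could be fed to the imported sklearn linear_model to
# score unseen shows. Everything beyond the variables already defined above is
# an assumption, not the original code.
import numpy as np
from sklearn import linear_model
features = np.hstack([data_from_users, genre_list])  # shape (n_rated, 2 + 43)
reg = linear_model.LinearRegression()
reg.fit(features, np.array(user_score_list))
# reg.predict(candidate_features) would then rank anime the user has not seen.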
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
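# Minimal usage sketch for the helper above: the returned array's data pointer
# should land on the requested boundary, which the offset arithmetic arranges.
# align=32 and the float64 dtype are arbitrary example values.
arr = _aligned_zeros((4, 4), dtype=np.float64, align=32)
ptr = arr.__array_interface__['data'][0]
assert ptr % 32 == 0, "data pointer not 32-byte aligned"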
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# itemsizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 46341 is the smallest integer greater than sqrt(2**31 - 1):
# 46340**2 = 2147395600 <= 2**31 - 1 = 2147483647 < 46341**2.
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose, np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
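# Side note (illustrative, not part of the test suite): the loop above builds a
# "median-of-3 killer" permutation, the classic adversarial input that drives a
# plain median-of-3 quicksort to quadratic time. np.sort stays fast here,
# presumably because its quicksort guards against degraded partitions with an
# introsort-style heapsort fallback, so the test only confirms the result is
# sorted. For a length-8 array the construction looks like this:
x = np.arange(8)
v = x
while v.size > 3:
    mid = v.size // 2
    v[mid], v[-2] = v[-2], v[mid]
    v = v[:-2]
print(x)  # -> [0 1 2 6 3 5 4 7]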
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
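        # for a = [[3, 2], [1, 0]] each column argsorts to [1, 0] (-> b) and
        # each row argsorts to [1, 0] (-> c); the default axis is the last one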
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
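        # numpy's sort order puts nan last, so searchsorted is expected to
        # treat nan as the largest value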
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
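        # keys are queried in decreasing order; for a = [0..4], 6 and 5 both
        # map past the end (index 5) and 4 maps to index 4 ('l') or 5 ('r')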
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
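        # (np.roll turns the sorted range into two ascending runs; per the
        # note above this pattern defeats a pure median-of-3 pivot choice)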
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy on python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
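        """Assert d is partitioned at the indices in kth: elements before each
        d[k] (since the previous kth) are strictly smaller, and everything
        from position k onwards is >= d[k]."""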
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy on python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
# check vector arg for contiguous before gemv
# gh-12156
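        # np.broadcast_to yields a non-contiguous, zero-strided view, so the
        # dot/matmul code is expected to detect this instead of handing it
        # straight to BLAS gemv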
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A(object):
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
        # zero in cblasfuncs (making it specific to floating-point dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
        # The class would need to override trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods(object):
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
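    # e.g. in the checks below: an object whose __array_ufunc__ is None makes
    # arr.__add__(obj) return NotImplemented, while a plain object that merely
    # sets a high __array_priority__ wins for binops and iops but not ufuncs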
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
if sys.version_info >= (3, 5):
ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
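        # CheckIndex.__array_ufunc__ returns the position of self: a plain int
        # when found among the inputs, a 1-tuple when found among the outputs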
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass(object):
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
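            # (__mul__/__div__ below raise, so any repeated-multiplication
            # shortcut inside the pow implementation would fail loudly)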
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
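    # "elision" means reusing the buffer of a temporary whose refcount is 1
    # (e.g. the (a + b) in (a + b) + c) instead of allocating a new result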
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
        # the returned original must not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack, to
        # check that we were called directly from python, is flawed: the object
        # may still be above the stack pointer, which we have no access to
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode; triggering it
        # in normal mode would need a matching dimension of at least 256kb,
        # i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
def test_highest_available_pickle_protocol(self):
try:
import pickle5
except ImportError:
pickle5 = None
if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
assert pickle.HIGHEST_PROTOCOL >= 5
else:
assert pickle.HIGHEST_PROTOCOL < 5
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to '
                                'use protocol 5 although it is not available'))
def test_correct_protocol5_error_message(self):
array = np.arange(10)
if sys.version_info[:2] in ((3, 6), (3, 7)):
# For the specific case of python3.6 and 3.7, raise a clear import
# error about the pickle5 backport when trying to use protocol=5
# without the pickle5 package
with pytest.raises(ImportError):
array.__reduce_ex__(5)
elif sys.version_info[:2] < (3, 6):
            # on older pythons, calling __reduce_ex__ explicitly with
            # protocol=5 raises a ValueError saying that protocol 5 is not
            # available for this python version
with pytest.raises(ValueError):
array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
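        # buffer_callback collects the out-of-band pickle.PickleBuffer objects;
        # they must be passed back to pickle.loads via the buffers argument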
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
gc.collect()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
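# A minimal sketch (not part of the test suite) of the out-of-band buffer
# mechanism exercised by TestPickling.test_f_contiguous_array above: with
# pickle protocol 5, array payloads can be handed to ``buffer_callback``
# instead of being copied into the pickle stream, and are passed back to
# ``pickle.loads`` via ``buffers``.  Assumes protocol 5 is available
# (Python >= 3.8 or the pickle5 backport).
def _out_of_band_pickle_example():
    if pickle.HIGHEST_PROTOCOL < 5:
        return None
    arr = np.arange(10.0)
    buffers = []
    data = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
    return pickle.loads(data, buffers=buffers)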
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these checks could possibly be relaxed (the previous cases used to
        # be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
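# A minimal sketch (not part of the test suite) of the NaN handling encoded
# in the ``nan_arr`` tables above and below: argmax and argmin both treat NaN
# as the extreme value, so the index of the first NaN is returned.
def _argminmax_nan_example():
    a = np.array([0.0, np.nan, 2.0, np.nan])
    return np.argmax(a), np.argmin(a)   # both give 1, the first NaN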
class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these checks could possibly be relaxed (the previous cases used to
        # be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
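# A minimal sketch (not part of the test suite) of the NaT handling checked
# in TestMinMax.test_datetime above: amin/amax skip NaT entries, and only an
# all-NaT array produces NaT.
def _minmax_nat_example():
    a = np.arange(5).astype('m8[s]')
    a[2] = 'NaT'
    return np.amin(a), np.amax(a)   # (a[0], a[4]); the NaT entry is skipped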
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
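# A minimal sketch (not part of the test suite) of the clipping behaviour
# covered by TestClip above: values are bounded to [min, max] while NaN
# entries pass through unchanged.
def _clip_example():
    x = np.array([-2.0, np.nan, 0.5, 3.0])
    return x.clip(-1, 1)   # -> [-1., nan, 0.5, 1.]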
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
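# A minimal sketch (not part of the test suite) of np.putmask as exercised
# above: values are written in place wherever the mask is True, and a short
# ``values`` argument is repeated, indexed by the flat position in ``x``.
def _putmask_example():
    x = np.arange(6)
    np.putmask(x, x > 2, [-1, -2])
    return x   # -> [0, 1, 2, -2, -1, -2]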
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
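# A minimal sketch (not part of the test suite) of the key ordering checked
# in TestLexsort above: the *last* key passed to np.lexsort is the primary
# sort key; earlier keys break ties.
def _lexsort_example():
    first = np.array(['ann', 'bob', 'ann'])
    last = np.array(['zed', 'may', 'may'])
    # sort by last name, then by first name
    return np.lexsort((first, last))   # -> [2, 1, 0]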
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
        # this should probably be supported as a file-like object,
        # but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. converting via str loses some precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
# gh-12300
with open(self.filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(self.filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, self.filename, dtype=object)
def _check_from(self, s, value, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
with CommaDecimalPointLocale():
self.test_numbers()
self.test_nan()
self.test_inf()
self.test_counted_string()
self.test_ascii()
self.test_malformed()
self.test_tofile_sep()
self.test_tofile_format()
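# A minimal sketch (not part of the test suite) of the raw round-trip used
# throughout TestIO above: tofile/fromfile store bytes only, so shape, dtype
# and byte order must be re-supplied by the reader.  ``filename`` here is a
# hypothetical path supplied by the caller.
def _tofile_fromfile_example(filename):
    x = np.arange(6, dtype='<f4').reshape(2, 3)
    x.tofile(filename)
    return np.fromfile(filename, dtype='<f4').reshape(2, 3)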
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat(object):
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
# for 1.14 all are set to non-writeable on the way to replacing the
# UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
with assert_warns(DeprecationWarning):
assert_(c.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(d.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(e.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
# UPDATEIFCOPY is removed.
assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test that it does not break the
        # allocation cache (gh-9216)
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
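# A minimal sketch (not part of the test suite) of the two resize flavours
# touched on above: ndarray.resize changes the array in place (zero-filling
# new elements, and by default refusing when other references exist, hence
# the ``refcheck=False`` escape hatch used on PyPy), while np.resize returns
# a new array.
def _resize_example():
    x = np.arange(4)
    x.resize(6, refcheck=False)   # in place, padded with zeros
    y = np.resize(x, 3)           # new array
    return x, y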
class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_names(self):
# Unicode field names are converted to ascii on Python 2:
encodable_name = u'b'
assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
# But raises UnicodeEncodeError if it can't be encoded:
nonencodable_name = u'\uc3bc'
assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
def test_fromarrays_unicode(self):
# A single name string provided to fromarrays() is allowed to be unicode
# on both Python 2 and 3:
x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
# Test that we can sort with order as a unicode field name in both Python 2 and
# 3:
name = u'b'
x = np.array([1, 3, 2], dtype=[(name, int)])
x.sort(order=name)
assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
pytest.skip('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
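# A minimal sketch (not part of the test suite) of the reinterpretation done
# in TestView.test_basic above: ``view`` re-reads the same memory under a new
# dtype without copying, so four little-endian int8 values become one int32
# (0x04030201 == 67305985).
def _view_example():
    x = np.array([1, 2, 3, 4], dtype=np.int8)
    return x.view('<i4')   # -> array([67305985], dtype=int32)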
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(object):
funcs = [_mean, _var, _std]
def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
        # of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
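# A minimal sketch (not part of the test suite) of the identity behind
# TestStats.test_var_values above: with ddof=0,
# var(x) == mean(x * conj(x)) - mean(x) * conj(mean(x)).
def _var_identity_example():
    x = np.array([1.0, 2.0, 4.0])
    return np.isclose(x.var(), np.mean(x * x) - np.mean(x) ** 2)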
class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
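# A minimal sketch (not part of the test suite) of what distinguishes vdot
# from dot, as relied on by the tests above: vdot flattens both inputs and
# conjugates the first one, so vdot(a, a) is the squared 2-norm of a.
def _vdot_example():
    a = np.array([1 + 2j, 3 + 4j])
    return np.vdot(a, a)   # -> (30+0j): |1+2j|**2 + |3+4j|**2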
class TestDot(object):
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
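# A minimal sketch (not part of the test suite) of the stacked-matrix
# broadcasting that MatmulCommon below exercises: matmul maps the product
# over the leading dimensions and, unlike dot, rejects scalar operands.
def _matmul_stacking_example():
    a = np.ones((3, 2, 4))
    b = np.ones((3, 4, 5))
    return np.matmul(a, b).shape   # -> (3, 2, 5)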
class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast ufunc matmul output"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, '')
c = c.astype(tgt.dtype)
assert_array_equal(c, tgt)
def test_out_contiguous(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
v = np.array([1, 3], dtype=float)
tgt = np.dot(a, b)
tgt_mv = np.dot(a, v)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.base is out
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
c = self.matmul(v, a.T, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
# test out contiguous in only last dim
out = np.ones((10, 2), dtype=float)
c = self.matmul(a, b, out=out[::2, :])
assert_array_equal(c, tgt)
# test transposes of out, args
out = np.ones((5, 2), dtype=float)
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
m1 = np.arange(15.).reshape(5, 3)
m2 = np.arange(21.).reshape(3, 7)
m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
vc = np.arange(10.)
vr = np.arange(6.)
m0 = np.zeros((3, 0))
@pytest.mark.parametrize('args', (
# matrix-matrix
(m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
# matrix-matrix-transpose, contiguous and non
(m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
(m3, m3.T), (m3.T, m3),
# matrix-matrix non-contiguous
(m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
# vector-matrix, matrix-vector, contiguous
(m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
# vector-matrix, matrix-vector, vector non-contiguous
(m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
# vector-matrix, matrix-vector, matrix non-contiguous
(m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
# vector-matrix, matrix-vector, both non-contiguous
(m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
# size == 0
(m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
))
def test_dot_equivalent(self, args):
r1 = np.matmul(*args)
r2 = np.dot(*args)
assert_equal(r1, r2)
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
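# Hedged note on the equivalence checked above: matmul and dot agree for the
# 1-d and 2-d operands used here, but they differ for stacked (ndim > 2)
# operands (matmul broadcasts the leading dimensions, while dot sums over the
# last axis of the first argument and the second-to-last axis of the second),
# and matmul rejects scalar arguments outright.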
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_raises(self):
assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
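    # A hedged sketch of the nudge above: rebind the name instead of mutating
    # in place.  np.matmul is used so the snippet stays valid on Python 2 as
    # well; on Python >= 3.5 the same thing is spelled "a = a @ b".
    def _matmul_rebind_sketch():
        a = np.eye(3)
        b = np.eye(3)
        a = np.matmul(a, b)  # supported replacement for the rejected "a @= b"
        return a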
def test_matmul_axes():
a = np.arange(3*4*5).reshape(3, 4, 5)
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
assert c.shape == (3, 4, 4)
d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
assert d.shape == (4, 4, 3)
e = np.swapaxes(d, 0, 2)
assert_array_equal(e, c)
f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
assert f.shape == (4, 5)
class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaults or gives a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
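# Hedged sketch of the contraction rule exercised above: for multi-dimensional
# inputs, np.inner contracts the last axis of each operand, so it agrees with
# the equivalent np.tensordot call over those axes.
def _inner_as_tensordot_sketch():
    a = np.arange(24).reshape(2, 3, 4)
    b = np.arange(24, 48).reshape(2, 3, 4)
    contracted = np.tensordot(a, b, axes=(a.ndim - 1, b.ndim - 1))
    return np.array_equal(np.inner(a, b), contracted)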
class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
assert_equal(np.alen(m), 2)
m = [1, 2, 3]
assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
assert_equal(np.alen(m), 2)
def test_singleton(self):
assert_equal(np.alen(5), 1)
class TestChoose(object):
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(object):
def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
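# Hedged sketch of what the padding modes above mean for a 1-d array: the same
# extensions can be produced with np.pad, where 'circular' corresponds to
# mode='wrap' and 'mirror' to mode='symmetric'.
def _neigh_mode_padding_sketch():
    x = np.linspace(1, 5, 5)
    zero = np.pad(x, 2, mode='constant', constant_values=0)
    circular = np.pad(x, 2, mode='wrap')      # [4, 5, 1, 2, 3, 4, 5, 1, 2]
    mirror = np.pad(x, 2, mode='symmetric')   # [2, 1, 1, 2, 3, 4, 5, 5, 4]
    return zero, circular, mirror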
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter(object):
# Simple, 2d tests
def test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Simple, 1d tests
def test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test mirror modes
def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[1], NEIGH_MODE['mirror'])
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
# Circular mode
def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
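# Hedged sketch of companion cases not covered by the class above: negative
# values pick the smallest signed integer type, and Python floats pick the
# smallest floating type that holds the value without overflowing.
def _min_scalar_type_signed_sketch():
    return (np.min_scalar_type(-1),      # int8
            np.min_scalar_type(-2**15),  # int16
            np.min_scalar_type(1e50))    # float64 (overflows float16/float32)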
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
        # Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
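# Hedged sketch of the reverse direction of the checks above: an ndarray
# advertises its own PEP 3118 format string through the buffer protocol, and
# _dtype_from_pep3118 should parse that string back into an equivalent dtype.
def _pep3118_roundtrip_sketch():
    a = np.zeros(1, dtype=[('x', 'i4'), ('y', 'f8')])
    spec = memoryview(a).format
    return _dtype_from_pep3118(spec)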
class TestNewBufferProtocol(object):
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_invalid_buffer_format(self):
# datetime64 cannot be used fully in a buffer yet
# Should be fixed in the next Numpy major release
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(3, dt)
assert_raises((ValueError, BufferError), memoryview, a)
assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError,
_multiarray_tests.get_buffer_info,
np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
def test_out_of_order_fields(self):
dt = np.dtype(dict(
formats=['<i4', '<i4'],
names=['one', 'two'],
offsets=[4, 0],
itemsize=8
))
        # out-of-order fields cannot be represented by PEP3118
arr = np.empty(1, dt)
with assert_raises(ValueError):
memoryview(arr)
def test_max_dims(self):
a = np.empty((1,) * 32)
self._check_roundtrip(a)
@pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
for dim in shape[::-1]:
t = dim * t
return t
# construct a memoryview with 33 dimensions
c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
m = memoryview(c_u8_33d())
assert_equal(m.ndim, 33)
assert_raises_regex(
RuntimeError, "ndim",
np.array, m)
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
assert_('&' in m.format)
assert_raises_regex(
ValueError, "format string",
np.array, m)
def test_error_message_unsupported(self):
# wchar has no corresponding numpy type - if this changes in future, we
# need a better way to construct an invalid memoryview format.
t = ctypes.c_wchar * 4
with assert_raises(ValueError) as cm:
np.array(t())
exc = cm.exception
if sys.version_info.major > 2:
with assert_raises_regex(
NotImplementedError,
r"Unrepresentable .* 'u' \(UCS-2 strings\)"
):
raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
def test_ctypes_struct_via_memoryview(self):
# gh-10528
class foo(ctypes.Structure):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
assert_equal(arr['a'], 1)
assert_equal(arr['b'], 2)
f.a = 3
assert_equal(arr['a'], 3)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046: should not segfault, should raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
class TestArrayInterface():
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@pytest.mark.parametrize('val, iface, expected', [
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {'shape': ()}, 0.5),
(f, {'shape': None}, TypeError),
(f, {'shape': (1, 1)}, [[0.5]]),
(f, {'shape': (2,)}, ValueError),
(f, {'strides': ()}, 0.5),
(f, {'strides': (2,)}, ValueError),
(f, {'strides': 16}, TypeError),
])
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {'typestr': 'f8'}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype('f8'))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == 'f8'
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype('f8'))
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_array_interface_empty_shape():
# See gh-7994
arr = np.array([1, 2, 3])
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
class DummyArray1(object):
__array_interface__ = interface1
    # NOTE: Because Py2 str / Py3 bytes support the buffer interface, setting
    # the interface data to bytes exercises the bug this tests for: previously,
    # __array_interface__ with shape=() was not allowed when the data was an
    # object exposing the buffer interface
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
class DummyArray2(object):
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
arr2 = np.asarray(DummyArray2())
arr3 = arr[:1].reshape(())
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
_multiarray_tests.test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array; it needs to be larger
        # than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
gc.collect()
_multiarray_tests.test_pydatamem_seteventhook_end()
class TestMapIter(object):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
_multiarray_tests.test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
_multiarray_tests.test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(object):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(object):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible(object):
def __bool__(self):
raise NotImplementedError
__nonzero__ = __bool__ # python 2
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
self_containing = np.array([None])
self_containing[0] = self_containing
try:
Error = RecursionError
except NameError:
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
assert_equal(int_func(np.array([1])), 1)
assert_equal(int_func(np.array([0])), 0)
assert_equal(int_func(np.array([[42]])), 42)
assert_raises(TypeError, int_func, np.array([1, 2]))
# gh-9972
assert_equal(4, int_func(np.array('4')))
assert_equal(5, int_func(np.bytes_(b'5')))
assert_equal(6, int_func(np.unicode_(u'6')))
class HasTrunc:
def __trunc__(self):
return 3
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
class NotConvertible(object):
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
int_func, np.array(NotConvertible()))
assert_raises(NotImplementedError,
int_func, np.array([NotConvertible()]))
class TestWhere(object):
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
def test_empty_result(self):
# pass empty where result through an assignment which reads the data of
# empty arrays, error detectable with valgrind, see gh-8922
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.))
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for i in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(object):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(object):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
assert_(not isinstance(x, collections_abc.Hashable))
class TestArrayPriority(object):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
    # See #7949. Don't use the "/" operator with the -3 switch, since Python
    # reports it as a DeprecationWarning
if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(object):
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
assert_(a)
class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
assert_(a)
class TestFormat(object):
def test_0d(self):
a = np.array(np.pi)
assert_equal('{:0.3g}'.format(a), '3.14')
assert_equal('{:0.3g}'.format(a[()]), '3.14')
def test_1d_no_format(self):
a = np.array([np.pi])
assert_equal('{}'.format(a), str(a))
def test_1d_format(self):
# until gh-5543, ensure that the behaviour matches what it used to be
a = np.array([np.pi])
if sys.version_info[:2] >= (3, 4):
assert_raises(TypeError, '{:30}'.format, a)
else:
with suppress_warnings() as sup:
sup.filter(PendingDeprecationWarning)
res = '{:30}'.format(a)
dst = object.__format__(a, '30')
assert_equal(res, dst)
class TestCTypes(object):
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
from numpy.core import _internal
_internal.ctypes = None
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_(isinstance(test_arr.ctypes._ctypes,
_internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
def _make_readonly(x):
x.flags.writeable = False
return x
@pytest.mark.parametrize('arr', [
np.array([1, 2, 3]),
np.array([['one', 'two'], ['three', 'four']]),
np.array((1, 2), dtype='i4,i4'),
np.zeros((2,), dtype=
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
),
np.array([None], dtype=object),
np.array([]),
np.empty((0, 0)),
_make_readonly(np.array([1, 2, 3])),
], ids=[
'1d',
'2d',
'structured',
'overlapping',
'object',
'empty',
'empty-2d',
'readonly'
])
def test_ctypes_data_as_holds_reference(self, arr):
# gh-9647
# create a copy to ensure that pytest does not mess with the refcounts
arr = arr.copy()
arr_ref = weakref.ref(arr)
ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
# `ctypes_ptr` should hold onto `arr`
del arr
gc.collect()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
gc.collect()
        assert_(arr_ref() is None, "ctypes pointer did not release the reference")
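# Hedged sketch of the distinction guarded above: `a.ctypes.data` is a plain
# integer and keeps nothing alive, whereas `a.ctypes.data_as(...)` returns a
# ctypes object that (per gh-9647) holds a reference to the array it points at.
def _ctypes_pointer_lifetime_sketch():
    a = np.arange(3.0)
    raw = a.ctypes.data                                      # bare address
    ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))  # keeps `a` alive
    return raw, ptr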
class TestWritebackIfCopy(object):
# all these tests use the WRITEBACKIFCOPY mechanism
def test_argmax_with_out(self):
mat = np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmax(mat, 0, out=out)
assert_equal(res, range(5))
def test_argmin_with_out(self):
mat = -np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
def test_clip_with_out(self):
mat = np.eye(5)
out = np.eye(5, dtype='i2')
res = np.clip(mat, a_min=-10, a_max=0, out=out)
assert_(res is out)
assert_equal(np.sum(out), 0)
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_insert
np.place(a, a>2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
# hit one of the failing paths
assert_raises(ValueError, np.place, a, a>20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_putmask
np.putmask(a, a>2, a**2)
assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
def test_take_mode_raise(self):
a = np.arange(6, dtype='int')
out = np.empty(2, dtype='int')
np.take(a, [0, 2], out=out, mode='raise')
assert_equal(out, np.array([0, 2]))
def test_choose_mod_raise(self):
a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
out = np.empty((3,3), dtype='int')
choices = [-10, 10]
np.choose(a, choices, out=out, mode='raise')
assert_equal(out, np.array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]]))
def test_flatiter__array__(self):
a = np.arange(9).reshape(3,3)
b = a.T.flat
c = b.__array__()
# triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
del c
def test_dot_out(self):
# if HAVE_CBLAS, will use WRITEBACKIFCOPY
a = np.arange(9, dtype=float).reshape(3,3)
b = np.dot(a, a, out=a)
assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_resolve(arr_wb)
# arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, -100)
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
arr = np.arange(9).reshape(3, 3)
v = arr.T
_multiarray_tests.npy_abuse_writebackifcopy(v)
assert len(sup.log) == 1
def test_view_discard_refcount(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
arr = np.arange(9).reshape(3, 3).T
orig = arr.copy()
if HAS_REFCOUNT:
arr_cnt = sys.getrefcount(arr)
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_discard(arr_wb)
# arr remains unchanged after discard
assert_equal(arr, orig)
# after discard, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
if HAS_REFCOUNT:
assert_equal(arr_cnt, sys.getrefcount(arr))
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, orig)
class TestArange(object):
def test_infinite(self):
assert_raises_regex(
ValueError, "size exceeded",
np.arange, 0, np.inf
)
def test_nan_step(self):
assert_raises_regex(
ValueError, "cannot compute length",
np.arange, 0, 1, np.nan
)
def test_zero_step(self):
assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
# empty range
assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
class TestArrayFinalize(object):
""" Tests __array_finalize__ """
def test_receives_base(self):
# gh-11237
class SavesBase(np.ndarray):
def __array_finalize__(self, obj):
self.saved_base = self.base
a = np.array(1).view(SavesBase)
assert_(a.saved_base is a.base)
def test_lifetime_on_error(self):
# gh-11237
class RaisesInFinalize(np.ndarray):
def __array_finalize__(self, obj):
# crash, but keep this object alive
raise Exception(self)
# a plain object can't be weakref'd
class Dummy(object): pass
# get a weak reference to an object within an array
obj_arr = np.array(Dummy())
obj_ref = weakref.ref(obj_arr[()])
# get an array that crashed in __array_finalize__
with assert_raises(Exception) as e:
obj_arr.view(RaisesInFinalize)
if sys.version_info.major == 2:
# prevent an extra reference being kept
sys.exc_clear()
obj_subarray = e.exception.args[0]
del e
assert_(isinstance(obj_subarray, RaisesInFinalize))
# reference should still be held by obj_arr
gc.collect()
assert_(obj_ref() is not None, "object should not already be dead")
del obj_arr
gc.collect()
assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
del obj_subarray
gc.collect()
assert_(obj_ref() is None, "no references should remain")
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
class MyAlwaysEqual(object):
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
class MyAlwaysEqualOld(MyAlwaysEqual):
__array_priority__ = 10000
class MyAlwaysEqualNew(MyAlwaysEqual):
__array_ufunc__ = None
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
my_always_equal = my_always_equal_cls()
assert_equal(my_always_equal == array, 'eq')
assert_equal(array == my_always_equal, 'eq')
assert_equal(my_always_equal != array, 'ne')
assert_equal(array != my_always_equal, 'ne')
def test_npymath_complex():
# Smoketest npymath functions
from numpy.core._multiarray_tests import (
npy_cabs, npy_carg)
funcs = {npy_cabs: np.absolute,
npy_carg: np.angle}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.complex64, np.complex128, np.clongdouble)
for fun, npfun in funcs.items():
for x, y in itertools.product(vals, vals):
for t in types:
z = t(complex(x, y))
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_npymath_real():
# Smoketest npymath functions
from numpy.core._multiarray_tests import (
npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
funcs = {npy_log10: np.log10,
npy_cosh: np.cosh,
npy_sinh: np.sinh,
npy_tan: np.tan,
npy_tanh: np.tanh}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.float32, np.float64, np.longdouble)
with np.errstate(all='ignore'):
for fun, npfun in funcs.items():
for x, t in itertools.product(vals, types):
z = t(x)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_uintalignment_and_alignment():
    # alignment code needs to satisfy these requirements:
    # 1. numpy structs match C struct layout
    # 2. ufuncs/casting is safe wrt aligned access
    # 3. copy code is safe wrt "uint aligned" access
#
# Complex types are the main problem, whose alignment may not be the same
# as their "uint alignment".
#
# This test might only fail on certain platforms, where uint64 alignment is
# not equal to complex64 alignment. The second 2 tests will only fail
# for DEBUG=1.
d1 = np.dtype('u1,c8', align=True)
d2 = np.dtype('u4,c8', align=True)
d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
# check that C struct matches numpy struct size
s = _multiarray_tests.get_struct_alignments()
for d, (alignment, size) in zip([d1,d2,d3], s):
        assert_equal(d.alignment, alignment)
# api: numpy.testing.assert_equal
import os
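# Order GPUs by PCI bus ID and hide them all ("-1") so TensorFlow falls back to the CPU.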
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import tensorflow as tf
import math
# CUDA_VISIBLE_DEVICES="-1"
d_sim_func = 1
t_sim_func = 1
alpha = 0.8
lambda_g = 1
gamma = 1
lambda_c = 1.25
start_learning_rate = 0.06
eps = 0.1
adv_reg = 0.3
factor = 25
# k_nn = 4
k_fold = 5
calc_top_k = 15
mini_batch_size = 64
cell = "VCAP"
want_to_show = "%s" %(cell)
save_path = "./sess_saver/0403_%d_UAC_sort_sr%.2f_adv%.2f_eps%.2f_d%d_t%d_fold%d/%s"%(factor, lambda_c, adv_reg, eps, d_sim_func, t_sim_func, k_fold, cell)
data_path = "../after_merge/{ce}/".format(ce=cell)
# train_data = np.load("./train_labels.npy", allow_pickle=True)
all_data = np.load("./train_data.npy", allow_pickle=True)
# api: numpy.load
# This file combines "helpers", "model" and some of the parameters to make import easier
# models.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
import types
import random
#################################################################################################################################################
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
                psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected infectious individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
        # theta_E, theta_I, psi_E, psi_I and q default to 0 in the signature,
        # so they can be assigned directly:
        self.theta_E = theta_E
        self.theta_I = theta_I
        self.psi_E = psi_E
        self.psi_I = psi_I
        self.q = q
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] -
self.numR[-1] - self.numF[-1]])
        assert (self.numS[0] >= 0), \
            "The specified initial population size N must be greater than or equal to the initial compartment counts."
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
        S, E, I, D_E, D_I, R, F = variables  # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = - (beta * S * I) / N - q * (beta_D * S * D_I) / N + xi * R + nu * N - mu_0 * S
dE = (beta * S * I) / N + q * (beta_D * S * D_I) / N - sigma * E - theta_E * psi_E * E - mu_0 * E
dI = sigma * E - gamma * I - mu_I * I - theta_I * psi_I * I - mu_0 * I
dDE = theta_E * psi_E * E - sigma_D * D_E - mu_0 * D_E
dDI = theta_I * psi_I * I + sigma_D * D_E - gamma_D * D_I - mu_D * D_I - mu_0 * D_I
dR = gamma * I + gamma_D * D_I - xi * R - mu_0 * R
dF = mu_I * I + mu_D * D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
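        # The derivatives returned above are the deterministic SEIRS(+detection) system, e.g.
        #   dS/dt = -beta*S*I/N - q*beta_D*S*D_I/N + xi*R + nu*N - mu_0*S
        #   dE/dt =  beta*S*I/N + q*beta_D*S*D_I/N - sigma*E - theta_E*psi_E*E - mu_0*E
        # with the remaining compartments following the same pattern line by line.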
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t + runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t + runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1],
self.numF[-1]]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(
lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0,
self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E,
self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if (T > 0):
self.tmax += T
else:
return False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
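        # Expected format (informal note): `checkpoints` is a dict with a 't' list of
        # times plus, optionally, a same-length list per parameter name, e.g.
        #   checkpoints = {'t': [20, 100], 'beta': [0.10, 0.03]}
        # Parameters without a valid list keep their current value at every checkpoint.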
if (checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if (param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param]) != numCheckpoints):
checkpoints[param] = [getattr(self, param)] * numCheckpoints
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if (not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if (verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime - self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if (verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if (self.t < self.tmax):
self.run_epoch(runtime=self.tmax - self.t, dt=dt)
return True
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if (t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line', plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (not ax):
fig, ax = pyplot.subplots()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF / self.N if plot_percentages else self.numF
Eseries = self.numE / self.N if plot_percentages else self.numE
Dseries = (self.numD_E + self.numD_I) / self.N if plot_percentages else (self.numD_E + self.numD_I)
D_Eseries = self.numD_E / self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I / self.N if plot_percentages else self.numD_I
Iseries = self.numI / self.N if plot_percentages else self.numI
Rseries = self.numR / self.N if plot_percentages else self.numR
Sseries = self.numS / self.N if plot_percentages else self.numS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N / 100)]
dashedReference_IDEstack = (
dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[
::int(self.N / 100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--',
label='$I+D+E$ (' + dashed_reference_label + ')', zorder=0)
if (shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (
shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (
self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF',
label='$I+D+E$ (' + shaded_reference_label + ')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if (any(Fseries) and plot_F == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries), topstack, color=color_F, alpha=0.5,
label='$F$', zorder=2)
ax.plot(numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries), color=color_F, zorder=3)
topstack = topstack + Fseries
if (any(Eseries) and plot_E == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries), topstack, color=color_E, alpha=0.5,
label='$E$', zorder=2)
ax.plot(numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries), color=color_E, zorder=3)
topstack = topstack + Eseries
if (combine_D and plot_D_E == 'stacked' and plot_D_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries), topstack, color=color_D_E,
alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot(numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries), color=color_D_E, zorder=3)
topstack = topstack + Dseries
else:
if (any(D_Eseries) and plot_D_E == 'stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries), topstack, color=color_D_E,
alpha=0.5, label='$D_E$', zorder=2)
ax.plot(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries), color=color_D_E, zorder=3)
topstack = topstack + D_Eseries
if (any(D_Iseries) and plot_D_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries), topstack, color=color_D_I,
alpha=0.5, label='$D_I$', zorder=2)
ax.plot(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries), color=color_D_I, zorder=3)
topstack = topstack + D_Iseries
if (any(Iseries) and plot_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries), topstack, color=color_I, alpha=0.5,
label='$I$', zorder=2)
ax.plot(numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries), color=color_I, zorder=3)
topstack = topstack + Iseries
if (any(Rseries) and plot_R == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries), topstack, color=color_R, alpha=0.5,
label='$R$', zorder=2)
ax.plot(numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries), color=color_R, zorder=3)
topstack = topstack + Rseries
if (any(Sseries) and plot_S == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries), topstack, color=color_S, alpha=0.5,
label='$S$', zorder=2)
ax.plot(numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries), color=color_S, zorder=3)
topstack = topstack + Sseries
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (any(Fseries) and plot_F == 'shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries), 0, color=color_F, alpha=0.5, label='$F$',
zorder=4)
ax.plot(numpy.ma.masked_where(Fseries <= 0, self.tseries), numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F, zorder=5)
if (any(Eseries) and plot_E == 'shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries), 0, color=color_E, alpha=0.5, label='$E$',
zorder=4)
ax.plot(numpy.ma.masked_where(Eseries <= 0, self.tseries), numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E, zorder=5)
        if (combine_D and (any(Dseries) and plot_D_E == 'shaded' and plot_D_I == 'shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries), 0, color=color_D_E, alpha=0.5,
label='$D_{all}$', zorder=4)
ax.plot(numpy.ma.masked_where(Dseries <= 0, self.tseries), numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_D_E, zorder=5)
else:
if (any(D_Eseries) and plot_D_E == 'shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries), 0, color=color_D_E, alpha=0.5,
label='$D_E$', zorder=4)
ax.plot(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries), color=color_D_E, zorder=5)
if (any(D_Iseries) and plot_D_I == 'shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries), 0, color=color_D_I, alpha=0.5,
label='$D_I$', zorder=4)
ax.plot(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries), color=color_D_I, zorder=5)
if (any(Iseries) and plot_I == 'shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries), 0, color=color_I, alpha=0.5, label='$I$',
zorder=4)
ax.plot(numpy.ma.masked_where(Iseries <= 0, self.tseries), numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I, zorder=5)
if (any(Sseries) and plot_S == 'shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries), 0, color=color_S, alpha=0.5, label='$S$',
zorder=4)
ax.plot(numpy.ma.masked_where(Sseries <= 0, self.tseries), numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S, zorder=5)
if (any(Rseries) and plot_R == 'shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries), 0, color=color_R, alpha=0.5, label='$R$',
zorder=4)
ax.plot(numpy.ma.masked_where(Rseries <= 0, self.tseries), numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R, zorder=5)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (any(Fseries) and plot_F == 'line'):
ax.plot(numpy.ma.masked_where(Fseries <= 0, self.tseries), numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F, label='$F$', zorder=6)
if (any(Eseries) and plot_E == 'line'):
ax.plot(numpy.ma.masked_where(Eseries <= 0, self.tseries), numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E, label='$E$', zorder=6)
        if (combine_D and (any(Dseries) and plot_D_E == 'line' and plot_D_I == 'line')):
ax.plot(numpy.ma.masked_where(Dseries <= 0, self.tseries), numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_D_E, label='$D_{all}$', zorder=6)
else:
if (any(D_Eseries) and plot_D_E == 'line'):
ax.plot(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if (any(D_Iseries) and plot_D_I == 'line'):
ax.plot(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if (any(Iseries) and plot_I == 'line'):
ax.plot(numpy.ma.masked_where(Iseries <= 0, self.tseries), numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I, label='$I$', zorder=6)
if (any(Sseries) and plot_S == 'line'):
ax.plot(numpy.ma.masked_where(Sseries <= 0, self.tseries), numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S, label='$S$', zorder=6)
if (any(Rseries) and plot_R == 'line'):
ax.plot(numpy.ma.masked_where(Rseries <= 0, self.tseries), numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R, label='$R$', zorder=6)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (len(vlines) > 0 and len(vline_colors) == 0):
vline_colors = ['gray'] * len(vlines)
if (len(vlines) > 0 and len(vline_labels) == 0):
vline_labels = [None] * len(vlines)
if (len(vlines) > 0 and len(vline_styles) == 0):
vline_styles = [':'] * len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if (vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if (plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if (legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none',
framealpha=0.9, prop={'size': 8})
if (title):
ax.set_title(title, size=12)
if (side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line', plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12, 8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if (use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I, plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title,
plot_percentages=plot_percentages)
if (show):
pyplot.show()
return fig, ax
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked', plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12, 8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if (use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I, plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title,
plot_percentages=plot_percentages)
if (show):
pyplot.show()
return fig, ax
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#################################################################################################################################################
# Main stochastic model
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
                psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected infectious individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
p_extern Probability of spontaneous infection
                p_periodic Probability of periodic testing
                period  Period for periodic testing
batch True - periodic testing done in batches of p_periodic*numNodes new people
min_time : minimum time period to pass between testing same person twice
count_non_random : count tests apart from random routine tests
policy: policy function
"""
def policy(self):
pass
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False, p_extern=0, p_periodic=0, period=1, batch=True, min_time=1,
count_non_random=False, policy=None, test_recovered=False, initT=0):
self.has_policy = False
if policy:
self.has_policy = True
policy.__name__ = "policy"
self.policy = types.MethodType(policy, self)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if (Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = {'beta': beta, 'sigma': sigma, 'gamma': gamma, 'xi': xi, 'mu_I': mu_I, 'mu_0': mu_0, 'nu': nu,
'beta_D': beta_D, 'sigma_D': sigma_D, 'gamma_D': gamma_D, 'mu_D': mu_D,
'beta_local': beta_local, 'beta_D_local': beta_D_local, 'p': p, 'q': q,
'theta_E': theta_E, 'theta_I': theta_I, 'phi_E': phi_E, 'phi_I': phi_I, 'psi_E': psi_E,
'psi_I': psi_I,
'p_extern': p_extern, 'p_periodic': p_periodic, "period": period, "batch": batch,
"min_time": min_time,
"count_non_random": count_non_random, "test_recovered": test_recovered}
self.init_parameters = dict(self.parameters)
self.update_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5 * self.numNodes)
self.numE = numpy.zeros(5 * self.numNodes)
self.numI = numpy.zeros(5 * self.numNodes)
self.numD_E = numpy.zeros(5 * self.numNodes)
self.numD_I = numpy.zeros(5 * self.numNodes)
self.numR = numpy.zeros(5 * self.numNodes)
self.numF = numpy.zeros(5 * self.numNodes)
self.numS = numpy.zeros(5 * self.numNodes)
self.N = numpy.zeros(5 * self.numNodes)
self.numTested = numpy.zeros(5 * self.numNodes)
self.numPositive = numpy.zeros(5 * self.numNodes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if isinstance(initT, (int, float)):
self.t = initT
else:
self.t = random.uniform(initT[0], initT[1])
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = self.t
self.wait_until_t = 0
self.currentR = 0
self.nodeToTest = 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - \
self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array(
[self.S] * int(self.numS[0]) + [self.E] * int(self.numE[0]) + [self.I] * int(self.numI[0]) + [
self.D_E] * int(self.numD_E[0]) + [self.D_I] * int(self.numD_I[0]) + [self.R] * int(self.numR[0]) + [
self.F] * int(self.numF[0])).reshape((self.numNodes, 1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if (store_Xseries):
self.Xseries = numpy.zeros(shape=(5 * self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0, :] = self.X.T
self.transitions = {
'StoE': {'currentState': self.S, 'newState': self.E},
'EtoI': {'currentState': self.E, 'newState': self.I},
'ItoR': {'currentState': self.I, 'newState': self.R},
'ItoF': {'currentState': self.I, 'newState': self.F},
'RtoS': {'currentState': self.R, 'newState': self.S},
'EtoDE': {'currentState': self.E, 'newState': self.D_E},
'ItoDI': {'currentState': self.I, 'newState': self.D_I},
'DEtoDI': {'currentState': self.D_E, 'newState': self.D_I},
'DItoR': {'currentState': self.D_I, 'newState': self.R},
'DItoF': {'currentState': self.D_I, 'newState': self.F},
'_toS': {'currentState': True, 'newState': self.S},
'StoNS': {'currentState': self.S, 'newState': self.S}
}
self.last_tested = numpy.zeros((self.numNodes, 1)) - 100 # everybody has a fake last tested time of -100 days
self.time_detected = 0
self.small_step = False
self.count_non_random = count_non_random
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if (node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape(
(self.numNodes, 1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numTested'] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.E)
self.nodeGroupData[groupName]['numI'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.I)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.D_I)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]['mask'] * self.X == self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + \
self.nodeGroupData[groupName]['numE'][0] + \
self.nodeGroupData[groupName]['numI'][0] + \
self.nodeGroupData[groupName]['numD_E'][0] + \
self.nodeGroupData[groupName]['numD_I'][0] + \
self.nodeGroupData[groupName]['numR'][0]
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'],
shape=(self.numNodes, 1))
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'],
shape=(self.numNodes, 1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'],
shape=(self.numNodes, 1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes, 1))
self.mu_I = numpy.array(self.parameters['mu_I']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['mu_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_I'],
shape=(self.numNodes, 1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'],
shape=(self.numNodes, 1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes, 1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes, 1))
self.p_extern = numpy.array(self.parameters['p_extern']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['p_extern'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p_extern'],
shape=(self.numNodes, 1))
# Testing-related parameters:
self.beta_D = (
numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'],
(list,
numpy.ndarray)) else numpy.full(
fill_value=self.parameters['beta_D'], shape=(self.numNodes, 1))) if self.parameters[
'beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'],
shape=(self.numNodes, 1))) if \
self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (numpy.array(self.parameters['gamma_D']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['gamma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D'],
shape=(self.numNodes, 1))) if \
self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (
numpy.array(self.parameters['mu_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_D'], shape=(self.numNodes, 1))) if \
self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'],
shape=(self.numNodes, 1))
self.theta_I = numpy.array(self.parameters['theta_I']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['theta_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_I'],
shape=(self.numNodes, 1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'],
shape=(self.numNodes, 1))
self.phi_I = numpy.array(self.parameters['phi_I']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['phi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_I'],
shape=(self.numNodes, 1))
self.psi_E = numpy.array(self.parameters['psi_E']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['psi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_E'],
shape=(self.numNodes, 1))
self.psi_I = numpy.array(self.parameters['psi_I']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['psi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_I'],
shape=(self.numNodes, 1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes, 1))
self.min_time = numpy.array(self.parameters['min_time']).reshape((self.numNodes, 1)) if isinstance(
self.parameters['min_time'], (
list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['min_time'], shape=(self.numNodes, 1))
self.p_periodic = self.parameters['p_periodic']
# numpy.array(self.parameters['p_periodic']).reshape((self.numNodes, 1)) if isinstance(
# self.parameters['p_periodic'], (list, numpy.ndarray)) else self.parameters['p_periodic']
# numpy.full(fill_value=self.parameters['p_periodic'], shape=(self.numNodes, 1))
self.period = self.parameters['period']
self.batch = self.parameters['batch']
self.count_non_random = self.parameters['count_non_random']
self.test_recovered = self.parameters['test_recovered']
# Local transmission parameters:
if (self.parameters['beta_local'] is not None):
if (isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if (isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
                if (self.beta_local.ndim == 1):
                    self.beta_local = self.beta_local.reshape((self.numNodes, 1))
                elif (self.beta_local.ndim == 2):
                    self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
# ----------------------------------------
if (self.parameters['beta_D_local'] is not None):
if (isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if (isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
                if (self.beta_D_local.ndim == 1):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
                elif (self.beta_D_local.ndim == 2):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if (self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A,
numpy.tile(self.beta_local, (1, self.numNodes))).tocsr()
elif (self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if (self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local,
(1, self.numNodes))).tocsr()
elif (self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
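        # Net effect: with a per-node column vector beta_local of shape (numNodes, 1),
        # A_beta[i, j] = A[i, j] * beta_local[i], i.e. each edge is weighted by node i's
        # own local transmission rate; A_Q_beta_D is built the same way from A_Q and
        # beta_D_local.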
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes, 1) # sums of adj matrix cols
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G) == numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G) == networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
            raise TypeError("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q) == numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q) == networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
            raise TypeError("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
        assert (self.numNodes == self.numNodes_Q), \
            "The normal and quarantine adjacency graphs must be of the same size."
return
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
        self.testing_scenario = (numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I)))
        # api: numpy.any
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (run_module_suite, TestCase, assert_equal,
assert_allclose, assert_raises, assert_)
from numpy.testing.decorators import knownfailureif
from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,
sproot, splint, insert)
import scipy.linalg as sl
from scipy.interpolate._bsplines import _not_a_knot, _augknt
import scipy.interpolate._fitpack_impl as _impl
class TestBSpline(TestCase):
def test_ctor(self):
# knots should be an ordered 1D array of finite real numbers
assert_raises((TypeError, ValueError), BSpline,
**dict(t=[1, 1.j], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
# for n+k+1 knots and degree k need at least n coefficients
assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
assert_raises(ValueError, BSpline,
**dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
# non-integer orders
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
        # basic interval cannot have measure zero (here: [1..1])
assert_raises(ValueError, BSpline,
**dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
# tck vs self.tck
n, k = 11, 3
t = np.arange(n+k+1)
c = np.random.random(n)
b = BSpline(t, c, k)
assert_allclose(t, b.t)
assert_allclose(c, b.c)
assert_equal(k, b.k)
def test_tck(self):
b = _make_random_spline()
tck = b.tck
assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
assert_equal(b.k, tck[2])
# b.tck is read-only
try:
b.tck = 'foo'
except AttributeError:
pass
except:
raise AssertionError("AttributeError not raised.")
def test_degree_0(self):
xx = np.linspace(0, 1, 10)
b = BSpline(t=[0, 1], c=[3.], k=0)
assert_allclose(b(xx), 3)
b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
def test_degree_1(self):
t = [0, 1, 2, 3, 4]
c = [1, 2, 3]
k = 1
b = BSpline(t, c, k)
x = np.linspace(1, 3, 50)
assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
b(x), atol=1e-14)
assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
def test_bernstein(self):
# a special knot vector: Bernstein polynomials
k = 3
t = np.asarray([0]*(k+1) + [1]*(k+1))
c = np.asarray([1., 2., 3., 4.])
bp = BPoly(c.reshape(-1, 1), [0, 1])
bspl = BSpline(t, c, k)
xx = np.linspace(-1., 2., 10)
assert_allclose(bp(xx, extrapolate=True),
bspl(xx, extrapolate=True), atol=1e-14)
assert_allclose(splev(xx, (t, c, k)),
bspl(xx), atol=1e-14)
def test_rndm_naive_eval(self):
# test random coefficient spline *on the base interval*,
# t[k] <= x < t[-k-1]
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
y_b = b(xx)
y_n = [_naive_eval(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n, atol=1e-14)
y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n2, atol=1e-14)
def test_rndm_splev(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
def test_rndm_splrep(self):
np.random.seed(1234)
x = np.sort(np.random.random(20))
y = np.random.random(20)
tck = splrep(x, y)
b = BSpline(*tck)
t, k = b.t, b.k
xx = np.linspace(t[k], t[-k-1], 80)
assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
def test_rndm_unity(self):
b = _make_random_spline()
b.c = np.ones_like(b.c)
xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
assert_allclose(b(xx), 1.)
def test_vectorization(self):
n, k = 22, 3
t = np.sort(np.random.random(n))
c = np.random.random(size=(n, 6, 7))
b = BSpline(t, c, k)
tm, tp = t[k], t[-k-1]
xx = tm + (tp - tm) * np.random.random((3, 4, 5))
assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
def test_len_c(self):
# for n+k+1 knots, only first n coefs are used.
# and BTW this is consistent with FITPACK
n, k = 33, 3
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
# pad coefficients with random garbage
c_pad = np.r_[c, np.random.random(k+1)]
b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
dt = t[-1] - t[0]
xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
assert_allclose(b(xx), b_pad(xx), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
def test_endpoints(self):
# base interval is closed
b = _make_random_spline()
t, _, k = b.tck
tm, tp = t[k], t[-k-1]
for extrap in (True, False):
assert_allclose(b([tm, tp], extrap),
b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
def test_continuity(self):
# assert continuity at internal knots
b = _make_random_spline()
t, _, k = b.tck
assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
atol=1e-9)
def test_extrap(self):
b = _make_random_spline()
t, c, k = b.tck
dt = t[-1] - t[0]
xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
mask = (t[k] < xx) & (xx < t[-k-1])
# extrap has no effect within the base interval
assert_allclose(b(xx[mask], extrapolate=True),
b(xx[mask], extrapolate=False))
# extrapolated values agree with FITPACK
assert_allclose(b(xx, extrapolate=True),
splev(xx, (t, c, k), ext=0))
def test_default_extrap(self):
# BSpline defaults to extrapolate=True
b = _make_random_spline()
t, _, k = b.tck
xx = [t[0] - 1, t[-1] + 1]
yy = b(xx)
assert_(not np.all(np.isnan(yy)))
def test_ppoly(self):
b = _make_random_spline()
t, c, k = b.tck
pp = PPoly.from_spline((t, c, k))
xx = np.linspace(t[k], t[-k], 100)
assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
def test_derivative_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[0], t[-1], 50)
xx = np.r_[xx, t]
for der in range(1, k+1):
yd = splev(xx, (t, c, k), der=der)
assert_allclose(yd, b(xx, nu=der), atol=1e-14)
# higher derivatives all vanish
assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
def test_derivative_jumps(self):
# example from de Boor, Chap IX, example (24)
# NB: knots augmented & corresp coefs are zeroed out
# in agreement with the convention (29)
k = 2
t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
np.random.seed(1234)
c = np.r_[0, 0, np.random.random(5), 0, 0]
b = BSpline(t, c, k)
# b is continuous at x != 6 (triple knot)
x = np.asarray([1, 3, 4, 6])
assert_allclose(b(x[x != 6] - 1e-10),
b(x[x != 6] + 1e-10))
assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
# 1st derivative jumps at double knots, 1 & 6:
x0 = np.asarray([3, 4])
assert_allclose(b(x0 - 1e-10, nu=1),
b(x0 + 1e-10, nu=1))
x1 = np.asarray([1, 6])
assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
b(x1 + 1e-10, nu=1))))
# 2nd derivative is not guaranteed to be continuous either
assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
b(x + 1e-10, nu=2))))
def test_basis_element_quadratic(self):
xx = np.linspace(-1, 4, 20)
b = BSpline.basis_element(t=[0, 1, 2, 3])
assert_allclose(b(xx),
splev(xx, (b.t, b.c, b.k)), atol=1e-14)
assert_allclose(b(xx),
B_0123(xx), atol=1e-14)
b = BSpline.basis_element(t=[0, 1, 1, 2])
xx = np.linspace(0, 2, 10)
assert_allclose(b(xx),
np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
def test_basis_element_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
def test_cmplx(self):
b = _make_random_spline()
t, c, k = b.tck
cc = c * (1. + 3.j)
b = BSpline(t, cc, k)
b_re = BSpline(t, b.c.real, k)
b_im = BSpline(t, b.c.imag, k)
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
def test_nan(self):
# nan in, nan out.
b = BSpline.basis_element([0, 1, 1, 2])
assert_(np.isnan(b(np.nan)))
def test_derivative_method(self):
b = _make_random_spline(k=5)
t, c, k = b.tck
b0 = BSpline(t, c, k)
xx = np.linspace(t[k], t[-k-1], 20)
for j in range(1, k):
b = b.derivative()
assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
def test_antiderivative_method(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
# repeat with n-D array for c
c = np.c_[c, c, c]
c = np.dstack((c, c))
b = BSpline(t, c, k)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
def test_integral(self):
b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x
assert_allclose(b.integrate(0, 1), 0.5)
assert_allclose(b.integrate(1, 0), -0.5)
# extrapolate or zeros outside of [0, 2]; default is yes
assert_allclose(b.integrate(-1, 1), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
def test_subclassing(self):
# classmethods should not decay to the base class
class B(BSpline):
pass
b = B.basis_element([0, 1, 2, 2])
assert_equal(b.__class__, B)
assert_equal(b.derivative().__class__, B)
assert_equal(b.antiderivative().__class__, B)
def test_axis(self):
n, k = 22, 3
t = np.linspace(0, 1, n + k + 1)
sh0 = [6, 7, 8]
for axis in range(4):
sh = sh0[:]
sh.insert(axis, n) # [22, 6, 7, 8] etc
c = np.random.random(size=sh)
b = BSpline(t, c, k, axis=axis)
assert_equal(b.c.shape,
[sh[axis],] + sh[:axis] + sh[axis+1:])
xp = np.random.random((3, 4, 5))
assert_equal(b(xp).shape,
sh[:axis] + list(xp.shape) + sh[axis+1:])
#0 <= axis < c.ndim
for ax in [-1, len(sh)+1]:
assert_raises(ValueError, BSpline, **dict(t=t, c=c, k=k, axis=ax))
# derivative, antiderivative keeps the axis
for b1 in [BSpline(t, c, k, axis=axis).derivative(),
BSpline(t, c, k, axis=axis).derivative(2),
BSpline(t, c, k, axis=axis).antiderivative(),
BSpline(t, c, k, axis=axis).antiderivative(2)]:
assert_equal(b1.axis, b.axis)
def test_knots_multiplicity():
# Take a spline w/ random coefficients, throw in knots of varying
# multiplicity.
def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
# check evaluations against FITPACK, incl extrapolations
t, c, k = b.tck
x = np.unique(t)
x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:1]), t[-1]+0.1]
assert_allclose(splev(x, (t, c, k), der), b(x, der),
atol=atol, rtol=rtol, err_msg='der = %s k = %s' % (der, b.k))
# test loop itself
# [the index `j` is for interpreting the traceback in case of a failure]
for k in [1, 2, 3, 4, 5]:
b = _make_random_spline(k=k)
for j, b1 in enumerate(_make_multiples(b)):
yield check_splev, b1, j
for der in range(1, k+1):
yield check_splev, b1, j, der, 1e-12, 1e-12
### stolen from @pv, verbatim
def _naive_B(x, k, i, t):
"""
Naive way to compute B-spline basis functions. Useful only for testing!
computes B(x; t[i],..., t[i+k+1])
"""
if k == 0:
return 1.0 if t[i] <= x < t[i+1] else 0.0
if t[i+k] == t[i]:
c1 = 0.0
else:
c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
if t[i+k+1] == t[i+1]:
c2 = 0.0
else:
c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
return (c1 + c2)
### stolen from @pv, verbatim
def _naive_eval(x, t, c, k):
"""
Naive B-spline evaluation. Useful only for testing!
"""
if x == t[k]:
i = k
else:
i = np.searchsorted(t, x) - 1
assert t[i] <= x <= t[i+1]
assert i >= k and i < len(t) - k
return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
def _naive_eval_2(x, t, c, k):
"""Naive B-spline evaluation, another way."""
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
assert t[k] <= x <= t[n]
return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
def _sum_basis_elements(x, t, c, k):
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
s = 0.
for i in range(n):
b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
return s
def B_012(x):
""" A linear B-spline function B(x | 0, 1, 2)."""
x = np.atleast_1d(x)
return np.piecewise(x, [(x < 0) | (x > 2),
(x >= 0) & (x < 1),
(x >= 1) & (x <= 2)],
[lambda x: 0., lambda x: x, lambda x: 2.-x])
def B_0123(x, der=0):
"""A quadratic B-spline function B(x | 0, 1, 2, 3)."""
x = np.atleast_1d(x)
conds = [x < 1, (x > 1) & (x < 2), x > 2]
if der == 0:
funcs = [lambda x: x*x/2.,
lambda x: 3./4 - (x-3./2)**2,
lambda x: (3.-x)**2 / 2]
elif der == 2:
funcs = [lambda x: 1.,
lambda x: -2.,
lambda x: 1.]
else:
raise ValueError('never be here: der=%s' % der)
pieces = np.piecewise(x, conds, funcs)
return pieces
def _make_random_spline(n=35, k=3):
np.random.seed(123)
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
return BSpline.construct_fast(t, c, k)
def _make_multiples(b):
"""Increase knot multiplicity."""
c, k = b.c, b.k
t1 = b.t.copy()
t1[17:19] = t1[17]
t1[22] = t1[21]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[:k+1] = t1[0]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[-k-1:] = t1[-1]
yield BSpline(t1, c, k)
class TestInterop(object):
#
# Test that FITPACK-based spl* functions can deal with BSpline objects
#
def __init__(self):
        xx = np.linspace(0, 4.*np.pi, 41)
from .loader import Loader
import tensorflow as tf
import threading
import numpy as np
import time
import glob
import os
import imageio
import cv2
import deepdish as dd
SAMPLES_PER_VIDEO = 1
SAMPLES_PER_FRAME = 1
FRAMES = 6
def pad(x, min_side):
if np.min(x.shape[:2]) >= min_side:
return x
else:
sh = (max(min_side, x.shape[0]), max(min_side, x.shape[1])) + x.shape[2:]
new_x = np.zeros(sh, dtype=x.dtype)
new_x[:x.shape[0], :x.shape[1]] = x
return new_x
def extract_optical_flow(fn, n_frames=34):
img = dd.image.load(fn)
if img.shape != (128*34, 128, 3):
return []
frames = np.array_split(img, 34, axis=0)
grayscale_frames = [fr.mean(-1) for fr in frames]
mags = []
skip_frames = np.random.randint(34 - n_frames + 1)
middle_frame = frames[np.random.randint(skip_frames, skip_frames+n_frames)]
im0 = grayscale_frames[skip_frames]
for f in range(1+skip_frames, 1+skip_frames+n_frames-1):
im1 = grayscale_frames[f]
flow = cv2.calcOpticalFlowFarneback(im0, im1,
None, # flow
0.5, # pyr_scale
3, # levels
np.random.randint(3, 20), # winsize
3, #iterations
5, #poly_n
1.2, #poly_sigma
0 # flags
)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
mags.append(mag)
im0 = im1
    mag = np.sum(mags, 0)
# *****************************COPYRIGHT******************************
# (C) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file LICENCE.txt
# which you should have received as part of this distribution.
# *****************************COPYRIGHT******************************
#
# This file is part of Mule.
#
# Mule is free software: you can redistribute it and/or modify it under
# the terms of the Modified BSD License, as published by the
# Open Source Initiative.
#
# Mule is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Modified BSD License for more details.
#
# You should have received a copy of the Modified BSD License
# along with Mule. If not, see <http://opensource.org/licenses/BSD-3-Clause>.
"""
This module provides tools for interacting with "pp" files.
"""
from __future__ import (absolute_import, division, print_function)
import six
import mule
import numpy as np
GRID_STAGGER = {3: "new_dynamics", 6: "endgame"}
# Borrow the Mule field class, since a Field in a pp file is essentially the
# same as a Field in a UM file; but adjust the data types since it is 32-bit
class PPField(mule.Field):
DTYPE_INT = ">i4"
DTYPE_REAL = ">f4"
# As above but with header release 2 headers
class PPField2(PPField, mule.Field2):
pass
# As above but with header release 3 headers
class PPField3(PPField, mule.Field3):
pass
# Mapping to go from release number to field object
FIELD_SELECT = {2: PPField2, 3: PPField3}
# Create mappings for the lbpack n3-n1 digits (similar to how the mule file
# classes contain mappings like these). The only real difference is that the
# "Unpacked" provider uses the 32-bit class (since PP files are 32-bit)
_READ_PROVIDERS = {
"000": mule.ff._ReadFFProviderCray32Packed,
"001": mule.ff._ReadFFProviderWGDOSPacked,
}
_WRITE_OPERATORS = {
"000": mule.ff._WriteFFOperatorCray32Packed(),
"001": mule.ff._WriteFFOperatorWGDOSPacked(),
}
def file_is_pp_file(file_path):
"""
Checks to see if a given file is a pp file.
Args:
* file_path:
Path to the file to be checked.
Returns:
* True if file is a pp file, False otherwise.
"""
# The logic behind this is that the first 32-bit word of a pp file should
# be the record length of the first record (a lookup entry). Since this
# has 64, 4-byte words we check to see if it is 64*4 = 256. In a regular
# UM File the first 64-bit word should be either 15, 20 or IMDI, and in
# each of these cases it is not possible for the first half of the word
# to be 256, making this a safe way to detect a pp file.
first_word = np.fromfile(file_path, dtype=">i4", count=1)
return first_word == 256
def fields_from_pp_file(pp_file_obj_or_path):
"""
Reads in a PP file as a list of field objects.
Args:
* pp_file_obj_or_path:
Either an (opened) file object, or the path
to a file containing the pp data.
Returns:
* pp_fields
List of :class:`mule.pp.PPField` objects.
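
    A brief usage sketch (the file path here is hypothetical); each returned
    field exposes the usual lookup headers such as ``lbrel`` and ``lbpack``::

        fields = fields_from_pp_file("example.pp")
        for field in fields:
            print(field.lbrel, field.lbpack)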
"""
if isinstance(pp_file_obj_or_path, six.string_types):
pp_file = open(pp_file_obj_or_path, "rb")
else:
pp_file = pp_file_obj_or_path
field_count = 0
fields = []
while True:
# Increment counter
field_count += 1
# Read the record length
reclen = np.fromfile(pp_file, ">i4", 1)
# Check for end of file
if len(reclen) == 0:
break
else:
reclen = reclen[0]
if reclen != 256:
msg = "Field {0}; Incorrectly sized lookup record: {1}"
raise ValueError(msg.format(field_count, reclen))
# Read the record (the header)
ints = np.fromfile(pp_file, ">i4", mule.Field.NUM_LOOKUP_INTS)
reals = np.fromfile(pp_file, ">f4", mule.Field.NUM_LOOKUP_REALS)
# Read the check record
reclen_check = np.fromfile(pp_file, ">i4", 1)[0]
# They should match
if reclen != reclen_check:
msg = "Field {0}; Inconsistent header record lengths: {1} and {2}"
raise ValueError(msg.format(field_count, reclen, reclen_check))
# Load into the basic field class
field_ref = PPField(ints, reals, None)
# Use the release number to select a better class if possible
fclass = FIELD_SELECT.get(field_ref.lbrel, None)
if fclass is not None:
field_ref = fclass(ints, reals, None)
# Read the record length for the data
reclen = np.fromfile(pp_file, ">i4", 1)[0]
# This should be equivalent to lbnrec, but can sometimes be set to
# zero... so to allow the existing provider to work add this value
# to the reference field's headers
field_ref.lbnrec = reclen//4
# Associate the provider
offset = pp_file.tell()
# Strip just the n1-n3 digits from the lbpack value
# and check for a suitable write operator
lbpack321 = "{0:03d}".format(field_ref.lbpack -
((field_ref.lbpack//1000) % 10)*1000)
if lbpack321 not in _READ_PROVIDERS:
msg = "Field{0}; Cannot interpret unsupported packing code {1}"
raise ValueError(msg.format(field_count, lbpack321))
provider = _READ_PROVIDERS[lbpack321](field_ref, pp_file, offset)
field = type(field_ref)(ints, reals, provider)
# Now check if the field contains extra data
if field.lbext > 0:
# Skip past the field data only (relative seek to avoid overflows)
pp_file.seek((field.lblrec - field.lbext)*4, 1)
# Save the current file position
start = pp_file.tell()
# Now load in the vectors as they are encountered until the
# end of the record is reached
vectors = {}
ext_consumed = 0
while pp_file.tell() - start < field.lbext*4:
# First read the code
                vector_code = np.fromfile(pp_file, ">i4", 1)
import torch
import torch.utils.data as data
import numpy as np
import os, sys, h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
def _get_data_files(list_filename):
with open(list_filename) as f:
# return [line.rstrip()[5:] for line in f]
return np.array([line.rstrip() for line in f])
def _load_data_file(name):
    f = h5py.File(name, 'r')
data = f['data'][:]
label = f['label'][:]
return data, label
def _get_point_file(point_filename):
with open(point_filename) as f:
return np.array([line.rstrip().split() for line in f])
def _split_data(data, label, val=False):
# num_example = data.shape[0]
num_example = len(data)
arr = np.arange(num_example)
np.random.shuffle(arr)
data, label = (data[arr], label[arr])
if val:
        ratio0, ratio1 = 0.8, 0.9
        s0 = int(num_example * ratio0)
        s1 = int(num_example * ratio1)
        # samples split into train / val / test
x_train = data[:s0]
y_train = label[:s0]
x_val = data[s0:s1]
y_val = label[s0:s1]
x_test = data[s1:]
y_test = label[s1:]
return x_train, y_train, x_val, y_val, x_test, y_test
else:
ratio = 0.9
        s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_test = data[s:]
y_test = label[s:]
return x_train, y_train, x_test, y_test
def getDataFiles(list_filename):
# return [line.rstrip() for line in open(list_filename)]
    files = [line.rstrip() for line in open(list_filename)]
    train_files = files[:-2]
    test_file = []
    test_f = files[-1]
    test_file.append(test_f)
    return train_files, test_file
class ModelNet40Cls(data.Dataset):
def __init__(
self, num_points, root, transforms=None, train=True):
super().__init__()
self.transforms = transforms
root = os.path.abspath(root)
self.folder = "modelnet40_ply_hdf5_2048"
self.data_dir = os.path.join(root, self.folder)
self.train, self.num_points = train, num_points
if self.train:
self.files = _get_data_files( \
os.path.join(self.data_dir, 'train_files.txt'))
else:
self.files = _get_data_files( \
os.path.join(self.data_dir, 'test_files.txt'))
point_list, label_list = [], []
for f in self.files:
points, labels = _load_data_file(os.path.join(root, f))
point_list.append(points)
label_list.append(labels)
self.points = np.concatenate(point_list, 0)
        self.labels = np.concatenate(label_list, 0)
import numpy as np, pandas as pd
from scipy.sparse import csc_matrix, csr_matrix, issparse, isspmatrix_csc, isspmatrix_csr, vstack as sp_vstack
import warnings
import multiprocessing
import ctypes
import json
from copy import deepcopy
from ._cpp_interface import isoforest_cpp_obj, _sort_csc_indices, _reconstruct_csr_sliced, _reconstruct_csr_with_categ
__all__ = ["IsolationForest"]
### Helpers
def _get_num_dtype(X_num=None, sample_weights=None, column_weights=None):
if X_num is not None:
return np.empty(0, dtype=X_num.dtype)
    elif sample_weights is not None:
        return np.empty(0, dtype=sample_weights.dtype)
    elif column_weights is not None:
        return np.empty(0, dtype=column_weights.dtype)
else:
return np.empty(0, dtype=ctypes.c_double)
def _get_int_dtype(X_num):
if (X_num is not None) and (issparse(X_num)):
return np.empty(0, dtype=X_num.indices.dtype)
else:
return np.empty(0, dtype=ctypes.c_size_t)
def _is_row_major(X_num):
if (X_num is None) or (issparse(X_num)):
return False
else:
return X_num.strides[1] == X_num.dtype.itemsize
def _is_col_major(X_num):
if (X_num is None) or (issparse(X_num)):
return False
else:
return X_num.strides[0] == X_num.dtype.itemsize
def _copy_if_subview(X_num):
### TODO: the C++ functions should accept a 'leading dimension'
### parameter so as to avoid copying the data here
if (X_num is not None) and (not issparse(X_num)):
col_major = _is_col_major(X_num)
leading_dimension = int(X_num.strides[1 if col_major else 0] / X_num.dtype.itemsize)
if (
(leading_dimension != X_num.shape[0 if col_major else 1]) or
(len(X_num.strides) != 2) or
(not X_num.flags.aligned) or
(not _is_row_major(X_num) and not _is_col_major(X_num))
):
X_num = X_num.copy()
if _is_col_major(X_num) != col_major:
X_num = np.asfortranarray(X_num)
return X_num
class IsolationForest:
"""
Isolation Forest model
Isolation Forest is an algorithm originally developed for outlier detection that consists in splitting
sub-samples of the data according to some attribute/feature/column at random. The idea is that, the rarer
the observation, the more likely it is that a random uniform split on some feature would put outliers alone
in one branch, and the fewer splits it will take to isolate an outlier observation like this. The concept
is extended to splitting hyperplanes in the extended model (i.e. splitting by more than one column at a time), and to
guided (not entirely random) splits in the SCiForest model that aim at isolating outliers faster and
finding clustered outliers.
    This version adds heuristics to handle missing data and categorical variables. Can be used to approximate pairwise
distances by checking the depth after which two observations become separated, and to approximate densities by fitting
trees beyond balanced-tree limit. Offers options to vary between randomized and deterministic splits too.
Note
----
The default parameters in this software do not correspond to the suggested parameters in
any of the references.
In particular, the following default values are likely to cause huge differences when compared to the
defaults in other software: ``ndim``, ``sample_size``, ``ntrees``. The defaults here are
nevertheless more likely to result in better models. In order to mimic scikit-learn for example, one
would need to pass ``ndim=1``, ``sample_size=256``, ``ntrees=100``, ``missing_action="fail"``, ``nthreads=1``.
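
    As a sketch, such a configuration (using the values from the preceding
    sentence) could be constructed as::

        iso = IsolationForest(ndim=1, sample_size=256, ntrees=100,
                              missing_action="fail", nthreads=1)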
Note
----
The model offers many tunable parameters. The most likely candidate to tune is
``prob_pick_pooled_gain``, for which higher values tend to
result in a better ability to flag outliers in the training data at the expense of hindered
performance when making predictions on new data (calling method ``predict``) and poorer
generalizability to inputs with values outside the variables' ranges to which the model was fit
(see plots generated from the examples in GitHub notebook for a better idea of the difference). The next candidate to tune is
``sample_size`` - the default is to use all rows, but in some datasets introducing sub-sampling can help,
especially for the single-variable model. In smaller datasets, one might also want to experiment
with ``weigh_by_kurtosis`` and perhaps lower ``ndim``.
Note
----
The default parameters will not scale to large datasets. In particular,
if the amount of data is large, it's suggested to set a smaller sample size for each tree (parameter ``sample_size``)
and to fit fewer of them (parameter ``ntrees``).
As well, the default option for 'missing_action' might slow things down significantly.
See the documentation of the parameters for more details.
These defaults can also result in very big model sizes in memory and as serialized
    files (e.g. models that weigh over 10GB) when the number of rows in the data is large.
Using fewer trees, smaller sample sizes, and shallower trees can help to reduce model
sizes if that becomes a problem.
Note
----
When using more than one dimension for splits (i.e. splitting hyperplanes, see ``ndim``) and when
calculating gain, the variables are standardized at each step, so there is no need to center/scale the
data beforehand. The gain calculations are also standardized according to the standard deviation when
using ``ntry>1`` or ``ndim==1``, in order to avoid differences in the magnitudes of the coefficients.
Parameters
----------
sample_size : int, float(0,1), or None
Sample size of the data sub-samples with which each binary tree will be built. If passing 'None', each
tree will be built using the full data. Recommended value in [1], [2], [3] is 256, while
the default value in the author's code in [5] is 'None' here.
If passing a number between zero and one, will assume it means taking a sample size that represents
that proportion of the rows in the data.
Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the
model needs more trees and/or bigger samples to reach convergence (unless using non-random
splits, in which case the distribution is likely to be centered around a much lower number),
or that the distributions in the data are too skewed for random uniform splits.
ntrees : int
Number of binary trees to build for the model. Recommended value in [1] is 100, while the default value in the
author's code in [5] is 10. In general, the number of trees required for good results
is higher when (a) there are many columns, (b) there are categorical variables, (c) categorical variables have many
categories, (d) `ndim` is high.
Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the
model needs more trees and/or bigger samples to reach convergence (unless using non-random
splits, in which case the distribution is likely to be centered around a much lower number),
or that the distributions in the data are too skewed for random uniform splits.
ndim : int
Number of columns to combine to produce a split. If passing 1, will produce the single-variable model described
in [1] and [2], while if passing values greater than 1, will produce the extended model described in [3] and [4].
Recommended value in [4] is 2, while [3] recommends a low value such as 2 or 3. Models with values higher than 1
    are referred to hereafter as the extended model (as in [3]).
Note that, when using ``ndim>1``, the variables are standardized at each step as suggested in [4],
which makes the models slightly different than in [3].
ntry : int
In the extended model with non-random splits, how many random combinations to try for determining the best gain.
Only used when deciding splits by gain (see documentation for parameters 'prob_pick_avg_gain' and 'prob_pick_pooled_gain').
Recommended value in [4] is 10. Ignored for single-variable model.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero.
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype (see the documentation for ``fit`` for details).
max_depth : int, None, or str "auto"
Maximum depth of the binary trees to grow. If passing None, will build trees until each observation ends alone
in a terminal node or until no further split is possible. If using "auto", will limit it to the corresponding
depth of a balanced binary tree with number of terminal nodes corresponding to the sub-sample size (the reason
being that, if trying to detect outliers, an outlier will only be so if it turns out to be isolated with shorter average
depth than usual, which corresponds to a balanced tree depth). When a terminal node has more than 1 observation, the
remaining isolation depth for them is estimated assuming the data and splits are both uniformly random (separation depth
follows a similar process with expected value calculated as in [6]). Default setting for [1], [2], [3], [4] is "auto",
but it's recommended to pass higher values if using the model for purposes other than outlier detection.
ncols_per_tree : None, int, or float(0,1)
Number of columns to use (have as potential candidates for splitting at each iteration) in each tree,
somewhat similar to the 'mtry' parameter of random forests.
In general, this is only relevant when using non-random splits and/or weighting by kurtosis.
If passing a number between zero and one, will assume it means taking a sample size that represents
that proportion of the columns in the data.
If passing ``None`` (the default), will use the full number of available columns.
prob_pick_avg_gain : float(0, 1)
* For the single-variable model (``ndim=1``), this parameter indicates the probability
of making each split by choosing a column and split point in that
same column as both the column and split point that gives the largest averaged gain (as proposed in [4]) across
all available columns and possible splits in each column. Note that this implies evaluating every single column
in the sample data when this type of split happens, which will potentially make the model fitting much slower,
but has no impact on prediction time. For categorical variables, will take the expected standard deviation that
would be gotten if the column were converted to numerical by assigning to each category a random number ~ Unif(0, 1)
and calculate gain with those assumed standard deviations.
* For the extended model, this parameter indicates the probability that the
split point in the chosen linear combination of variables will be decided by this averaged gain criterion.
Compared to a pooled average, this tends to result in more cases in which a single observation or very few of them
are put into one branch. Recommended to use sub-samples (parameter 'sample_size') when passing this parameter.
Note that, since this will create isolated nodes faster, the resulting object will be lighter (use less memory).
When splits are
not made according to any of 'prob_pick_avg_gain', 'prob_pick_pooled_gain', 'prob_split_avg_gain',
'prob_split_pooled_gain', both the column and the split point are decided at random. Default setting for [1], [2], [3] is
zero, and default for [4] is 1. This is the randomization parameter that can be passed to the author's original code in [5],
but note that the code in [5] suffers from a mathematical error in the calculation of running standard deviations,
so the results from it might not match with this library's.
Note that, if passing a value of 1 (100%) with no sub-sampling and using the single-variable model, every single tree will have
the exact same splits.
Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of
outlier scores is unlikely to be centered around 0.5.
prob_pick_pooled_gain : float(0, 1)
* For the single-variable model (``ndim=1``), this parameter indicates the probability
of making each split by choosing a column and split point in that
same column as both the column and split point that gives the largest pooled gain (as used in decision tree
classifiers such as C4.5 in [7]) across all available columns and possible splits in each column. Note
that this implies evaluating every single column in the sample data when this type of split happens, which
will potentially make the model fitting much slower, but has no impact on prediction time. For categorical
variables, will use shannon entropy instead (like in [7]).
* For the extended model, this parameter indicates the probability
that the split point in the chosen linear combination of variables will be decided by this pooled gain
criterion.
Compared to a simple average, this tends to result in more evenly-divided splits and more clustered
groups when they are smaller. Recommended to pass higher values when used for imputation of missing values.
When used for outlier detection, higher values of this parameter result in models that are able to better flag
outliers in the training data of each tree, but generalize poorly to outliers in new data and to values of variables
outside of the ranges from the training data. Passing small 'sample_size' and high values of this parameter will
tend to flag too many outliers.
Note that, since this makes the trees more even and thus it takes more steps to produce isolated nodes,
the resulting object will be heavier. When splits are not made according to any of 'prob_pick_avg_gain',
'prob_pick_pooled_gain', 'prob_split_avg_gain', 'prob_split_pooled_gain', both the column and the split point
are decided at random. Note that, if passing value 1 (100%) with no sub-sampling and using the single-variable model,
every single tree will have the exact same splits.
Be aware that ``penalize_range`` can also have a large impact when using ``prob_pick_pooled_gain``.
Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of
outlier scores is unlikely to be centered around 0.5.
prob_split_avg_gain : float(0, 1)
Probability of making each split by selecting a column at random and determining the split point as
that which gives the highest averaged gain. Not supported for the extended model as the splits are on
linear combinations of variables. See the documentation for parameter 'prob_pick_avg_gain' for more details.
prob_split_pooled_gain : float(0, 1)
Probability of making each split by selecting a column at random and determining the split point as
that which gives the highest pooled gain. Not supported for the extended model as the splits are on
linear combinations of variables. See the documentation for parameter 'prob_pick_pooled_gain' for more details.
min_gain : float > 0
Minimum gain that a split threshold needs to produce in order to proceed with a split. Only used when the splits
are decided by a gain criterion (either pooled or averaged). If the highest possible gain in the evaluated
splits at a node is below this threshold, that node becomes a terminal node.
missing_action : str, one of "divide" (single-variable only), "impute", "fail", "auto"
How to handle missing data at both fitting and prediction time. Options are:
``"divide"``:
(For the single-variable model only, recommended) Will follow both branches and combine the result with the
weight given by the fraction of the data that went to each branch when fitting the model.
``"impute"``:
Will assign observations to the branch with the most observations in the single-variable model, or fill in
missing values with the median of each column of the sample from which the split was made in the extended
model (recommended for the extended model).
``"fail"``:
Will assume there are no missing values and will trigger undefined behavior if it encounters any.
``"auto"``:
Will use "divide" for the single-variable model and "impute" for the extended model.
In the extended model, infinite values will be treated as missing.
Passing "fail" will produce faster fitting and prediction times along with decreased
model object sizes.
Models from [1], [2], [3], [4] correspond to "fail" here.
new_categ_action : str, one of "weighted" (single-variable only), "impute" (extended only), "smallest", "random"
What to do after splitting a categorical feature when new data that reaches that split has categories that
the sub-sample from which the split was done did not have. Options are:
``"weighted"``:
(For the single-variable model only, recommended) Will follow both branches and combine the result with weight given
by the fraction of the data that went to each branch when fitting the model.
``"impute"``:
(For the extended model only, recommended) Will assign them the median value for that column that was added to the linear
combination of features.
``"smallest"``:
In the single-variable case will assign all observations with unseen categories in the split to the branch that had
fewer observations when fitting the model, and in the extended case will assign them the coefficient of the least
common category.
``"random"``:
            Will assign a branch (coefficient in the extended model) at random for each category beforehand, even if no observations
had that category when fitting the model.
``"auto"``:
Will select "weighted" for the single-variable model and "impute" for the extended model.
Ignored when passing 'categ_split_type' = 'single_categ'.
categ_split_type : str, one of "subset" or "single_categ"
Whether to split categorical features by assigning sub-sets of them to each branch, or by assigning
a single category to a branch and the rest to the other branch. For the extended model, whether to
give each category a coefficient, or only one while the rest get zero.
all_perm : bool
When doing categorical variable splits by pooled gain with ``ndim=1`` (regular model),
whether to consider all possible permutations of variables to assign to each branch or not. If ``False``,
will sort the categories by their frequency and make a grouping in this sorted order. Note that the
number of combinations evaluated (if ``True``) is the factorial of the number of present categories in
a given column (minus 2). For averaged gain, the best split is always to put the second most-frequent
category in a separate branch, so not evaluating all permutations (passing ``False``) will make it
possible to select other splits that respect the sorted frequency order.
Ignored when not using categorical variables or not doing splits by pooled gain or using ``ndim > 1``.
coef_by_prop : bool
In the extended model, whether to sort the randomly-generated coefficients for categories
according to their relative frequency in the tree node. This might provide better results when using
categorical variables with too many categories, but is not recommended, and not reflective of
real "categorical-ness". Ignored for the regular model (``ndim=1``) and/or when not using categorical
variables.
recode_categ : bool
Whether to re-encode categorical variables even in case they are already passed
as ``pd.Categorical``. This is recommended as it will eliminate potentially redundant categorical levels if
they have no observations, but if the categorical variables are already of type ``pd.Categorical`` with only
the levels that are present, it can be skipped for slightly faster fitting times. You'll likely
want to pass ``False`` here if merging several models into one through ``append_trees``.
weights_as_sample_prob : bool
If passing sample (row) weights when fitting the model, whether to consider those weights as row
sampling weights (i.e. the higher the weights, the more likely the observation will end up included
in each tree sub-sample), or as distribution density weights (i.e. putting a weight of two is the same
as if the row appeared twice, thus higher weight makes it less of an outlier). Note that sampling weight
is only used when sub-sampling data for each tree, which is not the default in this implementation.
sample_with_replacement : bool
Whether to sample rows with replacement or not (not recommended). Note that distance calculations,
if desired, don't work well with duplicate rows.
penalize_range : bool
Whether to penalize (add -1 to the terminal depth) observations at prediction time that have a value
of the chosen split variable (linear combination in extended model) that falls outside of a pre-determined
reasonable range in the data being split (given by 2 * range in data and centered around the split point),
as proposed in [4] and implemented in the authors' original code in [5]. Not used in single-variable model
when splitting by categorical variables.
It's recommended to turn this off for faster predictions on sparse CSC matrices.
Note that this can make a very large difference in the results when using ``prob_pick_pooled_gain``.
Be aware that this option can make the distribution of outlier scores a bit different
(i.e. not centered around 0.5)
weigh_by_kurtosis : bool
Whether to weigh each column according to the kurtosis obtained in the sub-sample that is selected
for each tree as briefly proposed in [1]. Note that this is only done at the beginning of each tree
sample, so if not using sub-samples, it's better to pass column weights calculated externally. For
categorical columns, will calculate expected kurtosis if the column was converted to numerical by
assigning to each category a random number ~ Unif(0, 1).
Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that
uses sums of squares and higher-power numbers, which has less numerical precision than the
calculation used for dense inputs, and as such, the results might differ slightly.
Using this option makes the model more likely to pick the columns that have anomalous values
when viewed as a 1-d distribution, and can bring a large improvement in some datasets.
coefs : str, one of "normal" or "uniform"
For the extended model, whether to sample random coefficients according to a normal distribution ~ N(0, 1)
(as proposed in [3]) or according to a uniform distribution ~ Unif(-1, +1) as proposed in [4]. Ignored for the
single-variable model. Note that, for categorical variables, the coefficients will be sampled ~ N (0,1)
regardless - in order for both types of variables to have transformations in similar ranges (which will tend
to boost the importance of categorical variables), pass ``"uniform"`` here.
assume_full_distr : bool
When calculating pairwise distances (see [8]), whether to assume that the fitted model represents
a full population distribution (will use a standardizing criterion assuming infinite sample,
and the results of the similarity between two points at prediction time will not depend on the
        presence of any third point that is similar to them, but will differ more compared to the pairwise
distances between points from which the model was fit). If passing 'False', will calculate pairwise distances
as if the new observations at prediction time were added to the sample to which each tree was fit, which
will make the distances between two points potentially vary according to other newly introduced points.
This will not be assumed when the distances are calculated as the model is being fit (see documentation
for method 'fit_transform').
build_imputer : bool
Whether to construct missing-value imputers so that later this same model could be used to impute
missing values of new (or the same) observations. Be aware that this will significantly increase the memory
requirements and serialized object sizes. Note that this is not related to 'missing_action' as missing
values inside the model are treated differently and follow their own imputation or division strategy.
min_imp_obs : int
Minimum number of observations with which an imputation value can be produced. Ignored if passing
'build_imputer' = 'False'.
depth_imp : str, one of "higher", "lower", "same"
How to weight observations according to their depth when used for imputing missing values. Passing
"higher" will weigh observations higher the further down the tree (away from the root node) the
terminal node is, while "lower" will do the opposite, and "same" will not modify the weights according
to node depth in the tree. Implemented for testing purposes and not recommended to change
from the default. Ignored when passing 'build_imputer' = 'False'.
weigh_imp_rows : str, one of "inverse", "prop", "flat"
How to weight node sizes when used for imputing missing values. Passing "inverse" will weigh
        a node inversely proportional to the number of observations that end up there, while "prop"
        will weigh them heavier the more observations there are, and "flat" will weigh all nodes the same
in this regard regardless of how many observations end up there. Implemented for testing purposes
and not recommended to change from the default. Ignored when passing 'build_imputer' = 'False'.
random_seed : int
Seed that will be used for random number generation.
nthreads : int
Number of parallel threads to use. If passing a negative number, will use
the same formula as joblib does for calculating number of threads (which is
n_cpus + 1 + n_jobs - i.e. pass -1 to use all available threads). Note that, the more threads,
the more memory will be allocated, even if the thread does not end up being used.
Be aware that most of the operations are bound by memory bandwidth, which means that
adding more threads will not result in a linear speed-up. For some types of data
(e.g. large sparse matrices with small sample sizes), adding more threads might result
in only a very modest speed up (e.g. 1.5x faster with 4x more threads),
even if all threads look fully utilized.
n_estimators : None or int
Synonym for ``ntrees``, kept for better compatibility with scikit-learn.
max_samples : None or int
Synonym for ``sample_size``, kept for better compatibility with scikit-learn.
n_jobs : None or int
Synonym for ``nthreads``, kept for better compatibility with scikit-learn.
random_state : None, int, or RandomState
Synonym for ``random_seed``, kept for better compatibility with scikit-learn.
bootstrap : None or bool
Synonym for ``sample_with_replacement``, kept for better compatibility with scikit-learn.
Attributes
----------
cols_numeric_ : array(n_num_features,)
Array with the names of the columns that were taken as numerical
(Only when fitting the model to a DataFrame object).
cols_categ_ : array(n_categ_features,)
Array with the names of the columns that were taken as categorical
(Only when fitting the model to a DataFrame object).
is_fitted_ : bool
Indicator telling whether the model has been fit to data or not.
References
----------
    .. [1] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "Isolation forest."
           2008 Eighth IEEE International Conference on Data Mining. IEEE, 2008.
    .. [2] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "Isolation-based anomaly detection."
           ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3.
    .. [3] Hariri, Sahand, Matias Carrasco Kind, and Robert J. Brunner. "Extended Isolation Forest."
           arXiv preprint arXiv:1811.02141 (2018).
    .. [4] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou. "On detecting clustered anomalies using SCiForest."
           Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, Berlin, Heidelberg, 2010.
    .. [5] https://sourceforge.net/projects/iforest/
    .. [6] https://math.stackexchange.com/questions/3388518/expected-number-of-paths-required-to-separate-elements-in-a-binary-tree
    .. [7] Quinlan, J. Ross. C4.5: programs for machine learning. Elsevier, 2014.
    .. [8] Cortes, David. "Distance approximation using Isolation Forests."
           arXiv preprint arXiv:1910.12362 (2019).
    .. [9] Cortes, David. "Imputing missing values with unsupervised random trees."
           arXiv preprint arXiv:1911.06646 (2019).
.. [10] https://math.stackexchange.com/questions/3333220/expected-average-depth-in-random-binary-tree-constructed-top-to-bottom
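
    Examples
    --------
    A small, purely illustrative sketch (the parameter values below are not
    recommendations; ``predict`` refers to the prediction method mentioned
    in the notes above)::

        import numpy as np
        X = np.random.standard_normal(size=(200, 5))
        iso = IsolationForest(ntrees=100, ndim=2, nthreads=1).fit(X)
        scores = iso.predict(X)   # standardized outlier scores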
"""
def __init__(self, sample_size = None, ntrees = 500, ndim = 3, ntry = 3,
categ_cols = None, max_depth = "auto", ncols_per_tree = None,
prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0,
prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0,
min_gain = 0., missing_action = "auto", new_categ_action = "auto",
categ_split_type = "subset", all_perm = False,
coef_by_prop = False, recode_categ = True,
weights_as_sample_prob = True, sample_with_replacement = False,
penalize_range = False, weigh_by_kurtosis = False,
coefs = "normal", assume_full_distr = True,
build_imputer = False, min_imp_obs = 3,
depth_imp = "higher", weigh_imp_rows = "inverse",
random_seed = 1, nthreads = -1,
n_estimators = None, max_samples = None,
n_jobs = None, random_state = None, bootstrap = None):
self.sample_size = sample_size
self.ntrees = ntrees
self.ndim = ndim
self.ntry = ntry
self.categ_cols = categ_cols
self.max_depth = max_depth
self.ncols_per_tree = ncols_per_tree
self.prob_pick_avg_gain = prob_pick_avg_gain
self.prob_pick_pooled_gain = prob_pick_pooled_gain
self.prob_split_avg_gain = prob_split_avg_gain
self.prob_split_pooled_gain = prob_split_pooled_gain
self.min_gain = min_gain
self.missing_action = missing_action
self.new_categ_action = new_categ_action
self.categ_split_type = categ_split_type
self.all_perm = all_perm
self.coef_by_prop = coef_by_prop
self.recode_categ = recode_categ
self.weights_as_sample_prob = weights_as_sample_prob
self.sample_with_replacement = sample_with_replacement
self.penalize_range = penalize_range
self.weigh_by_kurtosis = weigh_by_kurtosis
self.coefs = coefs
self.assume_full_distr = assume_full_distr
self.build_imputer = build_imputer
self.min_imp_obs = min_imp_obs
self.depth_imp = depth_imp
self.weigh_imp_rows = weigh_imp_rows
self.random_seed = random_seed
self.nthreads = nthreads
self.n_estimators = n_estimators
self.max_samples = max_samples
self.n_jobs = n_jobs
self.random_state = random_state
self.bootstrap = bootstrap
self._reset_obj()
def _init(self, categ_cols = None):
if categ_cols is not None:
if self.categ_cols is not None:
warnings.warn("Passed 'categ_cols' in constructor and fit method. Will take the latter.")
self.categ_cols = categ_cols
self._initialize_full(
sample_size = self.sample_size if (self.max_samples is None) else self.max_samples,
ntrees = self.ntrees if (self.n_estimators is None) else self.n_estimators,
ndim = self.ndim, ntry = self.ntry,
categ_cols = self.categ_cols,
max_depth = self.max_depth, ncols_per_tree = self.ncols_per_tree,
prob_pick_avg_gain = self.prob_pick_avg_gain, prob_pick_pooled_gain = self.prob_pick_pooled_gain,
prob_split_avg_gain = self.prob_split_avg_gain, prob_split_pooled_gain = self.prob_split_pooled_gain,
min_gain = self.min_gain, missing_action = self.missing_action, new_categ_action = self.new_categ_action,
categ_split_type = self.categ_split_type, all_perm = self.all_perm,
coef_by_prop = self.coef_by_prop, recode_categ = self.recode_categ,
weights_as_sample_prob = self.weights_as_sample_prob,
sample_with_replacement = self.sample_with_replacement if (self.bootstrap is None) else self.bootstrap,
penalize_range = self.penalize_range, weigh_by_kurtosis = self.weigh_by_kurtosis,
coefs = self.coefs, assume_full_distr = self.assume_full_distr,
build_imputer = self.build_imputer, min_imp_obs = self.min_imp_obs,
depth_imp = self.depth_imp, weigh_imp_rows = self.weigh_imp_rows,
random_seed = self.random_seed if (self.random_state is None) else self.random_state,
nthreads = self.nthreads if (self.n_jobs is None) else self.n_jobs)
def _initialize_full(self, sample_size = None, ntrees = 500, ndim = 3, ntry = 3,
categ_cols = None, max_depth = "auto", ncols_per_tree = None,
prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0,
prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0,
min_gain = 0., missing_action = "auto", new_categ_action = "auto",
categ_split_type = "subset", all_perm = False,
coef_by_prop = False, recode_categ = True,
weights_as_sample_prob = True, sample_with_replacement = False,
penalize_range = True, weigh_by_kurtosis = False,
coefs = "normal", assume_full_distr = True,
build_imputer = False, min_imp_obs = 3,
depth_imp = "higher", weigh_imp_rows = "inverse",
random_seed = 1, nthreads = -1):
if sample_size is not None:
assert sample_size > 0
if sample_size > 1:
assert isinstance(sample_size, int)
elif sample_size == 1:
sample_size = None
if ncols_per_tree is not None:
assert ncols_per_tree > 0
if ncols_per_tree > 1:
assert isinstance(ncols_per_tree, int)
elif ncols_per_tree == 1:
ncols_per_tree = None
assert ntrees > 0
assert isinstance(ntrees, int)
if (max_depth != "auto") and (max_depth is not None):
assert max_depth > 0
assert isinstance(max_depth, int)
if sample_size is not None:
assert max_depth < sample_size
assert ndim >= 1
assert isinstance(ndim, int)
assert ntry >= 1
assert isinstance(ntry, int)
if isinstance(random_seed, np.random.RandomState):
random_seed = random_seed.randint(np.iinfo(np.int32).max)
if isinstance(random_seed, np.random.Generator):
random_seed = random_seed.integers(np.iinfo(np.int32).max)
random_seed = int(random_seed)
assert random_seed >= 0
assert isinstance(min_imp_obs, int)
assert min_imp_obs >= 1
assert missing_action in ["divide", "impute", "fail", "auto"]
assert new_categ_action in ["weighted", "smallest", "random", "impute", "auto"]
assert categ_split_type in ["single_categ", "subset"]
assert coefs in ["normal", "uniform"]
assert depth_imp in ["lower", "higher", "same"]
assert weigh_imp_rows in ["inverse", "prop", "flat"]
assert prob_pick_avg_gain >= 0
assert prob_pick_pooled_gain >= 0
assert prob_split_avg_gain >= 0
assert prob_split_pooled_gain >= 0
assert min_gain >= 0
s = prob_pick_avg_gain + prob_pick_pooled_gain + prob_split_avg_gain + prob_split_pooled_gain
if s > 1:
warnings.warn("Split type probabilities sum to more than 1, will standardize them")
prob_pick_avg_gain /= s
prob_pick_pooled_gain /= s
prob_split_avg_gain /= s
prob_split_pooled_gain /= s
if (ndim == 1) and (sample_size is None) and ((prob_pick_avg_gain >= 1) or (prob_pick_pooled_gain >= 1)) and (not sample_with_replacement):
msg = "Passed parameters for deterministic single-variable splits"
msg += " with no sub-sampling. "
msg += "Every tree fitted will end up doing exactly the same splits. "
msg += "It's recommended to set 'prob_pick_avg_gain' < 1, 'prob_pick_pooled_gain' < 1, "
msg += "or to use the extended model (ndim > 1)."
warnings.warn(msg)
if missing_action == "auto":
if ndim == 1:
missing_action = "divide"
else:
missing_action = "impute"
if new_categ_action == "auto":
if ndim == 1:
new_categ_action = "weighted"
else:
new_categ_action = "impute"
if (build_imputer) and (missing_action == "fail"):
raise ValueError("Cannot impute missing values when passing 'missing_action' = 'fail'.")
if ndim == 1:
if new_categ_action == "impute":
raise ValueError("'new_categ_action' = 'impute' not supported in single-variable model.")
else:
if (prob_split_avg_gain > 0) or (prob_split_pooled_gain > 0):
msg = "Non-zero values for 'prob_split_avg_gain' "
msg += "and 'prob_split_pooled_gain' not meaningful in "
msg += "extended model."
raise ValueError(msg)
if missing_action == "divide":
raise ValueError("'missing_action' = 'divide' not supported in extended model.")
if new_categ_action == "weighted":
raise ValueError("'new_categ_action' = 'weighted' not supported in extended model.")
if nthreads is None:
nthreads = 1
elif nthreads < 0:
nthreads = multiprocessing.cpu_count() + 1 + nthreads
assert nthreads > 0
assert isinstance(nthreads, int)
if categ_cols is not None:
categ_cols = np.array(categ_cols).reshape(-1).astype(int)
categ_cols.sort()
self.sample_size = sample_size
self.ntrees = ntrees
self.ndim = ndim
self.ntry = ntry
self.categ_cols = categ_cols
self.max_depth = max_depth
self.ncols_per_tree = ncols_per_tree
self.prob_pick_avg_gain = prob_pick_avg_gain
self.prob_pick_pooled_gain = prob_pick_pooled_gain
self.prob_split_avg_gain = prob_split_avg_gain
self.prob_split_pooled_gain = prob_split_pooled_gain
self.min_gain = min_gain
self.missing_action = missing_action
self.new_categ_action = new_categ_action
self.categ_split_type = categ_split_type
self.coefs = coefs
self.depth_imp = depth_imp
self.weigh_imp_rows = weigh_imp_rows
self.min_imp_obs = min_imp_obs
self.random_seed = random_seed
self.nthreads = nthreads
self.all_perm = bool(all_perm)
self.recode_categ = bool(recode_categ)
self.coef_by_prop = bool(coef_by_prop)
self.weights_as_sample_prob = bool(weights_as_sample_prob)
self.sample_with_replacement = bool(sample_with_replacement)
self.penalize_range = bool(penalize_range)
self.weigh_by_kurtosis = bool(weigh_by_kurtosis)
self.assume_full_distr = bool(assume_full_distr)
self.build_imputer = bool(build_imputer)
self._reset_obj()
def _reset_obj(self):
self.cols_numeric_ = np.array([])
self.cols_categ_ = np.array([])
self._cat_mapping = list()
self._cat_max_lev = np.array([])
self._ncols_numeric = 0
self._ncols_categ = 0
self.is_fitted_ = False
self._ntrees = 0
self._cpp_obj = isoforest_cpp_obj()
self._is_extended_ = self.ndim > 1
def copy(self):
"""
Get a deep copy of this object
Returns
-------
copied : obj
A deep copy of this object
"""
if not self.is_fitted_:
self._cpp_obj = isoforest_cpp_obj()
return deepcopy(self)
else:
obj_restore = self._cpp_obj
obj_new = self._cpp_obj.deepcopy()
try:
self._cpp_obj = None
out = deepcopy(self)
finally:
self._cpp_obj = obj_restore
out._cpp_obj = obj_new
return out
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Kept for compatibility with scikit-learn.
Parameters
----------
deep : bool
Ignored.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
import inspect
return {param.name:getattr(self, param.name) for param in inspect.signature(self.__init__).parameters.values()}
def set_params(self, **params):
"""
Set the parameters of this estimator.
Kept for compatibility with scikit-learn.
Note
----
Setting any parameter other than the number of threads will reset the model
- that is, if it was fitted to some data, the fitted model will be lost.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
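
        For instance, a small sketch (the values below are purely
        illustrative)::

            model.set_params(ntrees=200, ndim=1)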
"""
if not (len(params) == 1 and ("nthreads" in params or "n_jobs" in params)):
self.is_fitted_ = False
valid_params = self.get_params(deep=False)
for k,v in params.items():
if k not in valid_params:
raise ValueError("Invalid parameter: ", k)
setattr(self, k, v)
return self
def __str__(self):
msg = ""
if self._is_extended_:
msg += "Extended "
msg += "Isolation Forest model"
if (self.prob_pick_avg_gain + self.prob_pick_pooled_gain) > 0 or \
(self.ndim == 1 and (self.prob_split_avg_gain + self.prob_split_pooled_gain) > 0):
msg += " (using guided splits)"
msg += "\n"
if self.ndim > 1:
msg += "Splitting by %d variables at a time\n" % self.ndim
if self.is_fitted_:
msg += "Consisting of %d trees\n" % self._ntrees
if self._ncols_numeric > 0:
msg += "Numeric columns: %d\n" % self._ncols_numeric
if self._ncols_categ:
msg += "Categorical columns: %d\n" % self._ncols_categ
return msg
def __repr__(self):
return self.__str__()
def _get_model_obj(self):
return self._cpp_obj.get_cpp_obj(self._is_extended_)
def _get_imputer_obj(self):
return self._cpp_obj.get_imputer()
def fit(self, X, y = None, sample_weights = None, column_weights = None, categ_cols = None):
"""
Fit isolation forest model to data
Parameters
----------
X : array or array-like (n_samples, n_features)
Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.
If passing a DataFrame, will assume that columns are:
`Numeric`:
If their dtype is a subtype of NumPy's 'number' or 'datetime64'.
`Categorical`:
If their dtype is 'object', 'Categorical', or 'bool'.
Other dtypes are not supported.
y : None
Not used. Kept as argument for compatibility with SciKit-learn pipelining.
sample_weights : None or array(n_samples,)
Sample observation weights for each row of 'X', with higher weights indicating either higher sampling
probability (i.e. the observation has a larger effect on the fitted model, if using sub-samples), or
distribution density (i.e. if the weight is two, it has the same effect of including the same data
point twice), according to parameter 'weights_as_sample_prob' in the model constructor method.
column_weights : None or array(n_features,)
Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.
If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero.
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype.
Returns
-------
self : obj
This object.
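
        As a sketch, fitting to a DataFrame with one numeric and one
        categorical column (the data below is purely illustrative)::

            import numpy as np, pandas as pd
            df = pd.DataFrame({"x": np.random.standard_normal(100),
                               "c": np.random.choice(["a", "b"], 100)})
            model = IsolationForest(ntrees=10).fit(df)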
"""
self._init(categ_cols)
if (self.sample_size is None) and (sample_weights is not None) and (self.weights_as_sample_prob):
raise ValueError("Sampling weights are only supported when using sub-samples for each tree.")
if column_weights is not None and self.weigh_by_kurtosis:
raise ValueError("Cannot pass column weights when weighting columns by kurtosis.")
self._reset_obj()
X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, sample_weights, column_weights)
if self.sample_size is None:
sample_size = nrows
elif self.sample_size < 1:
sample_size = int(np.ceil(self.sample_size * nrows))
if sample_size == 1:
raise ValueError("Sampling proportion amounts to a single row.")
else:
sample_size = self.sample_size
if self.max_depth == "auto":
max_depth = 0
limit_depth = True
elif self.max_depth is None:
max_depth = nrows - 1
limit_depth = False
else:
max_depth = self.max_depth
limit_depth = False
if self.ncols_per_tree is None:
ncols_per_tree = 0
elif self.ncols_per_tree < 1:
ncols_tot = 0
if X_num is not None:
ncols_tot += X_num.shape[1]
if X_cat is not None:
ncols_tot += X_cat.shape[1]
ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))
else:
ncols_per_tree = self.ncols_per_tree
if isinstance(self.random_state, np.random.RandomState):
seed = self.random_state.randint(np.iinfo(np.int32).max)
else:
seed = self.random_seed
self._cpp_obj.fit_model(_get_num_dtype(X_num, sample_weights, column_weights),
_get_int_dtype(X_num),
X_num, X_cat, ncat, sample_weights, column_weights,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self._ncols_numeric).value,
ctypes.c_size_t(self._ncols_categ).value,
ctypes.c_size_t(self.ndim).value,
ctypes.c_size_t(self.ntry).value,
self.coefs,
ctypes.c_bool(self.coef_by_prop).value,
ctypes.c_bool(self.sample_with_replacement).value,
ctypes.c_bool(self.weights_as_sample_prob).value,
ctypes.c_size_t(sample_size).value,
ctypes.c_size_t(self.ntrees).value,
ctypes.c_size_t(max_depth).value,
ctypes.c_size_t(ncols_per_tree).value,
ctypes.c_bool(limit_depth).value,
ctypes.c_bool(self.penalize_range).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(False).value,
ctypes.c_bool(self.weigh_by_kurtosis).value,
ctypes.c_double(self.prob_pick_avg_gain).value,
ctypes.c_double(self.prob_split_avg_gain).value,
ctypes.c_double(self.prob_pick_pooled_gain).value,
ctypes.c_double(self.prob_split_pooled_gain).value,
ctypes.c_double(self.min_gain).value,
self.missing_action,
self.categ_split_type,
self.new_categ_action,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_size_t(self.min_imp_obs).value,
self.depth_imp,
self.weigh_imp_rows,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_bool(False).value,
ctypes.c_uint64(seed).value,
ctypes.c_int(self.nthreads).value)
self.is_fitted_ = True
self._ntrees = self.ntrees
return self
def fit_predict(self, X, column_weights = None, output_outlierness = "score",
output_distance = None, square_mat = False, output_imputed = False,
categ_cols = None):
"""
Fit the model in-place and produce isolation or separation depths along the way
See the documentation of other methods ('init', 'fit', 'predict', 'predict_distance')
for details.
Note
----
The data must NOT contain any duplicate rows.
Note
----
This function will be faster at predicting average depths than calling 'fit' + 'predict'
separately when using full row samples.
Note
----
If using 'penalize_range' = 'True', the resulting scores/depths from this function might differ a bit
from those of 'fit' + 'predict' ran separately.
Note
----
Sample weights are not supported for this method.
Parameters
----------
X : array or array-like (n_samples, n_features)
Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix.
If passing a DataFrame, will assume that columns are:
`Numeric`:
If their dtype is a subtype of NumPy's 'number' or 'datetime64'.
`Categorical`:
If their dtype is 'object', 'Categorical', or 'bool'.
Other dtypes are not supported.
column_weights : None or array(n_features,)
Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion.
If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis.
Note that, if passing a DataFrame with both numeric and categorical columns, the column names must
not be repeated, otherwise the column weights passed here will not end up matching.
output_outlierness : None or str in ["score", "avg_depth"]
Desired type of outlierness output. If passing "score", will output standardized outlier score.
If passing "avg_depth" will output average isolation depth without standardizing.
If passing 'None', will skip outlierness calculations.
output_distance : None or str in ["dist", "avg_sep"]
Type of distance output to produce. If passing "dist", will standardize the average separation
depths. If passing "avg_sep", will output the average separation depth without standardizing it
(note that lower separation depth means furthest distance). If passing 'None', will skip distance calculations.
square_mat : bool
Whether to produce a full square matrix with the distances. If passing 'False', will output
only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at
position p(i,j) = (i * (n - (i+1)/2) + j - i - 1). Ignored when passing 'output_distance' = 'None'.
output_imputed : bool
Whether to output the data with imputed missing values. Model object must have been initialized
with 'build_imputer' = 'True'.
categ_cols : None or array-like
Columns that hold categorical features, when the data is passed as an array or matrix.
Categorical columns should contain only integer values with a continuous numeration starting at zero,
with negative values and NaN taken as missing,
and the array or list passed here should correspond to the column numbers, with numeration starting
at zero.
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``.
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as
categorical depending on their dtype.
Returns
-------
output : array(n_samples,), or dict
Requested outputs about isolation depth (outlierness), pairwise separation depth (distance), and/or
imputed missing values. If passing either 'output_distance' or 'output_imputed', will return a dictionary
with keys "pred" (array(n_samples,)), "dist" (array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples)),
"imputed" (array-like(n_samples, n_columns)), according to whether each output type is present.
"""
self._init(categ_cols)
if self.sample_size is not None:
raise ValueError("Cannot use 'fit_predict' when the sample size is limited.")
if self.sample_with_replacement:
raise ValueError("Cannot use 'fit_predict' or 'fit_transform' when sampling with replacement.")
if column_weights is not None and self.weigh_by_kurtosis:
raise ValueError("Cannot pass column weights when weighting columns by kurtosis.")
if (output_outlierness is None) and (output_distance is None):
raise ValueError("Must pass at least one of 'output_outlierness' or 'output_distance'.")
if output_outlierness is not None:
assert output_outlierness in ["score", "avg_depth"]
if output_distance is not None:
assert output_distance in ["dist", "avg_sep"]
if output_imputed:
if self.missing_action == "fail":
raise ValueError("Cannot impute missing values when using 'missing_action' = 'fail'.")
if not self.build_imputer:
msg = "Trying to impute missing values from object "
msg += "that was initialized with 'build_imputer' = 'False' "
msg += "- will force 'build_imputer' to 'True'."
warnings.warn(msg)
self.build_imputer = True
self._reset_obj()
X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, None, column_weights)
if (output_imputed) and (issparse(X_num)):
msg = "Imputing missing values from CSC matrix on-the-fly can be very slow, "
msg += "it's recommended if possible to fit the model first and then pass the "
msg += "same matrix as CSR to 'transform'."
warnings.warn(msg)
if self.max_depth == "auto":
max_depth = 0
limit_depth = True
        elif self.max_depth is None:
            max_depth = nrows - 1
            limit_depth = False
else:
max_depth = self.max_depth
limit_depth = False
if self.ncols_per_tree is None:
ncols_per_tree = 0
elif self.ncols_per_tree < 1:
ncols_tot = 0
if X_num is not None:
ncols_tot += X_num.shape[1]
if X_cat is not None:
ncols_tot += X_cat.shape[1]
ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot))
else:
ncols_per_tree = self.ncols_per_tree
if isinstance(self.random_state, np.random.RandomState):
seed = self.random_state.randint(np.iinfo(np.int32).max)
else:
seed = self.random_seed
depths, tmat, dmat, X_num, X_cat = self._cpp_obj.fit_model(_get_num_dtype(X_num, None, column_weights),
_get_int_dtype(X_num),
X_num, X_cat, ncat, None, column_weights,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self._ncols_numeric).value,
ctypes.c_size_t(self._ncols_categ).value,
ctypes.c_size_t(self.ndim).value,
ctypes.c_size_t(self.ntry).value,
self.coefs,
ctypes.c_bool(self.coef_by_prop).value,
ctypes.c_bool(self.sample_with_replacement).value,
ctypes.c_bool(self.weights_as_sample_prob).value,
ctypes.c_size_t(nrows).value,
ctypes.c_size_t(self.ntrees).value,
ctypes.c_size_t(max_depth).value,
ctypes.c_size_t(ncols_per_tree).value,
ctypes.c_bool(limit_depth).value,
ctypes.c_bool(self.penalize_range).value,
ctypes.c_bool(output_distance is not None).value,
ctypes.c_bool(output_distance == "dist").value,
ctypes.c_bool(square_mat).value,
ctypes.c_bool(output_outlierness is not None).value,
ctypes.c_bool(output_outlierness == "score").value,
ctypes.c_bool(self.weigh_by_kurtosis).value,
ctypes.c_double(self.prob_pick_avg_gain).value,
ctypes.c_double(self.prob_split_avg_gain).value,
ctypes.c_double(self.prob_pick_pooled_gain).value,
ctypes.c_double(self.prob_split_pooled_gain).value,
ctypes.c_double(self.min_gain).value,
self.missing_action,
self.categ_split_type,
self.new_categ_action,
ctypes.c_bool(self.build_imputer).value,
ctypes.c_size_t(self.min_imp_obs).value,
self.depth_imp,
self.weigh_imp_rows,
ctypes.c_bool(output_imputed).value,
ctypes.c_bool(self.all_perm).value,
ctypes.c_uint64(seed).value,
ctypes.c_int(self.nthreads).value)
self.is_fitted_ = True
self._ntrees = self.ntrees
if (not output_distance) and (not output_imputed):
return depths
else:
outp = {"pred" : depths}
if output_distance:
if square_mat:
outp["dist"] = dmat
else:
outp["dist"] = tmat
if output_imputed:
outp["imputed"] = self._rearrange_imputed(X, X_num, X_cat)
return outp
def _process_data(self, X, sample_weights, column_weights):
### TODO: this needs a refactoring after introducing 'categ_cols'
if X.__class__.__name__ == "DataFrame":
if self.categ_cols is not None:
warnings.warn("'categ_cols' is ignored when passing a DataFrame as input.")
self.categ_cols = None
### https://stackoverflow.com/questions/25039626/how-do-i-find-numeric-columns-in-pandas
X_num = X.select_dtypes(include = [np.number, np.datetime64]).to_numpy()
if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:
X_num = X_num.astype(ctypes.c_double)
if not _is_col_major(X_num):
X_num = np.asfortranarray(X_num)
X_cat = X.select_dtypes(include = [pd.CategoricalDtype, "object", "bool"]).copy()
if (X_num.shape[1] + X_cat.shape[1]) == 0:
raise ValueError("Input data has no columns of numeric or categorical type.")
elif (X_num.shape[1] + X_cat.shape[1]) < X.shape[1]:
cols_num = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values)
cols_cat = np.array(X_cat.columns.values)
msg = "Only numeric and categorical columns are supported."
msg += " Got passed the following types: ["
msg += ", ".join([str(X[cl].dtype) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3])
msg += "]\n(Sample problem columns: ["
msg += ", ".join([str(cl) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3])
msg += "])"
raise ValueError(msg)
self._ncols_numeric = X_num.shape[1]
self._ncols_categ = X_cat.shape[1]
self.cols_numeric_ = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values)
self.cols_categ_ = np.array(X.select_dtypes(include = [pd.CategoricalDtype, "object", "bool"]).columns.values)
if not self._ncols_numeric:
X_num = None
else:
nrows = X_num.shape[0]
if not self._ncols_categ:
X_cat = None
else:
nrows = X_cat.shape[0]
has_ordered = False
if X_cat is not None:
self._cat_mapping = [None for cl in range(X_cat.shape[1])]
for cl in range(X_cat.shape[1]):
if (X_cat[X_cat.columns[cl]].dtype.name == "category") and (X_cat[X_cat.columns[cl]].dtype.ordered):
has_ordered = True
if (not self.recode_categ) and (X_cat[X_cat.columns[cl]].dtype.name == "category"):
self._cat_mapping[cl] = np.array(X_cat[X_cat.columns[cl]].cat.categories)
X_cat[X_cat.columns[cl]] = X_cat[X_cat.columns[cl]].cat.codes
else:
X_cat[X_cat.columns[cl]], self._cat_mapping[cl] = pd.factorize(X_cat[X_cat.columns[cl]])
if (self.all_perm
and (self.ndim == 1)
and (self.prob_pick_pooled_gain or self.prob_split_pooled_gain)
):
if np.math.factorial(self._cat_mapping[cl].shape[0]) > np.iinfo(ctypes.c_size_t).max:
msg = "Number of permutations for categorical variables is larger than "
msg += "maximum representable integer. Try using 'all_perm=False'."
raise ValueError(msg)
# https://github.com/pandas-dev/pandas/issues/30618
if self._cat_mapping[cl].__class__.__name__ == "CategoricalIndex":
self._cat_mapping[cl] = self._cat_mapping[cl].to_numpy()
X_cat = X_cat.to_numpy()
if X_cat.dtype != ctypes.c_int:
X_cat = X_cat.astype(ctypes.c_int)
if not _is_col_major(X_cat):
X_cat = np.asfortranarray(X_cat)
if has_ordered:
warnings.warn("Data contains ordered categoricals. These are treated as unordered.")
else:
if len(X.shape) != 2:
raise ValueError("Input data must be two-dimensional.")
X_cat = None
if self.categ_cols is not None:
if np.max(self.categ_cols) >= X.shape[1]:
raise ValueError("'categ_cols' contains indices higher than the number of columns in 'X'.")
self.cols_numeric_ = np.setdiff1d(np.arange(X.shape[1]), self.categ_cols)
if issparse(X) and not isspmatrix_csc(X):
X = csc_matrix(X)
X_cat = X[:, self.categ_cols]
X = X[:, self.cols_numeric_]
if X.shape[1]:
if issparse(X):
avoid_sort = False
if not isspmatrix_csc(X):
warnings.warn("Sparse matrices are only supported in CSC format, will be converted.")
X = csc_matrix(X)
avoid_sort = True
if X.nnz == 0:
raise ValueError("'X' has no non-zero entries")
if ((X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or
(X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]) or
(X.indptr.dtype != X.indices.dtype) or
(X.data.dtype not in [ctypes.c_double, ctypes.c_float])
):
X = X.copy()
if X.data.dtype not in [ctypes.c_double, ctypes.c_float]:
X.data = X.data.astype(ctypes.c_double)
if (X.indptr.dtype != X.indices.dtype) or (X.indices.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):
X.indices = X.indices.astype(ctypes.c_size_t)
if (X.indptr.dtype != X.indices.dtype) or (X.indptr.dtype not in [ctypes.c_int, np.int64, ctypes.c_size_t]):
X.indptr = X.indptr.astype(ctypes.c_size_t)
if not avoid_sort:
_sort_csc_indices(X)
else:
if (X.__class__.__name__ == "ndarray") and (X.dtype not in [ctypes.c_double, ctypes.c_float]):
X = X.astype(ctypes.c_double)
if (X.__class__.__name__ != "ndarray") or (not _is_col_major(X)):
X = np.asfortranarray(X)
if X.dtype not in [ctypes.c_double, ctypes.c_float]:
X = X.astype(ctypes.c_double)
self._ncols_numeric = X.shape[1]
self._ncols_categ = 0 if (X_cat is None) else X_cat.shape[1]
if self.categ_cols is None:
self.cols_numeric_ = np.array([])
self.cols_categ_ = np.array([])
self._cat_mapping = list()
if (self._ncols_numeric + self._ncols_categ) == 0:
raise ValueError("'X' has zero columns.")
if X.shape[1]:
X_num = X
nrows = X_num.shape[0]
else:
X_num = None
if X_cat is not None:
if issparse(X_cat):
X_cat = X_cat.toarray()
if np.any(np.isnan(X_cat)):
X_cat = X_cat.copy()
X_cat[np.isnan(X_cat)] = -1
if X_cat.dtype != ctypes.c_int:
X_cat = X_cat.astype(ctypes.c_int)
if not _is_col_major(X_cat):
X_cat = np.asfortranarray(X_cat)
self._cat_max_lev = np.max(X_cat, axis=0)
if np.any(self._cat_max_lev < 0):
warnings.warn("Some categorical columns contain only missing values.")
nrows = X_cat.shape[0]
if nrows == 0:
raise ValueError("Input data has zero rows.")
elif nrows < 2:
raise ValueError("Input data must have at least 2 rows.")
elif self.sample_size is not None:
if self.sample_size > nrows:
warnings.warn("Input data has fewer rows than sample_size, will decrease sample_size.")
self.sample_size = None
if X_cat is not None:
if self.categ_cols is None:
ncat = np.array([self._cat_mapping[cl].shape[0] for cl in range(X_cat.shape[1])], dtype = ctypes.c_int)
else:
if self._cat_max_lev is None:
self._cat_max_lev = []
if not isinstance(self._cat_max_lev, np.ndarray):
self._cat_max_lev = np.array(self._cat_max_lev)
ncat = self._cat_max_lev + 1
if ncat.dtype != ctypes.c_int:
ncat = ncat.astype(ctypes.c_int)
else:
ncat = None
if sample_weights is not None:
sample_weights = np.array(sample_weights).reshape(-1)
if (X_num is not None) and (X_num.dtype != sample_weights.dtype):
sample_weights = sample_weights.astype(X_num.dtype)
if sample_weights.dtype not in [ctypes.c_double, ctypes.c_float]:
sample_weights = sample_weights.astype(ctypes.c_double)
if sample_weights.shape[0] != nrows:
raise ValueError("'sample_weights' has different number of rows than 'X'.")
ncols = 0
if X_num is not None:
ncols += X_num.shape[1]
if X_cat is not None:
ncols += X_cat.shape[1]
if column_weights is not None:
column_weights = np.array(column_weights).reshape(-1)
if (X_num is not None) and (X_num.dtype != column_weights.dtype):
column_weights = column_weights.astype(X_num.dtype)
if column_weights.dtype not in [ctypes.c_double, ctypes.c_float]:
column_weights = column_weights.astype(ctypes.c_double)
if ncols != column_weights.shape[0]:
raise ValueError("'column_weights' has %d entries, but data has %d columns." % (column_weights.shape[0], ncols))
if (X_num is not None) and (X_cat is not None):
column_weights = np.r_[column_weights[X.columns.values == self.cols_numeric_],
column_weights[X.columns.values == self.cols_categ_]]
if (sample_weights is not None) and (column_weights is not None) and (sample_weights.dtype != column_weights.dtype):
sample_weights = sample_weights.astype(ctypes.c_double)
column_weights = column_weights.astype(ctypes.c_double)
if self.ndim > 1:
if self.ndim > ncols:
msg = "Model was meant to take %d variables for each split, but data has %d columns."
msg += " Will decrease number of splitting variables to match number of columns."
msg = msg % (self.ndim, ncols)
warnings.warn(msg)
self.ndim = ncols
if self.ndim < 2:
self._is_extended_ = False
X_num = _copy_if_subview(X_num)
X_cat = _copy_if_subview(X_cat)
return X_num, X_cat, ncat, sample_weights, column_weights, nrows
def _process_data_new(self, X, allow_csr = True, allow_csc = True, prefer_row_major = False):
if X.__class__.__name__ == "DataFrame":
if ((self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]) > 0) and (self.categ_cols is None):
if self.categ_cols is None:
missing_cols = np.setdiff1d(np.r_[self.cols_numeric_, self.cols_categ_], np.array(X.columns.values))
if missing_cols.shape[0] > 0:
raise ValueError("Input data is missing %d columns - example: [%s]" % (missing_cols.shape[0], ", ".join(missing_cols[:3])))
else:
if X.shape[1] < (self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]):
raise ValueError("Error: expected input with %d columns - got: %d." %
((self.cols_numeric_.shape[0] + self.cols_categ_.shape[0]), X.shape[1]))
if self._ncols_numeric > 0:
if self.categ_cols is None:
X_num = X[self.cols_numeric_].to_numpy()
else:
X_num = X.iloc[:, self.cols_numeric_].to_numpy()
if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:
X_num = X_num.astype(ctypes.c_double)
if (not prefer_row_major) and (not _is_col_major(X_num)):
X_num = np.asfortranarray(X_num)
nrows = X_num.shape[0]
else:
X_num = None
if self._ncols_categ > 0:
if self.categ_cols is None:
X_cat = X[self.cols_categ_].copy()
for cl in range(self._ncols_categ):
X_cat[self.cols_categ_[cl]] = pd.Categorical(X_cat[self.cols_categ_[cl]], self._cat_mapping[cl]).codes
else:
X_cat = X.iloc[:, self.categ_cols]
X_cat = X_cat.to_numpy()
if X_cat.dtype != ctypes.c_int:
X_cat = X_cat.astype(ctypes.c_int)
if (not prefer_row_major) and (not _is_col_major(X_cat)):
X_cat = np.asfortranarray(X_cat)
nrows = X_cat.shape[0]
else:
X_cat = None
elif self._ncols_categ == 0:
if X.shape[1] < self._ncols_numeric:
raise ValueError("Input has different number of columns than data to which model was fit.")
X_num = X.to_numpy()
if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:
X_num = X_num.astype(ctypes.c_double)
if (not prefer_row_major) and (not _is_col_major(X_num)):
X_num = np.asfortranarray(X_num)
X_cat = None
nrows = X_num.shape[0]
elif self._ncols_numeric == 0:
if X.shape[1] < self._ncols_categ:
raise ValueError("Input has different number of columns than data to which model was fit.")
X_cat = X.to_numpy()[:, :self._ncols_categ]
if X_cat.dtype != ctypes.c_int:
X_cat = X_cat.astype(ctypes.c_int)
if (not prefer_row_major) and (not _is_col_major(X_cat)):
X_cat = np.asfortranarray(X_cat)
X_num = None
nrows = X_cat.shape[0]
else:
nrows = X.shape[0]
X_num = X.iloc[:, self.cols_numeric_].to_numpy()
X_cat = X.iloc[:, self.categ_cols].to_numpy()
if X_num.dtype not in [ctypes.c_double, ctypes.c_float]:
X_num = X_num.astype(ctypes.c_double)
if (not prefer_row_major) and (not _is_col_major(X_num)):
                X_num = np.asfortranarray(X_num)
# api: numpy.asfortranarray
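# Illustrative aside (separate from the snippets above and below): the repeated
# np.asfortranarray calls in the snippet above keep inputs column-major (Fortran-ordered);
# the ndarray flags attribute shows the layout.
import numpy as np

a = np.arange(6).reshape(2, 3)   # C-ordered by default
f = np.asfortranarray(a)         # same values, column-major memory layout
print(a.flags['F_CONTIGUOUS'], f.flags['F_CONTIGUOUS'])   # False True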
# Predict International Airline Passengers (t+whatever, given t)
import numpy as np
import pandas
from autolrn.regression.timeseries import ts_utils as tu
from sklearn.metrics import mean_squared_error
# ...
# fix random seed for reproducibility
np.random.seed(7)
# api: numpy.random.seed
"""Miscellaneous helper routines."""
from typing import Tuple # noqa: flake8 bug, #118
from typing import List
import numpy as np
def _split_data(
data: dict,
) -> Tuple[List[str], List[float], List[Tuple[int, ...]], List[str], List[int], List[Tuple[int, ...]]]:
"""Prepare data for use in an array_var_context constructor.
array_var_context is a C++ class defined in Stan. See
``array_var_context.hpp`` for details.
The constructor signature is::
array_var_context(const std::vector<std::string>& names_r,
const std::vector<double>& values_r,
const std::vector<std::vector<size_t> >& dim_r,
const std::vector<std::string>& names_i,
const std::vector<int>& values_i,
const std::vector<std::vector<size_t> >& dim_i)
Multi-dimensional data is flattened using column-major order when passed to
    ``array_var_context``. Stan uses column-major order. NumPy, by contrast,
    uses row-major order by default. To unravel a multi-dimensional array in
    column-major order with NumPy, indicate order ``F`` ('F' stands for Fortran).
Arguments:
data: Mapping of names to values (e.g., {'y': [0, 1, 2]}).
Returns:
Arguments with types matching the signature of ``array_var_context``.
"""
data = data.copy()
names_r: List[str] = []
values_r: List[float] = []
dim_r: List[Tuple[int, ...]] = []
names_i: List[str] = []
values_i: List[int] = []
dim_i: List[Tuple[int, ...]] = []
for k, v in data.items():
if np.issubdtype(np.asarray(v).dtype, np.floating):
names_r.append(k.encode("utf-8"))
# unravel multi-dimensional arrays using column-major ('F') order
values_r.extend(np.atleast_1d(v).ravel(order="F").astype(float))
dim_r.append(np.asarray(v).shape)
        elif np.issubdtype(np.asarray(v)
# api: numpy.asarray
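# Illustrative aside (separate from the snippets above and below): a short worked example of
# the column-major flattening described in the docstring above -- order='F' walks down columns.
import numpy as np

v = np.array([[1, 2], [3, 4]])
print(v.ravel(order="F"))   # [1 3 2 4]  (column-major, as array_var_context expects)
print(v.ravel(order="C"))   # [1 2 3 4]  (NumPy's default row-major order)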
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import torch.nn.functional as F
def running_avg(x, y, k=.99):
return k * x + (1 - k) * y
def softmax(x, d=-1):
tmp = np.exp(np.array(x))
return tmp / tmp.sum(axis=d, keepdims=True)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def inv_sigmoid(x):
return np.log(x / (1 - x))
def dist(a, b):
return np.linalg.norm(a - b)
def smooth_arr(arr, window=3):
to_flatten = False
if arr.ndim == 1:
to_flatten = True
arr = np.expand_dims(arr, 1)
pad = window // 2
tmp_arr = F.pad(
torch.unsqueeze(torch.Tensor(arr.T), 0), [pad, pad], mode='reflect')
tmp_arr = np.array(F.avg_pool1d(tmp_arr, window, stride=1).data)
tmp_arr = tmp_arr[0].T
if to_flatten:
tmp_arr = tmp_arr[:, 0]
return tmp_arr
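# decathlon_score appears to implement the Visual Decathlon scoring: per-task error is compared
# against twice the listed single-task baseline error (max_err), and each task contributes
# alpha * max(0, max_err - err)**gamma points with gamma = 2 and alpha = 1000 / max_err**2,
# so zero error is worth 1000 points per task and matching the baseline error is worth 250.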
def decathlon_score(scores, task_idxs=None):
if task_idxs is None:
task_idxs = [i for i in range(10)]
baseline_err = 1 - np.array([
59.87, 60.34, 82.12, 92.82, 55.53, 97.53, 81.41, 87.69, 96.55, 51.20
]) / 100
baseline_err = baseline_err[task_idxs]
num_tasks = len(task_idxs)
max_err = 2 * baseline_err
gamma_vals = np.ones(num_tasks) * 2
alpha_vals = 1000 * (max_err)**(-gamma_vals)
err = 1 - scores
if num_tasks == 1:
err = [err]
all_scores = []
for i in range(num_tasks):
all_scores += [alpha_vals[i] * max(0, max_err[i] - err[i])**gamma_vals[i]]
return sum(all_scores), all_scores
def rescale(x, min_val, max_val, invert=False):
if not invert:
return x * (max_val - min_val) + min_val
else:
return (x - min_val) / (max_val - min_val)
def pow10(x, min_val, max_val, invert=False):
log_fn = np.log if type(x) is float else torch.log
if not invert:
return 10**rescale(x,
np.log(min_val) / np.log(10),
np.log(max_val) / np.log(10))
else:
return rescale(
log_fn(x) / np.log(10),
np.log(min_val) / np.log(10),
np.log(max_val) / np.log(10), invert)
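# map_val maps a normalised value x in [0, 1] onto [min_val, max_val] either linearly (rescale)
# or on a log10 scale (pow10); e.g. map_val(0.5, 1e-4, 1e-1, scale='log') gives 10**-2.5 ~ 3.2e-3.
# Passing invert=True runs the mapping in reverse, recovering the normalised coordinate.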
def map_val(x, min_val, max_val, scale='linear', invert=False):
if scale == 'log':
map_fn = pow10
elif scale == 'linear':
map_fn = rescale
return map_fn(x, min_val, max_val, invert)
def reverse_tensor(t, dim):
return t.index_select(dim, torch.arange(t.shape[dim] - 1, -1, -1).long())
def convert_mat_aux(m, d, min_, max_, invert=False):
if invert:
m = (m - min_) / (max_ - min_ + 1e-5)
else:
m = m * (max_ - min_) + min_
m = np.triu(m, 1)
return m + m.T + np.diag(d)
def convert_mat(mat, invert=False):
if mat.dim() == 3:
# mat is 2 x n x n
# where mat[0] is the forward matrix, and mat[1] is the backward one
mat = np.array(mat)
# Convert forward matrix
d_f = mat[0].diagonal()
min_ = np.maximum(0, np.add.outer(d_f, d_f) - 1)
max_ = np.minimum.outer(d_f, d_f)
m_f = convert_mat_aux(mat[0], d_f, min_, max_, invert=invert)
# Convert backward matrix
d_b = mat[1].diagonal()
if not invert:
d_b = d_b * d_f
tmp_m = mat[0] if invert else m_f
        min_ = np.maximum(0, np.add.outer(d_b, d_b)
# api: numpy.add.outer
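# Illustrative aside (separate from the snippets above and below): np.add.outer, used above on
# the diagonals, builds the matrix of pairwise sums of two vectors.
import numpy as np

print(np.add.outer([1, 2], [10, 20]))   # [[11 21]
                                        #  [12 22]]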
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestRankLossOp(OpTest):
def setUp(self):
self.op_type = "rank_loss"
shape = (5, 1)
# labels_{i} = {0, 1.0} or {0, 0.5, 1.0}
label_shape, left_shape, right_shape = self.set_shape()
label = np.random.randint(0, 2, size=shape).astype("float32")
        left = np.random.random(shape)
# api: numpy.random.random
import numpy as np
import math
import sys
sys.path.append('..')
from functools import partial
from utils.functions import tensor_4d_mesh, sample_from_matrix
from scipy.stats import multivariate_normal, poisson, rv_discrete, expon
from scipy.linalg import sqrtm
from scipy.optimize import Bounds, minimize
class InfernoToyLoader:
def __init__(self, s_param=50, r_param=0.0, lambda_param=3.0, b_param=1000, benchmark=None,
nuisance_parameters=False, s_low=0, s_high=100, r_low=-5, r_high=5, lambda_low=0, lambda_high=10,
b_low=700, b_high=1300, out_dir='inferno_toy/', num_acore_grid=21, num_pred_grid=21,
empirical_marginal=True, *args, **kwargs):
self.true_s = s_param
self.s_low = s_low
self.s_high = s_high
self.sigmas_mat = [np.diag([5, 9]), np.diag([1, 1])]
self.low_int = -5
self.high_int = 1300
if benchmark is not None:
if benchmark not in [0, 1, 2, 3, 4, 5]:
                raise ValueError('benchmark variable needs to be an integer between 0 and 5, corresponding '
'to the setup of the INFERNO paper by <NAME> (2018). '
'Currently %s.' % benchmark)
if benchmark == 0:
self.true_r = 0.0
self.true_lambda = 3.0
self.true_b = 1000
self.r_low = 0.0
self.r_high = 0.0
self.lambda_low = 3.0
self.lambda_high = 3.0
self.b_low = 1000.0
self.b_high = 1000.0
if benchmark == 1:
self.true_r = r_param
self.true_lambda = 3.0
self.true_b = 1000
self.r_low = r_low
self.r_high = r_high
self.lambda_low = 3.0
self.lambda_high = 3.0
self.b_low = 1000.0
self.b_high = 1000.0
if benchmark == 2:
self.true_r = r_param
self.true_lambda = lambda_param
self.true_b = 1000
self.r_low = r_low
self.r_high = r_high
self.lambda_low = lambda_low
self.lambda_high = lambda_high
self.b_low = 1000.0
self.b_high = 1000.0
if benchmark == 3:
self.true_r = r_param
self.true_lambda = lambda_param
self.true_b = 1000
self.r_low = -1.2
self.r_high = 1.2
self.lambda_low = 0.0
self.lambda_high = 6.0
self.b_low = 1000.0
self.b_high = 1000.0
if benchmark == 4:
self.true_r = r_param
self.true_lambda = lambda_param
self.true_b = b_param
self.r_low = -1.2
self.r_high = 1.2
self.lambda_low = 0.0
self.lambda_high = 6.0
self.b_low = b_low
self.b_high = b_high
if benchmark == 5:
self.true_r = 0.0
self.true_lambda = 3.0
self.true_b = b_param
self.r_low = 0.0
self.r_high = 0.0
self.lambda_low = 3.0
self.lambda_high = 3.0
self.b_low = b_low
self.b_high = b_high
else:
self.true_r = r_param
self.true_lambda = lambda_param
self.true_b = b_param
self.r_low = r_low
self.r_high = r_high
self.lambda_low = lambda_low
self.lambda_high = lambda_high
self.b_low = b_low
self.b_high = b_high
self.true_param = np.array([self.true_s, self.true_r, self.true_lambda, self.true_b])
active_params_tuple = [True, False, False, False]
low_bounds, high_bounds = [], []
for idx, (low, high) in enumerate([(self.r_low, self.r_high), (self.lambda_low, self.lambda_high),
(self.b_low, self.b_high)]):
if low != high:
active_params_tuple[idx + 1] = True
low_bounds.append(low)
high_bounds.append(high)
self.bounds_opt = Bounds(low_bounds, high_bounds)
self.d = sum(active_params_tuple)
self.active_params = self.d
self.active_params_cols = np.where(active_params_tuple)[0]
self.target_params_cols = [0] # target parameter is always the signal
# If nuisance parameters are treated as such, then determine which columns are nuisance parameters and
# which are not
self.nuisance_flag = self.d > 1 and nuisance_parameters
self.nuisance_params_cols = np.where(active_params_tuple)[0][1:] if self.nuisance_flag else None
self.t0_grid_nuisance = None
self.nuisance_global_param_val = None
# Prediction grids have to be mindful of how many parameters are active
self.num_pred_grid = num_pred_grid
self.num_acore_grid = num_acore_grid
# For the ACORE grid, it really depends on whether we consider nuisance parameters or not
if self.nuisance_flag:
self.pred_grid = np.linspace(start=self.s_low, stop=self.s_high, num=self.num_pred_grid)
self.idx_row_true_param = np.where((self.pred_grid == self.true_param[0]))[0][0]
self.acore_grid = None
else:
self.pred_grid = np.unique(
tensor_4d_mesh(np.meshgrid(np.linspace(start=self.s_low, stop=self.s_high, num=self.num_pred_grid),
np.linspace(start=self.r_low, stop=self.r_high, num=self.num_pred_grid),
np.linspace(start=self.lambda_low, stop=self.lambda_high,
num=self.num_pred_grid),
np.linspace(start=self.b_low, stop=self.b_high, num=self.num_pred_grid))),
axis=0)[:, self.active_params_cols]
self.idx_row_true_param = np.where((self.pred_grid == self.true_param[self.active_params_cols]).all(
axis=1))[0][0]
self.acore_grid = np.unique(
tensor_4d_mesh(np.meshgrid(np.linspace(start=self.s_low, stop=self.s_high, num=self.num_pred_grid),
np.linspace(start=self.r_low, stop=self.r_high, num=self.num_pred_grid),
np.linspace(start=self.lambda_low, stop=self.lambda_high,
num=self.num_pred_grid),
np.linspace(start=self.b_low, stop=self.b_high, num=self.num_acore_grid))),
axis=0)[:, self.active_params_cols]
self.regen_flag = False
self.out_directory = out_dir
self.empirical_marginal = empirical_marginal
self.mean_instrumental = np.array([1, 1, 2])
self.cov_instrumental = np.diag([5, 10, 5])
self.g_distribution = multivariate_normal(mean=self.mean_instrumental, cov=self.cov_instrumental)
self.b_sample_vec = [50, 100, 500, 1000, 5000, 10000, 50000, 100000]
self.b_prime_vec = [500, 1000, 5000, 10000, 50000]
self.d_obs = 3
self.nuisance_param_val = None
@staticmethod
def _compute_mean_vec(r_param):
return [np.array([2.0 + r_param, 0]), np.array([1, 1])]
@staticmethod
def _compute_mixing_param(b_param, s_param):
return b_param / (s_param + b_param)
def set_reference_g(self, size_reference):
sample_mat_ref = self.generate_sample(sample_size=size_reference, p=1.0)[:, (self.d + 1):]
self.mean_instrumental = np.average(sample_mat_ref, axis=0)
self.cov_instrumental = np.diag(np.std(sample_mat_ref, axis=0) ** 2)
self.g_distribution = multivariate_normal(mean=self.mean_instrumental, cov=self.cov_instrumental)
def sample_empirical_marginal(self, sample_size):
theta_vec_marg = self.sample_param_values(sample_size=sample_size)
sample_mat = np.apply_along_axis(arr=theta_vec_marg.reshape(-1, self.d), axis=1,
func1d=lambda row: self.sample_sim(
sample_size=1, true_param=row)).reshape(-1, self.d_obs)
return sample_mat
def sample_param_values(self, sample_size):
full_mat = np.random.uniform(low=self.s_low, high=self.s_high, size=sample_size).reshape(-1, 1)
if 1 in self.active_params_cols:
            r_vec = np.random.uniform(low=self.r_low, high=self.r_high, size=sample_size)
# api: numpy.random.uniform
import os,sys
import vtk,vtktools
import numpy as np
import u2r
class Mesh_Information():
def __init__(self):
self.nNodes = 0
self.nEl = 0
self.nloc = 0
self.field_names = []
self.subtract_mean = False
self.nDim = 0
class Grid_Information():
def __init__(self):
self.nx = 0
self.ny = 0
self.nz = 1
self.nGrids = 0
self.ddx = []
self.grid_origin = []
self.grid_width = []
def set_mesh_info(nNodes, nEl, nloc, nDim, nFields, field_names):
mesh_info = Mesh_Information()
mesh_info.nNodes = nNodes
mesh_info.nEl = nEl
mesh_info.nloc = nloc
mesh_info.nDim = nDim
mesh_info.nFields = nFields
mesh_info.field_names = field_names
return mesh_info
def set_grid_info(nx, ny, nz, nGrids, ddx, grid_origin, grid_width):
grid_info = Grid_Information()
grid_info.nx = nx
grid_info.ny = ny
grid_info.nz = nz
grid_info.nGrids = nGrids
grid_info.ddx = ddx
grid_info.grid_origin = grid_origin
grid_info.grid_width = grid_width
return grid_info
def get_mesh_info(mesh_info):
return mesh_info.nNodes, mesh_info.nEl, mesh_info.nloc, mesh_info.nDim, mesh_info.nFields, mesh_info.field_names
def get_grid_info(grid_info):
return grid_info.nx, grid_info.ny, grid_info.nz, grid_info.nGrids, grid_info.ddx, grid_info.grid_origin, grid_info.grid_width
def get_global_node_numbers(nEl, nloc, representative_vtu):
    x_ndgln = np.zeros((nEl*nloc), dtype=int)
    for iEl in range(nEl):
        n = representative_vtu.GetCellPoints(iEl) + 1
x_ndgln[iEl*nloc:(iEl+1)*nloc] = n
return x_ndgln
def get_block_origin(grid_origin, grid_width, iGrid):
return np.array(( grid_origin[0]+iGrid*grid_width[0], grid_origin[1] +iGrid*grid_width[1]))
def find_node_duplications_from_overlapping_grids(representative_vtu, mesh_info, grid_info, x_all, x_ndgln):
nNodes, nEl, nloc, nDim, nFields, field_names = get_mesh_info(mesh_info)
nx, ny, nz, nGrids, ddx, grid_origin, grid_width = get_grid_info(grid_info)
# obtain a field of ones the size of the first field
# assumes all fields are defined over the same number of nodes
my_field = representative_vtu.GetField(field_names[0])[:,0]
my_field = 1
nScalar_test = 1
# for one timestep and for one field, map the solution (field of value one) from the mesh to the grid and back to the mesh again
nTime = 1
value_mesh = np.zeros((nScalar_test,nNodes,nTime))
value_mesh[:,:,0] = np.transpose(my_field)
superposed_grids = np.zeros((nNodes))
for iGrid in range(nGrids):
block_x_start = get_block_origin(grid_origin, grid_width, iGrid)
zeros_beyond_mesh = 0
value_grid = u2r.simple_interpolate_from_mesh_to_grid(value_mesh,x_all,x_ndgln,ddx,block_x_start,nx,ny,nz,zeros_beyond_mesh, nEl,nloc,nNodes,nScalar_test,nDim,1)
zeros_beyond_grid = 1
value_back_on_mesh = u2r.interpolate_from_grid_to_mesh(value_grid, block_x_start, ddx, x_all, zeros_beyond_grid, nScalar_test,nx,ny,nz,nNodes,nDim, 1)
superposed_grids = superposed_grids + np.rint(np.squeeze(value_back_on_mesh))
# superpose the solutions on the mesh and detect how many 2s are present, indicating a node at which the solution is duplicated
superposed_grids = np.array(superposed_grids, dtype='int')
duplicated_nodal_values = []
for iNode in range(nNodes):
if superposed_grids[iNode] == 0:
print('zero:', iNode)
elif superposed_grids[iNode] == 2:
print('two:', iNode)
duplicated_nodal_values.append(iNode)
elif superposed_grids[iNode] != 1:
print('unknown:', iNode, superposed_grids[iNode])
print('error - currently can handle a node being on only two grids, not more')
sys.exit()
return duplicated_nodal_values
def read_in_snapshots_interpolate_to_grids(snapshot_data_location, snapshot_file_base, mesh_info, grid_info, nTime, offset, nScalar, x_all, x_ndgln):
nNodes, nEl, nloc, nDim, nFields, field_names = get_mesh_info(mesh_info)
nx, ny, nz, nGrids, ddx, grid_origin, grid_width = get_grid_info(grid_info)
snapshots_data = []
for iField in range(nFields):
nDoF = nNodes # could be different value per field
snapshots_data.append(np.zeros((nx*ny*nz*nDim, nGrids*nTime)))
for iTime in range(nTime):
#print('')
#print('time level', iTime)
filename = snapshot_data_location + snapshot_file_base + str(offset+iTime) + '.vtu'
vtu_data = vtktools.vtu(filename)
for iField in range(nFields):
my_field = vtu_data.GetField(field_names[iField])[:,0:nDim]
for iGrid in range(nGrids):
block_x_start = get_block_origin(grid_origin, grid_width, iGrid)
if iTime==0:
print('block_x_start', block_x_start)
                # should this be up with the other grid settings?
#ddx = np.array((0.01,0.01)) #np.array((0.002,0.002))
value_mesh = np.zeros((nScalar,nNodes,nTime)) # nTime
value_mesh[:,:,iTime] = np.transpose(my_field)
# interpolate field onto structured mesh
# feed in one result at t time (no need to store in value_mesh in this case)
zeros_beyond_mesh = 0 # 0 extrapolate solution (for the cylinder in fpc); 1 gives zeros for nodes outside mesh
value_grid = u2r.simple_interpolate_from_mesh_to_grid(value_mesh[:,:,iTime],x_all,x_ndgln,ddx,block_x_start,nx,ny,nz,zeros_beyond_mesh,nEl,nloc,nNodes,nScalar,nDim,1)
snapshots_data[iField][:,iTime*nGrids+iGrid] = value_grid.reshape(-1)
return snapshots_data
def write_sing_values(singular_values, field_names):
for iField in range(len(field_names)):
f= open('singular_values_' + field_names[iField] + '.dat',"w+")
f.write('# index, s_values, normalised s_values, cumulative energy \n' )
s_values = singular_values[iField]
total = 0.0
for k in range(len(s_values)):
total = total + s_values[k]*s_values[k]
running_total = 0.0
for i in range(len(s_values)):
running_total = running_total + s_values[i]*s_values[i]
f.write ('%d %g %g %18.10g \n' % (i, s_values[i], s_values[i]/s_values[0], running_total/total) )
f.close()
return
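# get_POD_bases computes POD modes by the method of snapshots: it eigendecomposes S^T S
# (or S S^T when there are fewer rows than columns -- hence the warning printed below),
# orders the eigenvalues by decreasing energy, and forms each basis vector as S v / ||S v||,
# truncating according to nPOD (-1: cumulative energy tolerance, -2: keep all modes).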
def get_POD_bases(mesh_info, grid_info, snapshots_data, nPOD):
#nNodes, nEl, nloc, nDim, nFields, field_names = get_mesh_info(mesh_info)
nFields = mesh_info.nFields
nDim = mesh_info.nDim
field_names = mesh_info.field_names
nx = grid_info.nx; ny = grid_info.ny; nz = grid_info.nz
bases = []
singular_values = []
for iField in range(len(field_names)):
snapshots_matrix = snapshots_data[iField]
nrows, ncols = snapshots_matrix.shape
if nrows > ncols:
SSmatrix = np.dot(snapshots_matrix.T, snapshots_matrix)
else:
SSmatrix = np.dot(snapshots_matrix, snapshots_matrix.T)
print('WARNING - CHECK HOW THE BASIS FUNCTIONS ARE CALCULATED WITH THIS METHOD')
print('SSmatrix', SSmatrix.shape)
#print('SSmatrix', SSmatrix)
eigvalues, v = np.linalg.eigh(SSmatrix)
eigvalues = eigvalues[::-1]
# get rid of small negative eigenvalues
eigvalues[eigvalues<0] = 0
s_values = np.sqrt(eigvalues)
singular_values.append(s_values)
cumulative_info = np.zeros(len(eigvalues))
for j in range(len(eigvalues)):
if j==0:
cumulative_info[j] = eigvalues[j]
else:
cumulative_info[j] = cumulative_info[j-1] + eigvalues[j]
cumulative_info = cumulative_info / cumulative_info[-1]
nAll = len(eigvalues)
if nPOD[iField] == -1:
# SVD truncation - percentage of information captured or number
cumulative_tol = nirom_options.compression.cumulative_tol[iField]
nPOD_iField = sum(cumulative_info <= cumulative_tol) #tolerance
nPOD[iField] = nPOD_iField
elif nPOD[iField] == -2:
nPOD_iField = nAll
nPOD[iField] = nPOD_iField
else:
nPOD_iField = nPOD[iField]
print("retaining", nPOD_iField, "basis functions of a possible", len(eigvalues))
basis_functions = np.zeros((nx*ny*nz*nDim,nPOD_iField))
for j in reversed(range(nAll-nPOD_iField,nAll)):
Av = np.dot(snapshots_matrix,v[:,j])
basis_functions[:,nAll-j-1] = Av/np.linalg.norm(Av)
bases.append(basis_functions)
write_sing_values(singular_values, field_names)
return bases
def reconstruct_data_on_mesh(snapshots_data, mesh_info, grid_info, bases, nScalar, nTime, x_all, duplicated_nodal_values):
nNodes, nEl, nloc, nDim, nFields, field_names = get_mesh_info(mesh_info)
nx, ny, nz, nGrids, ddx, grid_origin, grid_width = get_grid_info(grid_info)
reconstructed_data = []
for iField in range(nFields):
basis = bases[iField]
snapshots_matrix = snapshots_data[iField]
print('snapshots_matrix', snapshots_matrix.shape)
reconstruction_on_mesh = np.zeros((nScalar*nTime,nNodes))
#reconstruction_on_mesh_from_one_grid = np.zeros((nScalar,nNodes))
for iGrid in range(nGrids):
#:,iTime*nGrids+iGrid
# want solutions in time for a particular grid
snapshots_per_grid = np.zeros((nx*ny*nz*nDim,nTime))
for iTime in range(nTime):
#print('taking snapshots from', iTime*nGrids+iGrid )
snapshots_per_grid[:,iTime] = snapshots_matrix[:,iTime*nGrids+iGrid]
reconstruction = np.dot( basis, np.dot( basis.T, snapshots_per_grid ) )
#print('reconstruction', reconstruction.shape)
#reconstruction_data.append(reconstruction)
#print ('recon shape',reconstruction.shape)
reconstruction_grid = reconstruction.reshape(nScalar,nx,ny,nTime)
#print ('recon shape just before interpolating back onto mesh',reconstruction.reshape(nScalar,nx,ny,nTime).shape)
# plot solution on each grid at 4 time steps
#fig, axs = plt.subplots(2, 2, figsize=(15,15))
#if iGrid==0:
# levels = np.linspace(0, 4, 5)
#elif iGrid==1:
# levels = np.linspace(5, 9, 5)
#icount = 0
#for col in range(2):
# for row in range(2):
# ax = axs[row, col]
# ax.set_title('time '+str(icount))
# pcm = ax.contourf(reconstruction_grid[0,:,:,icount],levels=levels)
# fig.colorbar(pcm,ax=ax)
# icount += 1
#plt.show()
block_x_start = get_block_origin(grid_origin, grid_width, iGrid)
if iTime==0:
print('block_x_start', block_x_start)
for iTime in range(nTime):
zeros_beyond_grid = 1 # 0 extrapolate solution; 1 gives zeros for nodes outside grid
reconstruction_on_mesh_from_one_grid = u2r.interpolate_from_grid_to_mesh(reconstruction_grid[:,:,:,iTime], block_x_start, ddx, x_all, zeros_beyond_grid, nScalar,nx,ny,nz,nNodes,nDim, 1)
#print('reconstruction_on_mesh_from_one_grid - about to add solutions',reconstruction_on_mesh_from_one_grid.shape)
reconstruction_on_mesh[nScalar*iTime:nScalar*(iTime+1),:] = reconstruction_on_mesh[nScalar*iTime:nScalar*(iTime+1),:] + np.squeeze(reconstruction_on_mesh_from_one_grid)
reconstruction_on_mesh[:,duplicated_nodal_values] = 0.5*reconstruction_on_mesh[:,duplicated_nodal_values]
reconstructed_data.append(reconstruction_on_mesh)
return reconstructed_data
def get_original_data_from_vtu_files(snapshot_data_location, snapshot_file_base, offset, mesh_info, nTime):
nNodes, nEl, nloc, nDim, nFields, field_names = get_mesh_info(mesh_info)
original_data = []
# nDoF = nNodes # could be different value per field
    original = np.zeros((nNodes, nDim*nTime))
# api: numpy.zeros
from dataclasses import dataclass, replace
from functools import cached_property
from typing import Tuple, Type, Union, Optional, Dict, Callable, List
from collections import namedtuple
from itertools import dropwhile
from warnings import warn
import numpy as np
from numpy import ndarray
from scipy.spatial import cKDTree
from ..element import (Element, ElementHex1, ElementQuad1, ElementQuad2,
ElementTetP1, ElementTriP1, ElementTriP2, ElementLineP1,
ElementTetP2, ElementHex2, BOUNDARY_ELEMENT_MAP)
@dataclass(repr=False)
class Mesh:
doflocs: ndarray
t: ndarray
_boundaries: Optional[Dict[str, ndarray]] = None
_subdomains: Optional[Dict[str, ndarray]] = None
elem: Type[Element] = Element
affine: bool = False
validate: bool = False # unused; for backwards compatibility
# Some parts of the library, most notably the normal vector construction in
# ElementGlobal._eval_dofs, assume that the element indices are ascending
# because this leads to consistent normal vectors for both elements sharing
# a facet. Therefore, the element indices are sorted in a triangle mesh.
# However, some algorithms (e.g., adaptive refinement) require switching
# off this behaviour and, hence, this flag exists.
sort_t: bool = False
@property
def p(self):
return self.doflocs
@property
def dofs(self):
from skfem.assembly import Dofs
if not hasattr(self, '_dofs'):
self._dofs = Dofs(self, self.elem())
return self._dofs
@property
def refdom(self):
return self.elem.refdom
@property
def brefdom(self):
return self.elem.refdom.brefdom
@property
def bndelem(self):
return BOUNDARY_ELEMENT_MAP[self.elem]()
@property
def nelements(self):
return self.t.shape[1]
@property
def nvertices(self):
return np.max(self.t) + 1
@property
def nfacets(self):
return self.facets.shape[1]
@property
def nedges(self):
return self.edges.shape[1]
@property
def nnodes(self):
return self.t.shape[0]
@property
def subdomains(self):
return self._subdomains
@property
def boundaries(self):
return self._boundaries
@property
def facets(self):
if not hasattr(self, '_facets'):
self._init_facets()
return self._facets
@property
def t2f(self):
if not hasattr(self, '_t2f'):
self._init_facets()
return self._t2f
@property
def f2t(self):
if not hasattr(self, '_f2t'):
self._f2t = self.build_inverse(self.t, self.t2f)
return self._f2t
@property
def edges(self):
if not hasattr(self, '_edges'):
self._init_edges()
return self._edges
@property
def t2e(self):
if not hasattr(self, '_t2e'):
self._init_edges()
return self._t2e
@cached_property
def bbox(self):
"""Bounding box"""
return np.array([np.min(self.p, axis=1), np.max(self.p, axis=1)]).T
def dim(self):
return self.elem.refdom.dim()
def boundary_facets(self) -> ndarray:
"""Return an array of boundary facet indices."""
return np.nonzero(self.f2t[1] == -1)[0]
def boundary_edges(self) -> ndarray:
"""Return an array of boundary edge indices."""
facets = self.boundary_facets()
boundary_edges = np.sort(np.hstack(
tuple([np.vstack((self.facets[itr, facets],
self.facets[(itr + 1) % self.facets.shape[0],
facets]))
for itr in range(self.facets.shape[0])])).T, axis=1)
edge_candidates = np.unique(self.t2e[:, self.f2t[0, facets]])
A = self.edges[:, edge_candidates].T
B = boundary_edges
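        # Encode each edge (a pair of node indices) as a single linear index with
        # np.ravel_multi_index, then use np.in1d to keep only the candidate edges that
        # also appear among the boundary-facet edges.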
dims = A.max(0) + 1
ix = np.where(np.in1d(np.ravel_multi_index(A.T, dims),
np.ravel_multi_index(B.T, dims)))[0]
return edge_candidates[ix]
def with_boundaries(self,
boundaries: Dict[str, Callable[[ndarray], ndarray]]):
"""Return a copy of the mesh with named boundaries.
Parameters
----------
boundaries
A dictionary of lambda functions with the names of the boundaries
as keys. The midpoint of the facet should return ``True`` for the
corresponding lambda function if the facet belongs to the boundary.
"""
return replace(
self,
_boundaries={
**({} if self._boundaries is None else self._boundaries),
**{name: self.facets_satisfying(test, True)
for name, test in boundaries.items()}
},
)
def with_subdomains(self,
subdomains: Dict[str, Callable[[ndarray], ndarray]]):
"""Return a copy of the mesh with named subdomains.
Parameters
----------
boundaries
A dictionary of lambda functions with the names of the subdomains
as keys. The midpoint of the element should return ``True`` for
the corresponding lambda function if the element belongs to the
subdomain.
"""
return replace(
self,
_subdomains={
**({} if self._subdomains is None else self._subdomains),
**{name: self.elements_satisfying(test)
for name, test in subdomains.items()},
}
)
def boundary_nodes(self) -> ndarray:
"""Return an array of boundary node indices."""
return np.unique(self.facets[:, self.boundary_facets()])
def interior_nodes(self) -> ndarray:
"""Return an array of interior node indices."""
return np.setdiff1d(np.arange(0, self.p.shape[1]),
self.boundary_nodes())
def nodes_satisfying(self,
test: Callable[[ndarray], ndarray],
boundaries_only: bool = False) -> ndarray:
"""Return nodes that satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the set of nodes that are to
be included in the return set.
boundaries_only
If ``True``, include only boundary facets.
"""
nodes = np.nonzero(test(self.p))[0]
if boundaries_only:
nodes = np.intersect1d(nodes, self.boundary_nodes())
return nodes
def facets_satisfying(self,
test: Callable[[ndarray], ndarray],
boundaries_only: bool = False) -> ndarray:
"""Return facets whose midpoints satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the facet midpoints that are
to be included in the return set.
boundaries_only
If ``True``, include only boundary facets.
"""
midp = [np.sum(self.p[itr, self.facets], axis=0) / self.facets.shape[0]
for itr in range(self.dim())]
facets = np.nonzero(test(np.array(midp)))[0]
if boundaries_only:
facets = np.intersect1d(facets, self.boundary_facets())
return facets
def elements_satisfying(self,
test: Callable[[ndarray], ndarray]) -> ndarray:
"""Return elements whose midpoints satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the element midpoints that
are to be included in the return set.
"""
midp = [np.sum(self.p[itr, self.t], axis=0) / self.t.shape[0]
for itr in range(self.dim())]
return np.nonzero(test(np.array(midp)))[0]
def _expand_facets(self, ix: ndarray) -> Tuple[ndarray, ndarray]:
"""Return vertices and edges corresponding to given facet indices.
Parameters
----------
ix
An array of facet indices.
"""
vertices = np.unique(self.facets[:, ix].flatten())
if self.dim() == 3:
edge_candidates = self.t2e[:, self.f2t[0, ix]].flatten()
# subset of edges that share all points with the given facets
subset = np.nonzero(
np.prod(np.isin(self.edges[:, edge_candidates],
self.facets[:, ix].flatten()),
axis=0)
)[0]
edges = np.intersect1d(self.boundary_edges(),
edge_candidates[subset])
else:
edges = np.array([], dtype=np.int64)
return vertices, edges
class FakeMesh:
def __init__(self, p, t, facets, t2f, f2t, refdom):
self.p = p
self.t = t
self.facets = facets
self.t2f = t2f
self.f2t = f2t
self.refdom = refdom
def dim(self):
return self.refdom.dim()
def _mapping(self):
"""Return a default reference mapping for the mesh."""
from skfem.mapping import MappingAffine, MappingIsoparametric
if not hasattr(self, '_cached_mapping'):
fakemesh = Mesh.FakeMesh(self.doflocs, self.dofs.element_dofs,
self.facets, self.t2f, self.f2t,
self.elem.refdom)
if self.affine:
self._cached_mapping = MappingAffine(fakemesh)
else:
self._cached_mapping = MappingIsoparametric(
fakemesh,
self.elem(),
self.bndelem,
)
return self._cached_mapping
def _init_facets(self):
"""Initialize ``self.facets``."""
self._facets, self._t2f = self.build_entities(
self.t,
self.elem.refdom.facets,
)
def _init_edges(self):
"""Initialize ``self.edges``."""
self._edges, self._t2e = self.build_entities(
self.t,
self.elem.refdom.edges,
)
def __post_init__(self):
"""Support node orders used in external formats.
We expect ``self.doflocs`` to be ordered based on the
degrees-of-freedom in :class:`skfem.assembly.Dofs`. External formats
for high order meshes commonly use a less strict ordering scheme and
the extra nodes are described as additional rows in ``self.t``. This
method attempts to accommodate external formats by reordering
``self.doflocs`` and changing the indices in ``self.t``.
"""
if self.sort_t:
self.t = np.sort(self.t, axis=0)
if not isinstance(self.doflocs, ndarray):
# for backwards compatibility: support standard lists
self.doflocs = np.array(self.doflocs, dtype=np.float64)
if not isinstance(self.t, ndarray):
# for backwards compatibility: support standard lists
self.t = np.array(self.t, dtype=np.int64)
M = self.elem.refdom.nnodes
if self.nnodes > M:
# reorder DOFs to the expected format: vertex DOFs are first
p, t = self.doflocs, self.t
t_nodes = t[:M]
uniq, ix = np.unique(t_nodes, return_inverse=True)
self.t = (np.arange(len(uniq), dtype=np.int64)[ix]
.reshape(t_nodes.shape))
doflocs = np.hstack((
p[:, uniq],
np.zeros((p.shape[0], np.max(t) + 1 - len(uniq))),
))
doflocs[:, self.dofs.element_dofs[M:].flatten('F')] =\
p[:, t[M:].flatten('F')]
self.doflocs = doflocs
# C_CONTIGUOUS is more performant in dimension-based slices
if self.doflocs.flags['F_CONTIGUOUS']:
if self.doflocs.shape[1] > 1000:
warn("Transforming over 1000 vertices to C_CONTIGUOUS.")
self.doflocs = np.ascontiguousarray(self.doflocs)
if self.t.flags['F_CONTIGUOUS']:
if self.t.shape[1] > 1000:
warn("Transforming over 1000 elements to C_CONTIGUOUS.")
self.t = np.ascontiguousarray(self.t)
def __add__(self, other):
"""Join two meshes."""
if not isinstance(other, type(self)):
raise TypeError("Can only join meshes with same type.")
p = np.hstack((self.p, other.p))
t = np.hstack((self.t, other.t + self.p.shape[1]))
tmp = np.ascontiguousarray(p.T)
tmp, ixa, ixb = np.unique(tmp.view([('', tmp.dtype)] * tmp.shape[1]),
return_index=True, return_inverse=True)
p = p[:, ixa]
t = ixb[t]
cls = type(self)
return cls(p, t)
def __repr__(self):
return "{} mesh with {} vertices and {} elements.".format(
self.elem.refdom.name,
self.nvertices,
self.nelements,
)
def __str__(self):
return self.__repr__()
def save(self,
filename: str,
point_data: Optional[Dict[str, ndarray]] = None,
**kwargs) -> None:
"""Export the mesh and fields using meshio.
Parameters
----------
filename
The output filename, with suffix determining format;
e.g. .msh, .vtk, .xdmf
point_data
Data related to the vertices of the mesh.
"""
from skfem.io.meshio import to_file
return to_file(self, filename, point_data, **kwargs)
@classmethod
def load(cls, filename):
from skfem.io.meshio import from_file
return from_file(filename)
@classmethod
def from_dict(cls, data):
"""For backwards compatibility."""
if 'p' not in data or 't' not in data:
raise ValueError("Dictionary must contain keys 'p' and 't'.")
else:
data['p'] = np.ascontiguousarray(np.array(data['p']).T)
data['t'] = np.ascontiguousarray(np.array(data['t']).T)
if 'boundaries' in data and data['boundaries'] is not None:
data['boundaries'] = {k: np.array(v)
for k, v in data['boundaries'].items()}
if 'subdomains' in data and data['subdomains'] is not None:
data['subdomains'] = {k: np.array(v)
for k, v in data['subdomains'].items()}
data['doflocs'] = data.pop('p')
data['_subdomains'] = data.pop('subdomains')
data['_boundaries'] = data.pop('boundaries')
return cls(**data)
def to_dict(self) -> Dict[str, Optional[Dict[str, List[float]]]]:
"""For backwards compatibility."""
boundaries: Optional[Dict[str, List[float]]] = None
subdomains: Optional[Dict[str, List[float]]] = None
if self.boundaries is not None:
boundaries = {k: v.tolist() for k, v in self.boundaries.items()}
if self.subdomains is not None:
subdomains = {k: v.tolist() for k, v in self.subdomains.items()}
return {
'p': self.p.T.tolist(),
't': self.t.T.tolist(),
'boundaries': boundaries,
'subdomains': subdomains,
}
@classmethod
def from_mesh(cls, mesh):
"""Reuse an existing mesh by adding nodes.
Parameters
----------
mesh
The mesh used in the initialization. Connectivity of the new mesh
will match ``mesh.t``.
"""
from skfem.assembly import Dofs
mapping = mesh._mapping()
nelem = cls.elem
dofs = Dofs(mesh, nelem())
locs = mapping.F(nelem.doflocs.T)
doflocs = np.zeros((locs.shape[0], dofs.N))
# match mapped dofs and global dof numbering
for itr in range(locs.shape[0]):
for jtr in range(dofs.element_dofs.shape[0]):
doflocs[itr, dofs.element_dofs[jtr]] = locs[itr, :, jtr]
return cls(
doflocs=doflocs,
t=mesh.t,
)
@classmethod
def init_refdom(cls):
"""Initialize a mesh corresponding to the reference domain."""
return cls(cls.elem.refdom.p, cls.elem.refdom.t)
def refined(self, times_or_ix: Union[int, ndarray] = 1):
"""Return a refined mesh.
Parameters
----------
times_or_ix
Either an integer giving the number of uniform refinements or an
array of element indices for adaptive refinement.
"""
m = self
if isinstance(times_or_ix, int):
for _ in range(times_or_ix):
m = m._uniform()
else:
m = m._adaptive(times_or_ix)
return m
def scaled(self, factors):
"""Return a new mesh with scaled dimensions.
Parameters
----------
factors
Scale each dimension by a factor.
"""
if isinstance(factors, float):
# for backwards compatibility
factors = self.doflocs.shape[0] * [factors]
return replace(
self,
doflocs=np.array([self.doflocs[itr] * factors[itr]
for itr in range(len(factors))]),
)
def translated(self, diffs):
"""Return a new translated mesh.
Parameters
----------
diffs
Translate the mesh by a vector. Must have same size as the mesh
dimension.
"""
return replace(
self,
doflocs=np.array([self.doflocs[itr] + diffs[itr]
for itr in range(len(diffs))]),
)
def mirrored(self,
normal: Tuple[float, ...],
point: Optional[Tuple[float, ...]] = None):
"""Return a mesh mirrored with respect to a normal.
Meant to be combined with the other methods to build more general
meshes, e.g.,
>>> from skfem import MeshTet
>>> m1 = MeshTet()
>>> m2 = m1.mirrored((1, 0, 0))
>>> m3 = m1.mirrored((0, 1, 0))
>>> m4 = m1.mirrored((0, 0, 1))
>>> m = m1 + m2 + m3 + m4
>>> (m.nvertices, m.nelements)
(20, 20)
Parameters
----------
normal
The normal vector of the mirror plane.
point
An optional point through which the plane passes. By default, the
point corresponds to the origin.
"""
if point is None:
point = (0,) * self.dim()
p = self.p.copy()
p0 = np.array(point)
n = np.array(normal)
n = n / np.linalg.norm(n)
# reflect across the plane through p0 with unit normal n: p -> p - 2 ((p - p0) . n) n
p += - 2. * np.dot(n, p - p0[:, None]) * n[:, None]
return replace(
self,
doflocs=p,
)
def _uniform(self):
"""Perform a single uniform refinement."""
raise NotImplementedError
def _adaptive(self, ix: ndarray):
"""Adaptively refine the given set of elements."""
raise NotImplementedError
def _splitref(self, nrefs: int = 1):
"""Split mesh into separate nonconnected elements and refine.
Used for visualization purposes.
Parameters
----------
nrefs
The number of refinements.
"""
cls = type(self)
m = cls.init_refdom().refined(nrefs)
X = m.p
x = self._mapping().F(m.p)
# create connectivity for the new mesh
nt = self.nelements
t = np.tile(m.t, (1, nt))
dt = np.max(t)
t += ((dt + 1)
* (np.tile(np.arange(nt), (m.t.shape[0] * m.t.shape[1], 1))
.flatten('F')
.reshape((-1, m.t.shape[0])).T))
if X.shape[0] == 1:
p = np.array([x.flatten()])
else:
p = x[0].flatten()
for itr in range(len(x) - 1):
p = np.vstack((p, x[itr + 1].flatten()))
return cls(p, t)
@staticmethod
def build_entities(t, indices, sort=True):
"""Build low dimensional topological entities."""
indexing = np.hstack(tuple([t[ix] for ix in indices]))
sorted_indexing = np.sort(indexing, axis=0)
sorted_indexing, ixa, ixb = np.unique(sorted_indexing,
axis=1,
return_index=True,
return_inverse=True)
mapping = ixb.reshape((len(indices), t.shape[1]))
if sort:
return np.ascontiguousarray(sorted_indexing), mapping
return np.ascontiguousarray(indexing[:, ixa]), mapping
@staticmethod
def build_inverse(t, mapping):
"""Build inverse mapping from low dimensional topological entities."""
e = mapping.flatten(order='C')
tix = np.tile(np.arange(t.shape[1]), (1, t.shape[0]))[0]
e_first, ix_first = np.unique(e, return_index=True)
e_last, ix_last = np.unique(e[::-1], return_index=True)
ix_last = e.shape[0] - ix_last - 1
inverse = np.zeros((2, np.max(mapping) + 1), dtype=np.int64)
inverse[0, e_first] = tix[ix_first]
inverse[1, e_last] = tix[ix_last]
inverse[1, np.nonzero(inverse[0] == inverse[1])[0]] = -1
return inverse
@staticmethod
def strip_extra_coordinates(p: ndarray) -> ndarray:
"""Fallback for 3D meshes."""
return p
def param(self) -> float:
"""Return mesh parameter, viz the length of the longest edge."""
raise NotImplementedError
def _reix(self, ix: ndarray) -> Tuple[ndarray, ndarray]:
"""Connect ``self.p`` based on the indices ``ix``."""
ixuniq = np.unique(ix)
t = np.zeros(np.max(ix) + 1, dtype=np.int64)
t[ixuniq] = np.arange(len(ixuniq), dtype=np.int64)
return self.p[:, ixuniq], t[ix]
def remove_elements(self, element_indices: ndarray):
"""Construct a new mesh by removing elements.
Parameters
----------
element_indices
List of element indices to remove.
"""
p, t = self._reix(np.delete(self.t, element_indices, axis=1))
return replace(
self,
doflocs=p,
t=t,
)
def element_finder(self, mapping=None):
"""Return a function handle from location to element index.
Parameters
----------
mapping
The affine mapping for the mesh.
"""
raise NotImplementedError
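# A minimal usage sketch of the Mesh API above (refined, mirrored, scaled, translated,
# save); it relies on MeshTri1, defined later in this module, so the name resolves only
# when the function is called, and the filename argument is hypothetical.
def _demo_mesh_api(filename=None):
    """Refine, transform and merge a default triangular mesh."""
    m = MeshTri1().refined(2)        # unit square, two uniform refinements
    m = m + m.mirrored((1, 0))       # glue with its mirror image about x = 0
    m = m.scaled((1.0, 2.0))         # stretch in y
    m = m.translated((0.0, -1.0))    # recentre vertically
    if filename is not None:
        m.save(filename)             # export via meshio, e.g. 'demo.vtk'
    return m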
@dataclass(repr=False)
class Mesh2D(Mesh):
def param(self) -> float:
return np.max(
np.linalg.norm(np.diff(self.p[:, self.facets], axis=1), axis=0)
)
@staticmethod
def strip_extra_coordinates(p: ndarray) -> ndarray:
"""For meshio which appends :math:`z = 0` to 2D meshes."""
return p[:, :2]
def _repr_svg_(self) -> str:
from skfem.visuals.svg import draw
return draw(self, nrefs=2, boundaries_only=True)
@dataclass(repr=False)
class Mesh3D(Mesh):
def param(self) -> float:
return np.max(
np.linalg.norm(np.diff(self.p[:, self.edges], axis=1), axis=0)
)
def edges_satisfying(self, test: Callable[[ndarray], bool]) -> ndarray:
"""Return edges whose midpoints satisfy some condition.
Parameters
----------
test
Evaluates to 1 or ``True`` for edge midpoints of the edges
belonging to the output set.
"""
return np.nonzero(test(self.p[:, self.edges].mean(1)))[0]
def boundary_edges(self) -> ndarray:
"""Return an array of boundary edge indices."""
facets = self.boundary_facets()
boundary_edges = np.sort(np.hstack(
tuple([np.vstack((self.facets[itr, facets],
self.facets[(itr + 1) % self.facets.shape[0],
facets]))
for itr in range(self.facets.shape[0])])).T, axis=1)
edge_candidates = np.unique(self.t2e[:, self.f2t[0, facets]])
A = self.edges[:, edge_candidates].T
B = boundary_edges
dims = A.max(0) + 1
ix = np.where(np.in1d(np.ravel_multi_index(A.T, dims),
np.ravel_multi_index(B.T, dims)))[0]
return edge_candidates[ix]
def interior_edges(self) -> ndarray:
"""Return an array of interior edge indices."""
return np.setdiff1d(np.arange(self.edges.shape[1], dtype=np.int64),
self.boundary_edges())
@dataclass(repr=False)
class MeshTri1(Mesh2D):
doflocs: ndarray = np.array([[0., 0.],
[1., 0.],
[0., 1.],
[1., 1.]], dtype=np.float64).T
t: ndarray = np.array([[0, 1, 2],
[1, 3, 2]], dtype=np.int64).T
elem: Type[Element] = ElementTriP1
affine: bool = True
sort_t: bool = True
@classmethod
def init_tensor(cls: Type, x: ndarray, y: ndarray):
r"""Initialize a tensor product mesh.
The mesh topology is as follows::
*---------------*
|'-.|'-.|`'---._|
|---+---+-------|
|\ |\ |'. |
| \ | \ | '-. |
| \| \| '.|
*---------------*
Parameters
----------
x
The nodal coordinates in dimension `x`.
y
The nodal coordinates in dimension `y`.
"""
npx = len(x)
npy = len(y)
X, Y = np.meshgrid(np.sort(x), np.sort(y))
p = np.vstack((X.flatten('F'), Y.flatten('F')))
ix = np.arange(npx * npy)
nt = (npx - 1) * (npy - 1)
t = np.zeros((3, 2 * nt))
ix = ix.reshape(npy, npx, order='F').copy()
t[0, :nt] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, :nt] = (ix[1:npy, 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, :nt] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[0, nt:] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, nt:] = (ix[0:(npy-1), 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, nt:] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
return cls(p, t.astype(np.int64))
@classmethod
def init_symmetric(cls: Type) -> Mesh2D:
r"""Initialize a symmetric mesh of the unit square.
The mesh topology is as follows::
*------------*
|\ /|
| \ / |
| \ / |
| * |
| / \ |
| / \ |
|/ \|
O------------*
"""
p = np.array([[0., 1., 1., 0., .5],
[0., 0., 1., 1., .5]], dtype=np.float64)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 3, 4],
[0, 3, 4]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_sqsymmetric(cls: Type) -> Mesh2D:
r"""Initialize a symmetric mesh of the unit square.
The mesh topology is as follows::
*------*------*
|\ | /|
| \ | / |
| \ | / |
*------*------*
| / | \ |
| / | \ |
|/ | \|
O------*------*
"""
p = np.array([[0., .5, 1., 0., .5, 1., 0., .5, 1.],
[0., 0., 0., .5, .5, .5, 1., 1., 1.]], dtype=np.float64)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 4, 5],
[0, 3, 4],
[3, 4, 6],
[4, 6, 7],
[4, 7, 8],
[4, 5, 8]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_lshaped(cls: Type) -> Mesh2D:
r"""Initialize a mesh for the L-shaped domain.
The mesh topology is as follows::
*-------*
| \ |
| \ |
| \ |
|-------O-------*
| / | \ |
| / | \ |
| / | \ |
*---------------*
"""
p = np.array([[0., 1., 0., -1., 0., -1., -1., 1.],
[0., 0., 1., 0., -1., -1., 1., -1.]], dtype=np.float64)
t = np.array([[0, 1, 7],
[0, 2, 6],
[0, 6, 3],
[0, 7, 4],
[0, 4, 5],
[0, 3, 5]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_circle(cls: Type,
nrefs: int = 3) -> Mesh2D:
r"""Initialize a circle mesh.
Works by repeatedly refining the following mesh and moving
new nodes to the boundary::
*
/ | \
/ | \
/ | \
*------O------*
\ | /
\ | /
\ | /
*
Parameters
----------
nrefs
Number of refinements, by default 3.
"""
p = np.array([[0., 0.],
[1., 0.],
[0., 1.],
[-1., 0.],
[0., -1.]], dtype=np.float64).T
t = np.array([[0, 1, 2],
[0, 1, 4],
[0, 2, 3],
[0, 3, 4]], dtype=np.int64).T
m = cls(p, t)
for _ in range(nrefs):
m = m.refined()
D = m.boundary_nodes()
tmp = m.p
tmp[:, D] = tmp[:, D] / np.linalg.norm(tmp[:, D], axis=0)
m = replace(m, doflocs=tmp)
return m
def _uniform(self):
p = self.doflocs
t = self.t
sz = p.shape[1]
t2f = self.t2f.copy() + sz
return replace(
self,
doflocs=np.hstack((p, p[:, self.facets].mean(axis=1))),
t=np.hstack((
np.vstack((t[0], t2f[0], t2f[2])),
np.vstack((t[1], t2f[0], t2f[1])),
np.vstack((t[2], t2f[2], t2f[1])),
np.vstack((t2f[0], t2f[1], t2f[2])),
)),
_boundaries=None,
_subdomains=None,
)
@staticmethod
def _adaptive_sort_mesh(p, t):
"""Make (0, 2) the longest edge in t."""
l01 = np.sqrt(np.sum((p[:, t[0]] - p[:, t[1]]) ** 2, axis=0))
l12 = np.sqrt(np.sum((p[:, t[1]] - p[:, t[2]]) ** 2, axis=0))
l02 = np.sqrt(np.sum((p[:, t[0]] - p[:, t[2]]) ** 2, axis=0))
ix01 = (l01 > l02) * (l01 > l12)
ix12 = (l12 > l01) * (l12 > l02)
# row swaps
tmp = t[2, ix01]
t[2, ix01] = t[1, ix01]
t[1, ix01] = tmp
tmp = t[0, ix12]
t[0, ix12] = t[1, ix12]
t[1, ix12] = tmp
return t
@staticmethod
def _adaptive_find_facets(m, marked_elems):
"""Find the facets to split."""
facets = np.zeros(m.facets.shape[1], dtype=np.int64)
facets[m.t2f[:, marked_elems].flatten('F')] = 1
prev_nnz = -1e10
while np.count_nonzero(facets) - prev_nnz > 0:
prev_nnz = np.count_nonzero(facets)
t2facets = facets[m.t2f]
t2facets[2, t2facets[0] + t2facets[1] > 0] = 1
facets[m.t2f[t2facets == 1]] = 1
return facets
@staticmethod
def _adaptive_split_elements(m, facets):
"""Define new elements."""
ix = (-1) * np.ones(m.facets.shape[1], dtype=np.int64)
ix[facets == 1] = (np.arange(np.count_nonzero(facets))
+ m.p.shape[1])
ix = ix[m.t2f]
red = (ix[0] >= 0) * (ix[1] >= 0) * (ix[2] >= 0)
blue1 = (ix[0] == -1) * (ix[1] >= 0) * (ix[2] >= 0)
blue2 = (ix[0] >= 0) * (ix[1] == -1) * (ix[2] >= 0)
green = (ix[0] == -1) * (ix[1] == -1) * (ix[2] >= 0)
rest = (ix[0] == -1) * (ix[1] == -1) * (ix[2] == -1)
# new red elements
t_red = np.hstack((
np.vstack((m.t[0, red], ix[0, red], ix[2, red])),
np.vstack((m.t[1, red], ix[0, red], ix[1, red])),
np.vstack((m.t[2, red], ix[1, red], ix[2, red])),
np.vstack((ix[1, red], ix[2, red], ix[0, red])),
))
# new blue elements
t_blue1 = np.hstack((
np.vstack((m.t[1, blue1], m.t[0, blue1], ix[2, blue1])),
np.vstack((m.t[1, blue1], ix[1, blue1], ix[2, blue1])),
np.vstack((m.t[2, blue1], ix[2, blue1], ix[1, blue1])),
))
t_blue2 = np.hstack((
np.vstack((m.t[0, blue2], ix[0, blue2], ix[2, blue2])),
np.vstack((ix[2, blue2], ix[0, blue2], m.t[1, blue2])),
np.vstack((m.t[2, blue2], ix[2, blue2], m.t[1, blue2])),
))
# new green elements
t_green = np.hstack((
np.vstack((m.t[1, green], ix[2, green], m.t[0, green])),
np.vstack((m.t[2, green], ix[2, green], m.t[1, green])),
))
# new nodes
p = .5 * (m.p[:, m.facets[0, facets == 1]] +
m.p[:, m.facets[1, facets == 1]])
return (
np.hstack((m.p, p)),
np.hstack((m.t[:, rest], t_red, t_blue1, t_blue2, t_green)),
)
def _adaptive(self, marked):
sorted_mesh = replace(
self,
t=self._adaptive_sort_mesh(self.p, self.t),
sort_t=False,
)
facets = self._adaptive_find_facets(sorted_mesh, marked)
doflocs, t = self._adaptive_split_elements(sorted_mesh, facets)
return replace(
self,
doflocs=doflocs,
t=t,
_boundaries=None,
_subdomains=None,
)
def element_finder(self, mapping=None):
if mapping is None:
mapping = self._mapping()
tree = cKDTree(np.mean(self.p[:, self.t], axis=1).T)
def finder(x, y):
ix = tree.query(np.array([x, y]).T, 5)[1].flatten()
X = mapping.invF(np.array([x, y])[:, None], ix)
inside = (
(X[0] >= 0) *
(X[1] >= 0) *
(1 - X[0] - X[1] >= 0)
)
return np.array([ix[np.argmax(inside, axis=0)]]).flatten()
return finder
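# A short sketch of the MeshTri1 constructors documented above; the coordinate arrays
# and the query point are arbitrary examples, and the names resolve only when called.
def _demo_meshtri_constructors():
    """Build tensor-product and circle meshes and locate a point."""
    m1 = MeshTri1.init_tensor(np.linspace(0., 1., 4), np.linspace(0., 2., 5))
    m2 = MeshTri1.init_circle(nrefs=2)
    finder = m1.element_finder()
    elem = finder(np.array([0.5]), np.array([1.0]))  # element containing (0.5, 1.0)
    return m1, m2, elem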
@dataclass(repr=False)
class MeshQuad1(Mesh2D):
doflocs: ndarray = np.array([[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]], dtype=np.float64).T
t: ndarray = np.array([[0, 1, 2, 3]], dtype=np.int64).T
elem: Type[Element] = ElementQuad1
def _uniform(self):
p = self.doflocs
t = self.t
sz = p.shape[1]
t2f = self.t2f.copy() + sz
mid = np.arange(t.shape[1], dtype=np.int64) + np.max(t2f) + 1
return replace(
self,
doflocs=np.hstack((
p,
p[:, self.facets].mean(axis=1),
p[:, self.t].mean(axis=1),
)),
t=np.hstack((
np.vstack((t[0], t2f[0], mid, t2f[3])),
np.vstack((t2f[0], t[1], t2f[1], mid)),
np.vstack((mid, t2f[1], t[2], t2f[2])),
np.vstack((t2f[3], mid, t2f[2], t[3])),
)),
_boundaries=None,
_subdomains=None,
)
@classmethod
def init_tensor(cls: Type,
x: ndarray,
y: ndarray):
"""Initialize a tensor product mesh.
The mesh topology is as follows::
*-------------*
| | | |
|---+--+------|
| | | |
| | | |
| | | |
*-------------*
Parameters
----------
x
The nodal coordinates in dimension `x`.
y
The nodal coordinates in dimension `y`.
"""
npx = len(x)
npy = len(y)
X, Y = np.meshgrid(np.sort(x), np.sort(y))
p = np.vstack((X.flatten('F'), Y.flatten('F')))
ix = np.arange(npx * npy)
nt = (npx - 1) * (npy - 1)
t = np.zeros((4, nt))
ix = ix.reshape(npy, npx, order='F').copy()
t[0] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1] = (ix[1:npy, 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[3] = (ix[0:(npy-1), 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
return cls(p, t.astype(np.int64))
def to_meshtri(self, x: Optional[ndarray] = None):
"""Split each quadrilateral into two triangles."""
t = np.hstack((self.t[[0, 1, 3]], self.t[[1, 2, 3]]))
subdomains = None
if self.subdomains:
subdomains = {k: np.concatenate((v, v + self.t.shape[1]))
for k, v in self.subdomains.items()}
mesh = MeshTri1(self.doflocs, t)
boundaries = None
if self.boundaries:
boundaries = {}
for k in self.boundaries:
slots = enumerate(mesh.facets.T)
boundaries[k] = np.array([
next(dropwhile(lambda slot: not(np.array_equal(f,
slot[1])),
slots))[0]
for f in self.facets.T[np.sort(self.boundaries[k])]])
if self._subdomains or self._boundaries:
mesh = replace(
mesh,
_boundaries=boundaries,
_subdomains=subdomains,
)
if x is not None:
if len(x) == self.t.shape[1]:
# preserve elemental constant functions
X = np.concatenate((x, x))
else:
raise Exception("The parameter x must have one value per "
"element.")
return mesh, X
return mesh
def element_finder(self, mapping=None):
"""Transform to :class:`skfem.MeshTri` and return its finder."""
tri_finder = self.to_meshtri().element_finder()
def finder(*args):
return tri_finder(*args) % self.t.shape[1]
return finder
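# Sketch of splitting a quadrilateral mesh into triangles via to_meshtri above; the
# elementwise data passed as ``x`` is an arbitrary example.
def _demo_quad_to_tri():
    """Split a refined quad mesh into triangles, carrying elementwise data."""
    mq = MeshQuad1().refined(2)
    x = np.arange(mq.t.shape[1], dtype=np.float64)  # one value per quadrilateral
    mt, X = mq.to_meshtri(x=x)                      # each quad becomes two triangles
    assert mt.t.shape[1] == 2 * mq.t.shape[1]
    assert len(X) == mt.t.shape[1]
    return mt, X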
@dataclass(repr=False)
class MeshTri2(MeshTri1):
elem: Type[Element] = ElementTriP2
affine: bool = False
sort_t: bool = False
@classmethod
def init_circle(cls: Type,
nrefs: int = 3) -> Mesh2D:
m = MeshTri1.init_circle(nrefs=nrefs)
M = cls.from_mesh(m)
D = M.dofs.get_facet_dofs(M.boundary_facets()).flatten()
doflocs = M.doflocs.copy()
doflocs[:, D] /= np.linalg.norm(doflocs[:, D], axis=0)
return replace(M, doflocs=doflocs)
@dataclass(repr=False)
class MeshQuad2(MeshQuad1):
elem: Type[Element] = ElementQuad2
@dataclass(repr=False)
class MeshLine1(Mesh):
doflocs: ndarray = np.array([[0., 1.]], dtype=np.float64)
t: ndarray = np.array([[0], [1]], dtype=np.int64)
"""Local orbital reference frame
"""
from numpy.linalg import norm
import numpy as np
def _split(orbit):
return orbit[:3], orbit[3:]
def to_tnw(orbit):
"""In the TNW Local Orbital Reference Frame, x is oriented along the velocity vector,
z along the angular momentum, and y completes the frame.
Args:
orbit (list): Array of length 6
Return:
numpy.ndarray: matrix to convert from inertial frame to TNW.
>>> delta_tnw = [1, 0, 0]
>>> p = [-6142438.668, 3492467.560, -25767.25680]
>>> v = [505.8479685, 942.7809215, 7435.922231]
>>> pv = p + v
>>> mat = to_tnw(pv).T
>>> delta_inert = mat @ delta_tnw
>>> all(delta_inert == v / norm(v))
True
"""
pos, vel = _split(orbit)
t = vel / norm(vel)
w = np.cross(pos, vel) / (norm(pos) * norm(vel))
n = np.cross(w, t)
return np.array([t, n, w])
def to_qsw(orbit):
"""In the QSW Local Orbital Reference Frame, x is oriented along the position vector,
z along the angular momentum, and y completes the frame.
The frame is sometimes also called RSW (where R stands for radial) or LVLH (Local
Vertical Local Horizontal).
Args:
orbit (list): Array of length 6
Return:
numpy.ndarray: matrix to convert from inertial frame to QSW
>>> delta_qsw = [1, 0, 0]
>>> p = [-6142438.668, 3492467.560, -25767.25680]
>>> v = [505.8479685, 942.7809215, 7435.922231]
>>> pv = p + v
>>> mat = to_qsw(pv).T
>>> delta_inert = mat @ delta_qsw
>>> all(delta_inert == p / norm(p))
True
"""
pos, vel = _split(orbit)
q = pos / norm(pos)
w = np.cross(pos, vel) / (norm(pos) * norm(vel))
s = np.cross(w, q)
return np.array([q, s, w])
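# A small sketch tying the two frames together: rotate an along-track (TNW) and a
# radial (QSW) unit offset back to the inertial frame; the state vector is the one
# used in the doctests above.
def _demo_local_frames():
    p = np.array([-6142438.668, 3492467.560, -25767.25680])
    v = np.array([505.8479685, 942.7809215, 7435.922231])
    pv = np.concatenate((p, v))
    along_track_inertial = to_tnw(pv).T @ np.array([1., 0., 0.])  # equals v / norm(v)
    radial_inertial = to_qsw(pv).T @ np.array([1., 0., 0.])       # equals p / norm(p)
    return along_track_inertial, radial_inertial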
from __future__ import annotations, print_function
import csv
import logging
import os.path
import sys
import time
from functools import wraps
from typing import List, Tuple
import matplotlib
# pylint: disable=wrong-import-position
matplotlib.use("Agg")
# pylint: enable=wrong-import-position
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from libmuscle import Instance, Message
from ymmsl import Operator
if os.getenv("FLEE_TYPE_CHECK") is not None and os.environ["FLEE_TYPE_CHECK"].lower() == "true":
from beartype import beartype as check_args_type
else:
def check_args_type(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class CouplingInterface:
"""
The Coupling Interface class
"""
@check_args_type
def __init__(
self,
e,
submodel: str,
instance_index: int = None,
num_instances: int = None,
coupling_type: str = "file",
weather_coupling: bool = False,
outputdir: str = "out",
log_exchange_data: bool = True,
) -> None:
"""
e = FLEE ecosystem
Coupling types to support eventually:
- file
- MPWide
- one-sided store
- repository coupling.
- muscle
"""
self.coupling_type = coupling_type
# coupling definitions.
self.e = e
self.location_ids = []
self.location_names = []
self.ghost_location_ids = []
self.ghost_location_names = []
self.names = []
self.directions = []
self.intervals = []
self.outputdir = outputdir
self.submodel = submodel # micro / macro / manager
self.instance_index = instance_index
self.num_instances = num_instances
# for logging
self.log_exchange_data = log_exchange_data
if self.coupling_type == "muscle3":
self.logger = logging.getLogger()
self.logger.propagate = False
if self.submodel in ["macro", "micro"]:
self.instance = Instance({Operator.O_I: ["out"], Operator.S: ["in"]})
elif self.submodel in ["macro_manager", "micro_manager"]:
self.instance = Instance({Operator.O_I: ["out[]"], Operator.S: ["in[]"]})
# pylint: disable=missing-function-docstring
def reuse_coupling(self):
if self.coupling_type == "file":
return True
if self.coupling_type == "muscle3":
return self.instance.reuse_instance()
@check_args_type
def addCoupledLocation(
self, location, name: str, direction: str = "inout", interval: int = 1
) -> None:
"""
Adds a location to the so-called *Coupled Region*.
Args:
location (Location): is the (p)Flee location object.
name (str): Name is a location identifier that is identical to the one
in the other code.
direction (str, optional): Direction can be (once the code is done):
- `out` -> agents are removed and stored in the coupling link.
- `in` -> agents written to the coupling link by the other process
are added to this location.
- `inout` -> both out and in.
- `inout indirect` -> changes in agent numbers are stored in the
coupling link. No agents are added or removed.
interval (int, optional): is the timestep interval of the coupling, ensuring that the
coupling activity is performed every <interval> time steps.
"""
if location.name not in self.location_names:
self.location_ids += [self.e._convert_location_name_to_index(name=location.name)]
self.location_names += [location.name]
"""
disabled by HAMID
print("Adding coupled location {} {} {}".format(
location.name, direction, interval), file=sys.stderr)
"""
self.names += [name]
self.directions += [direction]
self.intervals += [interval]
self.coupling_rank = True
if hasattr(self.e, "mpi") and self.e.mpi is not None:
if self.e.mpi.rank > 0:
self.coupling_rank = False
else:
print(
"{} --> warning: coupled location [{}] is selected twice "
"(ignore this if a location is both a coupled location and "
"a conflict location). Only one coupled location will be "
"created.".format(self.submodel, location.name),
file=sys.stderr,
)
def addGhostLocations(self, ig) -> None:
"""
Summary
Args:
ig (Type[InputGeography]): Description
"""
conflict_name_list = ig.getConflictLocationNames()
print("Adding Ghosts", file=sys.stderr)
for conflict_name in conflict_name_list:
for i, location_name in enumerate(self.e.locationNames):
if location_name == conflict_name:
loc = self.e.locations[i]
# print("L", loc.name, len(loc.links), file=sys.stderr)
if len(loc.links) == 0:
if loc.name not in self.location_names:
print("Adding ghost location {}".format(loc.name), file=sys.stderr)
self.addCoupledLocation(
location=loc, name=loc.name, direction="out", interval=1
)
def addMicroConflictLocations(self, ig) -> None:
"""
Summary
Args:
ig (Type[InputGeography]): Description
"""
conflict_name_list = ig.getConflictLocationNames()
print("Adding micro conflict coupling", file=sys.stderr)
for conflict_name in conflict_name_list:
for i, location_name in enumerate(self.e.locationNames):
if location_name == conflict_name:
loc = self.e.locations[i]
# print("L", loc.name, len(loc.links), file=sys.stderr)
print(
"Adding micro coupled conflict location {}".format(loc.name),
file=sys.stderr,
)
self.addCoupledLocation(location=loc, name=loc.name, direction="in", interval=1)
@check_args_type
def Couple(self, time: int) -> None:
"""
Summary
Args:
time (int): Description
"""
newAgents = None
# for the time being all intervals will have to be the same...
# for current interval=1 we can ignore this check, but it should be
# added later if we need higher interval values here
if time % self.intervals[0] == 0:
# if True:
if self.coupling_type == "muscle3":
if self.coupling_rank:
# If MPI is used, this will be the process with rank 0
if self.submodel == "macro_manager":
# collect output from each macro instance
newAgents = {}
for slot in range(self.num_instances):
msg = self.instance.receive("in", slot)
curr_newAgent = self.extractNewAgentsFromCSVString(
csv_string=msg.data["newAgents"].split("\n")
)
if len(newAgents) == 0:
newAgents = curr_newAgent
else:
for name, newAgents_num in curr_newAgent.items():
if not isinstance(newAgents[name], list):
newAgents[name] = [newAgents[name]]
newAgents[name].append(newAgents_num)
# combine the newAgents found per location by each
# instance into one value; for now we use the
# arithmetic mean, which may need to change to
# another approach later
for name in newAgents:
newAgents[name] = int(round(np.mean(newAgents[name])))
data_to_micro = "\n".join(
"{},{}".format(key, value) for key, value in newAgents.items()
)
for slot in range(self.num_instances):
self.instance.send("out", Message(time, None, data_to_micro), slot)
elif self.submodel == "micro_manager":
# receive from micro
newAgents = {}
for slot in range(self.num_instances):
msg = self.instance.receive("in", slot)
curr_newAgent = self.extractNewAgentsFromCSVString(
csv_string=msg.data["newAgents"].split("\n")
)
if len(newAgents) == 0:
newAgents = curr_newAgent
else:
for name, newAgents_num in curr_newAgent.items():
if not isinstance(newAgents[name], list):
newAgents[name] = [newAgents[name]]
newAgents[name].append(newAgents_num)
# combine the newAgents found per location by
# each instance into one value; for now we use the
# arithmetic mean, which may need to change to
# another approach later
for name in newAgents:
newAgents[name] = int(round(np.mean(newAgents[name])))
data_to_macro = "\n".join(
"{},{}".format(key, value) for key, value in newAgents.items()
)
# send to macro
for slot in range(self.num_instances):
self.instance.send("out", Message(time, None, data_to_macro), slot)
elif self.submodel in ["macro", "micro"]:
newAgents_str = self.generateOutputCSVString()
# here, in addition to newAgents, we can also pass
# other variables if are required
msg = {"newAgents": newAgents_str}
self.instance.send("out", Message(time, None, msg))
if self.log_exchange_data is True:
self.logExchangeData(t=time)
msg = self.instance.receive("in")
newAgents = self.extractNewAgentsFromCSVString(
csv_string=msg.data.split("\n")
)
# If MPI is used, broadcast newAgents to all other processes
if hasattr(self.e, "mpi") and self.e.mpi is not None:
newAgents = self.e.mpi.comm.bcast(newAgents, root=0)
elif self.coupling_type == "file":
# default is coupling through file IO.
if self.coupling_rank:
# If MPI is used, this will be the process with rank 0
self.writeOutputToFile(day=time)
if self.log_exchange_data is True:
self.logExchangeData(t=time)
newAgents = self.readInputFromFile(t=time)
if self.submodel in ["micro", "macro"]:
self.e.clearLocationsFromAgents(location_names=self.location_names)
for i in range(0, len(self.location_names)):
# write departing agents to file
# read incoming agents from file
# print(self.names, i, newAgents)
if "in" in self.directions[i]:
print(
"Couple IN: {} {}".format(self.names[i], newAgents[self.names[i]]),
file=sys.stderr,
)
if self.names[i] in newAgents:
self.e.insertAgents(
location=self.e.locations[self.location_ids[i]],
number=newAgents[self.names[i]],
)
if hasattr(self.e, "mpi"):
self.e.updateNumAgents(log=False)
@check_args_type
def setCouplingChannel(self, outputchannel: str, inputchannel: str) -> None:
"""
Sets the coupling output file name (for file coupling).
Name should be WITHOUT .csv extension.
Args:
outputchannel (str): Description
inputchannel (str): Description
"""
if self.coupling_type == "file":
self.outputfilename = outputchannel
self.inputfilename = inputchannel
@check_args_type
def generateOutputCSVString(self) -> str:
"""
Summary
Returns:
str: Description
"""
out_csv_string = ""
for i, location_id in enumerate(self.location_ids):
if "out" in self.directions[i]:
out_csv_string += "{},{}\n".format(
self.names[i], self.e.locations[location_id].numAgents
)
print(
"Couple OUT: {} {}".format(
self.names[i], self.e.locations[location_id].numAgents
),
file=sys.stderr,
)
return out_csv_string
@check_args_type
def extractNewAgentsFromCSVString(self, csv_string: List[str]) -> dict:
"""
Reads in a CSV string with coupling information, and extracts a list
of New Agents.
Args:
csv_string (List[str]): Description
Returns:
dict: Description
"""
newAgents = {}
for line in csv_string:
row = line.split(",")
if len(row[0]) == 0:
continue
if row[0][0] == "#":
pass
else:
for i in range(0, len(self.location_ids)):
if row[0] == self.names[i]:
newAgents[self.names[i]] = int(row[1])
return newAgents
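# Example of the exchanged CSV format (the location names here are hypothetical):
# generateOutputCSVString() emits lines like "CampA,120\nCampB,45\n", and
# extractNewAgentsFromCSVString(["CampA,120", "CampB,45", "# comment", ""])
# would return {"CampA": 120, "CampB": 45}, provided "CampA" and "CampB" are
# registered coupled location names.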
@check_args_type
def writeOutputToFile(self, day: int) -> None:
"""
Summary
Args:
day (int): Description
"""
out_csv_string = self.generateOutputCSVString()
csv_outputfile_name = "{}[{}].{}.csv".format(self.outputfilename, self.instance_index, day)
csv_outputfile_path = os.path.join(self.outputdir, "file", "coupled", csv_outputfile_name)
with open(csv_outputfile_path, "a", encoding="utf-8") as file:
file.write(out_csv_string)
print(
"{}[{}] t={} Couple: output written to {}".format(
self.submodel, self.instance_index, day, csv_outputfile_name
),
file=sys.stderr,
)
@check_args_type
def waitForInputFiles(self, check_dir: str, in_fnames: dict) -> None:
"""
Summary
Args:
check_dir (str): directory in which to look for the input files.
in_fnames (dict): maps each expected file name to a flag telling
    whether it has already been found.
"""
# input format for in_fnames: {"fileName": False, ...}
founded_files = 0
# wait until input files from all instances are available
while founded_files != len(in_fnames):
time.sleep(0.1)
for fname in in_fnames:
if in_fnames[fname] is False:
if os.path.exists(os.path.join(check_dir, fname)):
in_fnames[fname] = True
founded_files += 1
@check_args_type
def readInputFromFile(self, t: int) -> dict:
"""
Returns a dictionary with key <coupling name> and
value <number of agents>.
Args:
t (int): Description
Returns:
dict: Description
"""
in_fnames = {}
for i in range(self.num_instances):
fname = "{}[{}].{}.csv".format(self.inputfilename, i, t)
in_fnames[fname] = False
dirInputFiles = os.path.join(self.outputdir, self.coupling_type, "coupled")
# wait until input files from all instances are available
self.waitForInputFiles(check_dir=dirInputFiles, in_fnames=in_fnames)
# aggregate newAgents from each input file
aggNewAgents = {}
for fname in in_fnames:
with open(os.path.join(dirInputFiles, fname), encoding="utf-8") as csvfile:
csv_string = csvfile.read().split("\n")
curr_newAgent = self.extractNewAgentsFromCSVString(csv_string=csv_string)
if len(aggNewAgents) == 0:
aggNewAgents = curr_newAgent
else:
for name, newAgents_num in curr_newAgent.items():
if not isinstance(aggNewAgents[name], list):
aggNewAgents[name] = [aggNewAgents[name]]
aggNewAgents[name].append(newAgents_num)
# combine the newAgents found per location by each instance into one value;
# for now we use the arithmetic mean, which may need to change to another
# approach later
for name in aggNewAgents:
aggNewAgents[name] = int(round(np.mean(aggNewAgents[name])))
return aggNewAgents
# ------------------------------------------------------------------------
# log Exchanged Data
# ------------------------------------------------------------------------
def saveExchangeDataToFile(self) -> None:
"""
Summary
"""
# save logTotalAgents to file
if hasattr(self, "logTotalAgents"):
filename = "logTotalAgents_{}[{}].csv".format(self.submodel, self.instance_index)
outputfile = os.path.join(
self.outputdir, self.coupling_type, "log_exchange_data", filename
)
# output csv header
header_csv = "day,total_agents"
with open(outputfile, "a", encoding="utf-8") as file:
file.write("{}\n".format(header_csv))
csvWriter = csv.writer(file, delimiter=",")
csvWriter.writerows(self.logTotalAgents)
# save logLocationsNumAgents to file
if hasattr(self, "logLocationsNumAgents"):
filename = "logLocationsNumAgents_{}[{}].csv".format(self.submodel, self.instance_index)
outputfile = os.path.join(
self.outputdir, self.coupling_type, "log_exchange_data", filename
)
# output csv header
header_csv = "day"
for i in range(0, len(self.location_ids)):
if "out" in self.directions[i]:
header_csv += ",{}".format(self.names[i])
with open(outputfile, "a", encoding="utf-8") as file:
file.write("{}\n".format(header_csv))
csvWriter = csv.writer(file, delimiter=",")
csvWriter.writerows(self.logLocationsNumAgents)
# save logNewRefugees to file
if hasattr(self, "logNewRefugees"):
filename = "logNewRefugees_{}[{}].csv".format(self.submodel, self.instance_index)
outputfile = os.path.join(
self.outputdir, self.coupling_type, "log_exchange_data", filename
)
# output csv header
header_csv = "day,new_refs"
with open(outputfile, "a", encoding="utf-8") as file:
file.write("{}\n".format(header_csv))
csvWriter = csv.writer(file, delimiter=",")
csvWriter.writerows(self.logNewRefugees)
@check_args_type
def logNewAgents(self, t: int, new_refs: int) -> None:
"""
Create logs for all variables only if they do not exist yet
Args:
t (int): Description
new_refs (int): Description
"""
if not hasattr(self, "logNewRefugees"):
self.logNewRefugees = []
# logNewRefugees.append([day,new_refs])
self.logNewRefugees.append([t, new_refs])
@check_args_type
def logExchangeData(self, t: int) -> None:
"""
Summary
Args:
t (int): Description
"""
# save log of total agents
if not hasattr(self, "logTotalAgents"):
self.logTotalAgents = []
# logTotalAgents.append([day,total_agents])
self.logTotalAgents.append([t, self.e.total_agents])
# save log of numAgents in locations
if not hasattr(self, "logLocationsNumAgents"):
self.logLocationsNumAgents = []
# logLocationsNumAgents.append([day,location_ids[i]].numAgents])
data = [t]
for i, location_id in enumerate(self.location_ids):
if "out" in self.directions[i]:
data.append(self.e.locations[location_id].numAgents)
self.logLocationsNumAgents.append(data)
def sumOutputCSVFiles(self) -> None:
"""
Summary
"""
in_fnames = {}
for i in range(self.num_instances):
fname = "out[{}].csv".format(i)
in_fnames[fname] = False
dirInputFiles = os.path.join(self.outputdir, self.coupling_type, self.submodel)
# wait until input files from all instances are available
self.waitForInputFiles(check_dir=dirInputFiles, in_fnames=in_fnames)
dfs = []
for fname in in_fnames:
df = pd.read_csv(os.path.join(dirInputFiles, fname), index_col=None, header=0)
dfs.append(df)
frame = pd.concat(dfs, axis=0, ignore_index=True).groupby(["Day"]).mean()
for column_name in list(frame):
    if "error" not in column_name.lower():
        frame[column_name] = frame[column_name].round(0).astype(int)
# write the aggregated frame (not the last per-instance df); keep the Day index
frame.to_csv(os.path.join(dirInputFiles, "out.csv"), encoding="utf-8")
# ------------------------------------------------------------------------
# Plotting functions
# ------------------------------------------------------------------------
def plotExchangedData(self) -> None:
"""
Summary
"""
if hasattr(self, "logTotalAgents"):
self.plotTotalAgentsHistory()
if hasattr(self, "logLocationsNumAgents"):
self.plotLocationsNumAgentsHistory()
if hasattr(self, "logNewRefugees"):
self.plotNewRefugeesHistory()
def plotLocationsNumAgentsHistory(self) -> None:
"""
Summary
"""
in_fnames = {}
for i in range(self.num_instances):
fname = "logLocationsNumAgents_{}[{}].csv".format(self.submodel, i)
in_fnames[fname] = False
dirInputFiles = os.path.join(self.outputdir, self.coupling_type, "log_exchange_data")
# wait until input files from all instances are available
self.waitForInputFiles(check_dir=dirInputFiles, in_fnames=in_fnames)
csv_header = []
for i in range(0, len(self.location_ids)):
if "out" in self.directions[i]:
csv_header.append(self.names[i])
days, LocationsNumAgents = self.readCSVLogFiles(
dirInputFiles=dirInputFiles,
inputFileNames=list(in_fnames.keys()),
columnHeader=csv_header
)
# plot data
TOTAL = float(len(LocationsNumAgents))
COLUMNS = 4
# Compute Rows required
ROWS = int(TOTAL / COLUMNS)
ROWS += TOTAL % COLUMNS > 0
# Create a Position index
POSITION = range(1, int(TOTAL) + 1)
cmp = sns.color_palette("colorblind", len(LocationsNumAgents))
fig = plt.figure(figsize=(16, 7))
# fig.suptitle(title)
LINE_STYLES = ["solid", "dashed", "dotted"]
NUM_STYLES = len(LINE_STYLES)
for i, (loc_name, loc_res) in enumerate(LocationsNumAgents.items()):
fig.add_subplot(ROWS, COLUMNS, POSITION[i], frameon=False)
# xlabel=xlabel, ylabel=ylabel)
set_legend = True
for res in loc_res:
if set_legend:
plt.plot(
days,
res,
color=cmp[i],
linestyle=LINE_STYLES[i % NUM_STYLES],
label=loc_name,
)
set_legend = False
else:
plt.plot(days, res, color=cmp[i], linestyle=LINE_STYLES[i % NUM_STYLES])
plt.legend()
_, ymax = plt.ylim() # returns bottom and top of the current ylim
plt.ylim(0, ymax if ymax > 1 else 1)
plt.tight_layout()
outputPlotFile = os.path.join(
self.outputdir,
self.coupling_type,
"plot_exchange_data",
"plotLocationsNumAgents[{}].pdf".format(self.submodel),
)
plt.savefig(outputPlotFile)
def plotTotalAgentsHistory(self) -> None:
"""
Summary
"""
in_fnames = {}
for i in range(self.num_instances):
fname = "logTotalAgents_{}[{}].csv".format(self.submodel, i)
in_fnames[fname] = False
dirInputFiles = os.path.join(self.outputdir, self.coupling_type, "log_exchange_data")
# wait until input files from all instances are available
self.waitForInputFiles(check_dir=dirInputFiles, in_fnames=in_fnames)
csv_header = ["total_agents"]
days, TotalAgents = self.readCSVLogFiles(
dirInputFiles=dirInputFiles,
inputFileNames=list(in_fnames.keys()),
columnHeader=csv_header
)
# plotting preparation
cmp = sns.color_palette("colorblind", self.num_instances)
fig = plt.figure()
_ = fig.add_subplot(111, xlabel="day", ylabel="total_agents")
# plot newAgents from each input files
for _, data in TotalAgents.items():
for i, total_agents in enumerate(data):
label = "{}[{}]".format(self.submodel, i)
plt.plot(days, total_agents, color=cmp[i], label=label)
plt.legend()
plt.tight_layout()
outputPlotFile = os.path.join(
self.outputdir,
self.coupling_type,
"plot_exchange_data",
"plotTotalAgents[{}].pdf".format(self.submodel),
)
plt.savefig(outputPlotFile)
def plotNewRefugeesHistory(self) -> None:
"""
Summary
"""
in_fnames = {}
for i in range(self.num_instances):
fname = "logNewRefugees_{}[{}].csv".format(self.submodel, i)
in_fnames[fname] = False
dirInputFiles = os.path.join(self.outputdir, self.coupling_type, "log_exchange_data")
# wait until input files from all instances are available
self.waitForInputFiles(check_dir=dirInputFiles, in_fnames=in_fnames)
csv_header = ["new_refs"]
days, NewRefugees = self.readCSVLogFiles(
dirInputFiles=dirInputFiles,
inputFileNames=list(in_fnames.keys()),
columnHeader=csv_header
)
# plotting preparation
cmp = sns.color_palette("colorblind", self.num_instances)
fig = plt.figure()
_ = fig.add_subplot(111, xlabel="day", ylabel="NewRefugees")
# plot NewRefugees from each input files
for _, data in NewRefugees.items():
for i, total_agents in enumerate(data):
label = "{}[{}]".format(self.submodel, i)
# the first day has a very high number of NewRefugees, so skip it in the plot
plt.plot(days[1:], total_agents[1:], color=cmp[i], label=label)
plt.legend()
plt.tight_layout()
outputPlotFile = os.path.join(
self.outputdir,
self.coupling_type,
"plot_exchange_data",
"plotNewRefugees[{}].pdf".format(self.submodel),
)
plt.savefig(outputPlotFile)
@check_args_type
def readCSVLogFiles(
self, dirInputFiles: str, inputFileNames: List[str], columnHeader: List[str]
) -> Tuple[np.ndarray, dict]:
"""
Summary
Args:
dirInputFiles (str): Description
inputFileNames (List[str]): Description
columnHeader (List[str]): Description
No Longer Returned:
Tuple[np.ndarray, dict]: Description
"""
dict_res = {}
days = np.array([])
for name in columnHeader:
dict_res[name] = []
for fname in inputFileNames:
with open(os.path.join(dirInputFiles, fname), "rb") as csvfile:
data = np.loadtxt(csvfile, delimiter=",", skiprows=1)
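# Standalone sketch of the aggregation rule used in Couple() and readInputFromFile()
# above: newAgents dictionaries reported by several instances are merged per location
# with the arithmetic mean. The location names below are hypothetical.
def _demo_aggregate_new_agents():
    per_instance = [{"CampA": 10, "CampB": 4}, {"CampA": 14, "CampB": 2}]
    agg = {}
    for curr in per_instance:
        for name, num in curr.items():
            agg.setdefault(name, []).append(num)
    # -> {"CampA": 12, "CampB": 3}
    return {name: int(round(np.mean(vals))) for name, vals in agg.items()}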
import itertools
from statsmodels.tsa.statespace.sarimax import SARIMAX
import pandas as pd
import forecasting
import warnings
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
def sarima_model(train, test, trend_order, seasonal_order, forecast_period):
model = SARIMAX(train, order=trend_order, seasonal_order=seasonal_order)
model_fit = model.fit()
yhat = model_fit.forecast(forecast_period)
return yhat, model_fit.summary()
# Walk-forward validation with an expanding window: the model is fit on a training
# window of an initial size, the test set is the next time step in the sequence, and a
# one-step forecast is made. In each iteration the previous test value is added to the
# training set.
def sarimaWalkForwardVal(series, window_size, trend_order, seasonal_order, forecast_period):
forecasted_values = list()
seriesLen = len(series)
for i in range(window_size, seriesLen):
# Split data into train and test.
train, test = series[0:i],series[i:i+1]
# fit sarima model with current SARIMA configuration and forecast 1 time step ahead
yhat, model = sarima_model(train, test, trend_order, seasonal_order, forecast_period)
forecasted_values.append(yhat[0])
test = series[window_size:seriesLen]
mape = np.mean(np.abs(forecasted_values - test))
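# Usage sketch for sarima_model() above on a synthetic seasonal series; the orders,
# the series and the seasonal period are illustrative values, not tuned choices.
def _demo_sarima_forecast():
    rng = np.random.default_rng(0)
    t = np.arange(120)
    series = pd.Series(10 + 2 * np.sin(2 * np.pi * t / 12)
                       + rng.normal(scale=0.5, size=t.size))
    train, test = series[:-12], series[-12:]
    yhat, summary = sarima_model(train, test, trend_order=(1, 0, 0),
                                 seasonal_order=(1, 0, 0, 12), forecast_period=12)
    rmse = sqrt(mean_squared_error(test, yhat))
    return yhat, rmse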
import shutil
import os
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true, assert_raises
import mne
import hcp
from mne.utils import _TempDir
from hcp.tests import config as tconf
from hcp.io.read import _hcp_pick_info
hcp_params = dict(hcp_path=tconf.hcp_path,
subject=tconf.test_subject)
def test_read_annot():
"""Test reading annotations."""
for run_index in tconf.run_inds:
annots = hcp.read_annot(data_type='rest', run_index=run_index,
**hcp_params)
# channels
assert_equal(list(sorted(annots['channels'])),
['all', 'ica', 'manual', 'neigh_corr',
'neigh_stdratio'])
for channels in annots['channels'].values():
for chan in channels:
assert_true(chan in tconf.bti_chans)
# segments
assert_equal(list(sorted(annots['ica'])),
['bad', 'brain_ic', 'brain_ic_number',
'brain_ic_vs', 'brain_ic_vs_number',
'ecg_eog_ic', 'flag', 'good',
'physio', 'total_ic_number'])
for components in annots['ica'].values():
if len(components) > 0:
assert_true(min(components) >= 0)
assert_true(max(components) <= 248)
def _basic_raw_checks(raw):
"""Helper for testing raw files """
picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
assert_equal(len(picks), 248)
ch_names = [raw.ch_names[pp] for pp in picks]
assert_true(all(ch.startswith('A') for ch in ch_names))
ch_sorted = list(sorted(ch_names))
assert_true(ch_sorted != ch_names)
assert_equal(np.round(raw.info['sfreq'], 4), ...)
import os
import numpy as np
import pytest
from autolens import exc
from autolens.data.array.util import array_util, grid_util
from autolens.data.array import mask as msk
from autolens.data.array import scaled_array
test_data_dir = "{}/../../test_files/array/".format(os.path.dirname(os.path.realpath(__file__)))
@pytest.fixture(name="array_grid")
def make_array_grid():
return scaled_array.ScaledSquarePixelArray(np.zeros((5, 5)), pixel_scale=0.5)
@pytest.fixture(name="array_grid_rectangular")
def make_array_grid_rectangular():
return scaled_array.ScaledRectangularPixelArray(np.zeros((5, 5)), pixel_scales=(1.0, 0.5))
class TestArrayGeometry:
class TestArrayAndTuples:
def test__square_pixel_array__input_data_grid_3x3__centre_is_origin(self):
data_grid = scaled_array.ScaledSquarePixelArray(array=np.ones((3, 3)), pixel_scale=1.0)
assert data_grid.pixel_scale == 1.0
assert data_grid.shape == (3, 3)
assert data_grid.central_pixel_coordinates == (1.0, 1.0)
assert data_grid.shape_arc_seconds == pytest.approx((3.0, 3.0))
assert data_grid.arc_second_maxima == (1.5, 1.5)
assert data_grid.arc_second_minima == (-1.5, -1.5)
assert (data_grid == np.ones((3, 3))).all()
def test__square_pixel_array__input_data_grid_rectangular__change_origin(self):
data_grid = scaled_array.ScaledSquarePixelArray(array=np.ones((4, 3)), pixel_scale=0.1, origin=(1.0, 1.0))
assert (data_grid == np.ones((4, 3))).all()
assert data_grid.pixel_scale == 0.1
assert data_grid.shape == (4, 3)
assert data_grid.central_pixel_coordinates == (1.5, 1.0)
assert data_grid.shape_arc_seconds == pytest.approx((0.4, 0.3))
assert data_grid.arc_second_maxima == pytest.approx((1.2, 1.15), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((0.8, 0.85), 1e-4)
data_grid = scaled_array.ScaledSquarePixelArray(array=np.ones((3, 4)), pixel_scale=0.1)
assert (data_grid == np.ones((3, 4))).all()
assert data_grid.pixel_scale == 0.1
assert data_grid.shape == (3, 4)
assert data_grid.central_pixel_coordinates == (1.0, 1.5)
assert data_grid.shape_arc_seconds == pytest.approx((0.3, 0.4))
assert data_grid.arc_second_maxima == pytest.approx((0.15, 0.2), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((-0.15, -0.2), 1e-4)
def test__rectangular_pixel_grid__input_data_grid_3x3(self):
data_grid = scaled_array.ScaledRectangularPixelArray(array=np.ones((3, 3)), pixel_scales=(2.0, 1.0))
assert data_grid == pytest.approx(np.ones((3, 3)), 1e-4)
assert data_grid.pixel_scales == (2.0, 1.0)
assert data_grid.shape == (3, 3)
assert data_grid.central_pixel_coordinates == (1.0, 1.0)
assert data_grid.shape_arc_seconds == pytest.approx((6.0, 3.0))
assert data_grid.arc_second_maxima == pytest.approx((3.0, 1.5), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((-3.0, -1.5), 1e-4)
def test__rectangular_pixel_grid__input_data_grid_rectangular(self):
data_grid = scaled_array.ScaledRectangularPixelArray(array=np.ones((4, 3)), pixel_scales=(0.2, 0.1))
assert data_grid == pytest.approx(np.ones((4, 3)), 1e-4)
assert data_grid.pixel_scales == (0.2, 0.1)
assert data_grid.shape == (4, 3)
assert data_grid.central_pixel_coordinates == (1.5, 1.0)
assert data_grid.shape_arc_seconds == pytest.approx((0.8, 0.3), 1e-3)
assert data_grid.arc_second_maxima == pytest.approx((0.4, 0.15), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((-0.4, -0.15), 1e-4)
data_grid = scaled_array.ScaledRectangularPixelArray(array=np.ones((3, 4)), pixel_scales=(0.1, 0.2))
assert data_grid == pytest.approx(np.ones((3, 4)), 1e-4)
assert data_grid.pixel_scales == (0.1, 0.2)
assert data_grid.shape == (3, 4)
assert data_grid.central_pixel_coordinates == (1.0, 1.5)
assert data_grid.shape_arc_seconds == pytest.approx((0.3, 0.8), 1e-3)
assert data_grid.arc_second_maxima == pytest.approx((0.15, 0.4), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((-0.15, -0.4), 1e-4)
def test__rectangular_pixel_array__input_data_grid_3x3__centre_is_yminus1_xminuss2(self):
data_grid = scaled_array.ScaledRectangularPixelArray(array=np.ones((3, 3)), pixel_scales=(2.0, 1.0),
origin=(-1.0, -2.0))
assert data_grid == pytest.approx(np.ones((3, 3)), 1e-4)
assert data_grid.pixel_scales == (2.0, 1.0)
assert data_grid.shape == (3, 3)
assert data_grid.central_pixel_coordinates == (1.0, 1.0)
assert data_grid.shape_arc_seconds == pytest.approx((6.0, 3.0))
assert data_grid.origin == (-1.0, -2.0)
assert data_grid.arc_second_maxima == pytest.approx((2.0, -0.5), 1e-4)
assert data_grid.arc_second_minima == pytest.approx((-4.0, -3.5), 1e-4)
class TestCentralPixel:
def test__square_pixel_grid(self):
grid = scaled_array.ScaledSquarePixelArray(np.zeros((3, 3)), pixel_scale=0.1)
assert grid.central_pixel_coordinates == (1, 1)
grid = scaled_array.ScaledSquarePixelArray(np.zeros((4, 4)), pixel_scale=0.1)
assert grid.central_pixel_coordinates == (1.5, 1.5)
grid = scaled_array.ScaledSquarePixelArray(np.zeros((5, 3)), pixel_scale=0.1, origin=(1.0, 2.0))
assert grid.central_pixel_coordinates == (2.0, 1.0)
def test__rectangular_pixel_grid(self):
grid = scaled_array.ScaledRectangularPixelArray(np.zeros((3, 3)), pixel_scales=(2.0, 1.0))
assert grid.central_pixel_coordinates == (1, 1)
grid = scaled_array.ScaledRectangularPixelArray(np.zeros((4, 4)), pixel_scales=(2.0, 1.0))
assert grid.central_pixel_coordinates == (1.5, 1.5)
grid = scaled_array.ScaledRectangularPixelArray(np.zeros((5, 3)), pixel_scales=(2.0, 1.0), origin=(1.0, 2.0))
assert grid.central_pixel_coordinates == (2, 1)
class TestGrids:
def test__square_pixel_grid__grid_2d__compare_to_array_util(self):
grid_2d_util = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(4, 7),
pixel_scales=(0.56, 0.56))
sca = scaled_array.ScaledSquarePixelArray(array=np.zeros((4, 7)), pixel_scale=0.56)
assert sca.grid_2d == pytest.approx(grid_2d_util, 1e-4)
def test__square_pixel_grid__array_3x3__sets_up_arc_second_grid(self):
sca = scaled_array.ScaledSquarePixelArray(array=np.zeros((3, 3)), pixel_scale=1.0)
assert (sca.grid_2d == np.array([[[1., -1.], [1., 0.], [1., 1.]],
[[0., -1.], [0., 0.], [0., 1.]],
[[-1., -1.], [-1., 0.], [-1., 1.]]])).all()
def test__square_pixel_grid__grid_1d__compare_to_array_util(self):
grid_1d_util = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(4, 7),
pixel_scales=(0.56, 0.56))
sca = scaled_array.ScaledSquarePixelArray(array=np.zeros((4, 7)), pixel_scale=0.56)
assert sca.grid_1d == pytest.approx(grid_1d_util, 1e-4)
def test__square_pixel_grid__nonzero_centres__compare_to_array_util(self):
grid_2d_util = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(4, 7),
pixel_scales=(0.56, 0.56),
origin=(1.0, 3.0))
sca = scaled_array.ScaledSquarePixelArray(array=np.zeros((4, 7)), pixel_scale=0.56, origin=(1.0, 3.0))
assert sca.grid_2d == pytest.approx(grid_2d_util, 1e-4)
grid_1d_util = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(4, 7),
pixel_scales=(0.56, 0.56),
origin=(-1.0, -4.0))
sca = scaled_array.ScaledSquarePixelArray(array=np.zeros((4, 7)), pixel_scale=0.56, origin=(-1.0, -4.0))
assert sca.grid_1d == pytest.approx(grid_1d_util, 1e-4)
def test__rectangular_pixel_grid__grid_2d__compare_to_array_util(self):
grid_2d_util = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(4, 7),
pixel_scales=(0.8, 0.56))
sca = scaled_array.ScaledRectangularPixelArray(array=np.zeros((4, 7)), pixel_scales=(0.8, 0.56))
assert sca.grid_2d == pytest.approx(grid_2d_util, 1e-4)
def test__rectangular_pixel_grid__array_3x3__sets_up_arcsecond_grid(self):
sca = scaled_array.ScaledRectangularPixelArray(array=np.zeros((3, 3)), pixel_scales=...)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model firing of a population using almost convolutional s.u. (experimental)
The declared class has attributes to train the model and
view the different components of the circuitry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import numpy as np
import tensorflow as tf
from absl import gfile
class AlmostConvolutionalExperimentalWdelOnly(object):
"""Model firing rate for a population by almost convolutional subunits."""
def __init__(self, loss_string, stim, resp, short_filename, window=2,
stride=1, lam_w=0, step_sz=1, n_cells=107, taskid=0):
"""Build the graph to predict population response using subunits.
Firing rate for cell c: lam_c = a_sfm_c'.relu(w.x + bias_su) + bias_cell,
x: stimulus, lam_c: firing rate of cell
bias_c and bias_su : cell and subunit bias
a_sfm_c = softmax(a) : so a cell cannot be connected to
all subunits equally well.
The w_i act over small windows and are convolutionally
related to each other: w_i = w_mother + w_del_i,
where w_mother is common across all 'windows' and
w_del is different for different windows.
Args:
loss_string : type of loss to use
stim: stimulus
resp : response
short_filename : filename to store results
window: (2*window +1) is the convolutional window size
stride: stride for convolutions
lam_w : regularizing w_del
step_sz : step size for SGD
n_cells : total number of cells in response tensor.
"""
# Add model specific names to filename.
short_filename = ('model=almost_convolutional_expt_wdel_only_window=' +
str(window) + '_stride=' + str(stride) +
'_lam_w=' + str(lam_w) + short_filename)
# Convolution parameters.
model_params = collections.namedtuple('model_params',
['mask_tf', 'dimx', 'dimy',
'n_pix', 'window', 'stride',
'n_cells'])
mask_tf, dimx, dimy, n_pix = get_windows(window, stride)
model_pars = model_params(mask_tf, dimx, dimy, n_pix,
window, stride, n_cells)
# Variables.
model_vars = self.build_variables(model_pars, taskid)
# Get firing rate.
lam, su_act = self.build_firing_rate(model_vars, model_pars, stim)
# Get loss according to specification.
loss_unregularized = get_loss(loss_string, lam, resp)
# Regularization keeps the 'delta' weights small.
regularization = lam_w * tf.reduce_sum(tf.nn.l2_loss(model_vars.w_del))
loss = loss_unregularized + regularization # add regularization
gradient_update = tf.train.AdagradOptimizer(step_sz).minimize(loss)
# Projection to satisfy hard variable constraints.
# Project only after gradient update.
with tf.control_dependencies([gradient_update]):
proj_ops= []
if taskid % 2 == 0:
bias_cell_project_positive = tf.assign(model_vars.bias_cell,
tf.nn.relu(
model_vars.bias_cell))
proj_ops += [bias_cell_project_positive]
if np.floor(taskid/2) % 2 == 0 :
scale_cell_project_positive = tf.assign(model_vars.scale_cell,
tf.nn.relu(model_vars.scale_cell))
proj_ops += [scale_cell_project_positive]
# Make a combined model update op.
#model_update = tf.group(gradient_update, b_project_positive, scale_cell_project_pos)
model_update = tf.group(gradient_update, *proj_ops)
# Make model probes.
model_probes = collections.namedtuple('model_probes',
['su_act', 'lam', 'loss',
'loss_unregularized'])
model_prb = model_probes(su_act, lam, loss, loss_unregularized)
self.stim = stim
self.resp = resp
self.params = model_pars
self.update = model_update
self.probes = model_prb
self.variables = model_vars
self.short_filename = short_filename
self.build_summaries()
def build_variables(self, model_pars, taskid):
"""Declare variables of the model."""
# Get convolutional windows.
dimx = model_pars.dimx
dimy = model_pars.dimy
n_pix = model_pars.n_pix
window = model_pars.window
n_cells = model_pars.n_cells
# Build model variables.
w_mother = tf.constant(np.array(np.zeros((2 * window + 1,
2 * window + 1, 1, 1)),
dtype='float32'), name='w_mother')
w_del = tf.Variable(np.array(0.5 + 0.25*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
a = tf.Variable(np.array(np.zeros((dimx*dimy, n_cells)),
dtype='float32'), name='a')
# declare bias_cell
if taskid % 2 == 0:
tf.logging.info('bias_cell is variable')
bias_cell = tf.Variable(np.array(0.000001*np.ones(n_cells),
dtype='float32'), name='bias_cell')
else:
tf.logging.info('bias_cell is constant')
bias_cell = tf.constant(np.array(0.000001*np.ones(n_cells),
dtype='float32'), name='bias_cell')
# declare scale_cell
if np.floor(taskid/2) % 2 == 0 :
tf.logging.info('scale_cell is variable')
scale_cell = tf.Variable(np.array(np.ones(n_cells),
dtype='float32'), name='scale_cell')
else:
tf.logging.info('scale_cell is constant')
scale_cell = tf.constant(np.array(np.ones(n_cells),
dtype='float32'), name='scale_cell')
# declare bias_su
if np.floor(taskid/4) % 2 ==0:
tf.logging.info('bias_su is variable')
bias_su = tf.Variable(np.array(0.000001*np.random.randn(1, dimx, dimy),
dtype='float32'), name='bias_su')
else:
tf.logging.info('bias_su is constant')
bias_su = tf.constant(np.array(0.000001*np.random.randn(1, dimx, dimy),
dtype='float32'), name='bias_su')
# Collect model parameters.
model_variables = collections.namedtuple('model_variables',
['w_mother', 'w_del', 'a',
'bias_cell', 'bias_su',
'scale_cell'])
model_vars = model_variables(w_mother, w_del, a,
bias_cell, bias_su, scale_cell)
return model_vars
def build_firing_rate(self, model_vars, model_pars, stim):
"""Compute the firing rate and subunit activations."""
# Get model parameters.
mask_tf = model_pars.mask_tf
dimx = model_pars.dimx
dimy = model_pars.dimy
stride = model_pars.stride
# Get model variables.
a = model_vars.a
w_mother = model_vars.w_mother
w_del = model_vars.w_del
bias_su = model_vars.bias_su
bias_cell = model_vars.bias_cell
scale_cell = model_vars.scale_cell
k_smoothing = 0.00000001
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
stim_4d = tf.expand_dims(tf.reshape(stim, (-1, 40, 80)), 3)
stim_convolved = tf.reduce_sum(tf.nn.conv2d(stim_4d, w_mother,
strides=[1, stride, stride, 1],
padding='VALID'), 3)
stim_masked = tf.nn.conv2d(stim_4d, mask_tf, strides=[1, stride, stride, 1],
padding='VALID')
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
# Input from convolutional SU and delta SU.
su_act = tf.nn.relu(stim_del + stim_convolved + bias_su)
lam = (tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), a_sfm) * scale_cell +
bias_cell + k_smoothing)
return lam, su_act
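# Shape sketch of the firing-rate computation above (dimensions follow the hard-coded
# 40x80 stimulus reshape): stim_4d is [batch, 40, 80, 1]; stim_convolved and stim_del
# are [batch, dimx, dimy]; su_act is [batch, dimx, dimy]; and lam is [batch, n_cells]
# through the softmax-normalized connection matrix a_sfm of shape [dimx*dimy, n_cells].
# In short, per cell c:
#   lam_c = scale_c * sum_i a_sfm[i, c] * relu((w_mother + w_del_i) . x_i + bias_su_i)
#           + bias_c + k_smoothing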
def build_summaries(self):
"""Add some summaries."""
# Add mother subunit weights.
w_mother = self.variables.w_mother
mother_min = tf.reduce_min(w_mother)
mother_max = tf.reduce_max(w_mother - mother_min)
mother_rescaled = (w_mother - mother_min) / mother_max
mother_rescaled = tf.transpose(mother_rescaled, [3, 0, 1, 2])
tf.summary.image('mother', mother_rescaled)
# Create summary writers.
# Create histogram summary for all parameters which are learnt.
for ivar in self.variables:
tf.summary.histogram(ivar.name, ivar)
# Loss summary.
tf.summary.scalar('loss_total', self.probes.loss)
# Loss without regularization summary.
tf.summary.scalar('loss_unregularized', self.probes.loss_unregularized)
# Merge all the summary writer ops into one op (this way,
# calling one op stores all summaries)
merged = tf.summary.merge_all()
self.summary_op = merged
tf.logging.info('summary OP set')
def get_summary_op(self):
"""Return the summary op."""
return self.summary_op
def initialize_model(self, save_location, folder_name, sess, feed_dict=None):
"""Setup model variables and saving information."""
# TODO(bhaishahster): factor out 'session' from inside the library.
# Make folder.
self.initialize_folder(save_location, folder_name)
# Initialize variables.
self.initialize_variables(sess)
def initialize_folder(self, save_location, folder_name):
"""Intialize saving location of the model."""
parent_folder = os.path.join(save_location, folder_name)
# Make folder if it does not exist.
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
self.parent_folder = parent_folder
save_location = os.path.join(parent_folder, self.short_filename)
if not gfile.IsDirectory(save_location):
gfile.MkDir(save_location)
self.save_location = save_location
self.save_filename = os.path.join(self.save_location, self.short_filename)
def initialize_variables(self, sess):
"""Initialize variables or restore from previous fits."""
sess.run(tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables()))
saver_var = tf.train.Saver(tf.all_variables(),
keep_checkpoint_every_n_hours=4)
load_prev = False
start_iter = 0
try:
# Restore previous fits if they are available
# - useful when programs are preempted frequently on .
latest_filename = self.short_filename + '_latest_fn'
restore_file = tf.train.latest_checkpoint(self.save_location,
latest_filename)
# Restore previous iteration count and start from there.
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file) # restore variables
load_prev = True
except:
#tf.logging.info('Initializing variables from data')
#self.initialize_variables_from_data(sess)
tf.logging.info('No previous dataset')
if load_prev:
tf.logging.info('Previous results loaded from: ' + restore_file)
else:
tf.logging.info('Variables initialized')
writer = tf.summary.FileWriter(self.save_location + 'train', sess.graph)
tf.logging.info('Loaded iteration: %d' % start_iter)
self.saver_var = saver_var
self.iter = start_iter
self.writer = writer
def initialize_variables_from_data(self, sess, n_batches_init=20):
"""Initialize variables smartly by looking at some training data."""
tf.logging.info('Initializing variables from data')
# setup data threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
tf.logging.info('data threads started')
tf.logging.info('Initializing a from data')
resp_expanded = tf.expand_dims(tf.expand_dims(self.resp, 1), 2)
su_act_expanded = tf.expand_dims(self.probes.su_act, 3)
a_avg = tf.expand_dims(tf.reduce_mean(tf.mul(su_act_expanded,
resp_expanded), 0), 0)
a_initialize = np.zeros((1, self.params.dimx, self.params.dimy, self.params.n_cells))
for ibatch in range(n_batches_init):
print('init batch: %d' % ibatch)
a_initialize += sess.run(a_avg)
a_initialize /= n_batches_init
a_max = np.max(np.reshape(a_initialize, [-1, self.params.n_cells]), axis=0)
mask = a_initialize > a_max*0.7
a_initial_masked = mask*np.log(a_max) - 40*(1-mask)
a_initial_tf = tf.constant(a_initial_masked.astype(np.float32))
a_init_tf = tf.assign(self.variables.a, tf.reshape(a_initial_tf, [-1, self.params.n_cells]))
sess.run(a_init_tf)
tf.logging.info('a initialized from data')
from IPython.terminal.embed import InteractiveShellEmbed
ipshell = InteractiveShellEmbed()
ipshell()
#coord.request_stop()
#coord.join(threads)
    tf.logging.info('a initialized based on average activity')
def write_summaries(self, sess):
"""Save variables and add summary."""
# Save variables.
latest_filename = self.short_filename + '_latest_fn'
self.saver_var.save(sess, self.save_filename, global_step=self.iter,
latest_filename=latest_filename)
# Add summary.
summary = sess.run(self.summary_op)
self.writer.add_summary(summary, self.iter)
tf.logging.info('Summaries written, iteration: %d' % self.iter)
# print
ls_train = sess.run(self.probes.loss)
tf.logging.info('Iter %d, train loss %.3f' % (self.iter, ls_train))
def get_windows(window, stride):
"""Get locations and arrangement of the convolutional windows.
Args:
window : (2*window+1) is the symmetrical convolutional window size
stride : the stride between nearby convolutional windows
Returns:
mask_tf : Mask to identify each window.
dimx : number of windows in x dimension
dimy : number of windows in y dimension
n_pix : number of pixels in each window
"""
n_pix = (2* window + 1) ** 2 # number of pixels in the window
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
  # Make mask_tf: a (2*window+1, 2*window+1, 1, n_pix) convolution weight in which each
  # output channel is 1 at exactly one pixel of the window and 0 elsewhere. Convolving the
  # stimulus with this mask flattens the pixels of every window into channels, so that a
  # separate weight can be applied to each pixel of each window.
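  # Example: for window=1 the mask has 9 output channels; channel 0 is 1 only at pixel (0, 0)
  # of the 3x3 window, channel 1 only at pixel (0, 1), and so on.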
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] = 1
icnt += 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# Number of windows in x and y dimensions.
dimx = np.floor(1 + ((40 - (2 * window + 1))/stride)).astype('int')
  dimy = np.floor(1 + ((80 - (2 * window + 1))/stride)).astype('int')
|
numpy.floor
|
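# A minimal NumPy-only sketch (hypothetical window=2, stride=1) of the window geometry
# computed by get_windows above: n_pix pixels per window, and dimx x dimy window positions
# on the 40 x 80 stimulus assumed in this file.
import numpy as np

window, stride = 2, 1
n_pix = (2 * window + 1) ** 2                                          # 25 pixels per window
dimx = np.floor(1 + ((40 - (2 * window + 1)) / stride)).astype('int')  # 36 positions
dimy = np.floor(1 + ((80 - (2 * window + 1)) / stride)).astype('int')  # 76 positions
print(n_pix, dimx, dimy)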
import config
import dataset
import engine
import torch
import pandas as pd
import torch.nn as nn
import numpy as np
from model import BERTBaseUncased
from sklearn import model_selection
from sklearn import metrics
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
|
numpy.iinfo
|
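# A minimal standalone sketch of the downcast rule used in reduce_mem_usage above
# (hypothetical column bounds): np.iinfo gives the representable range of each integer
# dtype, and the smallest dtype whose range contains [c_min, c_max] is chosen.
import numpy as np

c_min, c_max = 0, 255
fits_int8 = c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max     # False: 255 exceeds 127
fits_int16 = c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max  # True
print(fits_int8, fits_int16)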
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 14:47:27 2020
@author: Timothe
"""
import struct
import numpy as np
import os, sys
import pyprind
import sqlalchemy as sql
import pandas as pd
import warnings
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath("__filename__"))))
from LibUtils import strings, traces
def GetVSD_FileList(SessionPath):
    Files = strings.RegFileSearch(SessionPath, r'.*\.rsh$')
Files = strings.AlphaNum_Sort(Files)
return Files
def GetVSDEventsID(session_id, SQLengine):
query = ("""
SELECT mouse_number, MS.session_id, timestamp, SD.event_value, event_origin FROM mouses_sessions as MS
INNER JOIN session_detail as SD
ON MS.session_id = SD.session_id
INNER JOIN event_def as ED
ON SD.event_value = ED.event_value
INNER JOIN mouses as MO
ON MO.mouses_id = MS.mouses_id
INNER JOIN training_set_def as TS
ON TS.training_set_id = MS.training_set_id
where MS.session_id = {0}
""")
df = pd.DataFrame(columns = ['Trial_nb', 'VSD','Expect', 'Surprise','All_Events','All_Timestamps'])
result_2 = pd.read_sql_query(query.format(session_id), SQLengine)
lastStartEvent = 0
Trialnb = 0
Eventindex = 0
VSD = 1
Expect = 1
Surprise = 1
TimeStampList = []
ValuesList = []
for index, row in result_2.iterrows():
if row['event_value'] == 255 or row['event_value'] == 60 or row['event_value'] == 50 :
if abs(lastStartEvent - row['timestamp']) > 100 :
df.at[Trialnb, 'All_Events'] = ValuesList
df.at[Trialnb, 'All_Timestamps'] = TimeStampList
TimeStampList = []
ValuesList = []
lastStartEvent = row['timestamp']
Trialnb = Trialnb + 1
df.at[Trialnb, 'Trial_nb'] = int(Trialnb)
else :
df.at[Trialnb, 'Trial_nb'] = int(Trialnb)
if row['event_value'] == 50 :
df.at[Trialnb, 'VSD'] = int(VSD)
VSD = VSD +1
if row['event_value'] == 20 :
df.at[Eventindex, 'Expect'] = np.nan
df.at[Eventindex, 'Surprise'] = np.nan
Eventindex = Eventindex + 1
if row['event_value'] == 21 :
df.at[Eventindex, 'Expect'] = int(Expect)
Expect = Expect + 1
Eventindex = Eventindex + 1
if row['event_value'] == 22 :
df.at[Eventindex, 'Surprise'] = int(Surprise)
Surprise = Surprise +1
Eventindex = Eventindex + 1
TimeStampList.append(row['timestamp'])
ValuesList.append(row['event_value'])
#df.at[Trialnb, 'All_Events'] = df.at[Trialnb, 'All_Events'].append(row['event_value'])
if len(TimeStampList) != 0:
df.at[Trialnb, 'All_Events'] = ValuesList
df.at[Trialnb, 'All_Timestamps'] = TimeStampList
return df
def Check_SessionIsVSD(session_id,SQLengine):
queryVSD = ("""
SELECT ts.VSD from training_set_def as ts
inner join mouse_batches as mb
on ts.batch = mb.id_batches
inner join mouses_sessions as ms
on ms.training_set_id = ts.training_set_id
where ms.session_id = {0}
""")
result = pd.read_sql_query(queryVSD.format(session_id), SQLengine)
State = result.VSD[0]
if State:
return True
else :
return False
def GetVSD_Data(InputPath):
Files = ReadRSH(InputPath)
Data, Signals, Rawdata= ReadRSD(Files)
return Data, Signals, Rawdata
def ReadRSH(InputPath):
Files = []
with open(InputPath,'r') as F:
Line = F.readline()
while Line :
if "Data-File-List" in Line:
FileLine = F.readline()
while FileLine:
if '.rsd' in FileLine or '.rsh' in FileLine :
Files.append(os.path.join( os.path.dirname(InputPath) , FileLine.rstrip()))
FileLine = F.readline()
break
Line = F.readline()
#warnings.warn(f'Using RSD files : {Files}', category = RuntimeWarning ,stacklevel = 00)
#print(Files)
return Files
def ReshapeMICAMsignal(array3D, **kwargs):
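    # Descriptive note: walks each frame in blocks of 4 rows, reading every block column by
    # column, and returns the flattened signal bit-inverted with np.invert unless
    # reverse=False is explicitly passed.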
Output = []
temp = []
for I in range(np.shape(array3D)[0]):
for Line in range(int(np.shape(array3D)[1]/4)):
for J in range(np.shape(array3D)[2]):
for K in range(4):
temp.append(array3D[I,K+(Line*4),J])
Output.extend(temp)
temp = []
if 'reverse' in kwargs and not kwargs.get('reverse'):
return np.asarray(Output)
else :
return np.invert(np.asarray(Output))
def FasterReadRSD(InputPath):
FramesPerFile = 256 #default behavior of RSD format
msg = "Reading : " + os.path.split(InputPath)[1]
print(msg,end = "\r")
with open(InputPath ,'rb') as F:
byte_content = F.read()
fmt = "<%dh" % (len(byte_content) // 2)
IntData = struct.unpack(fmt, byte_content)
#return np.resize(IntData,(FramesPerFile,100,128))
print( len(msg) * " ",end = "\r")
return np.reshape(IntData,(FramesPerFile,100,128))
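# A minimal self-contained sketch (hypothetical data, not part of the original module) of the
# decoding step used in FasterReadRSD: 16-bit little-endian integers are unpacked with struct
# and reshaped to (frames, 100, 128).
def _demo_rsd_decode(frames=2):
    import struct
    import numpy as np
    raw = struct.pack("<%dh" % (frames * 100 * 128), *range(frames * 100 * 128))
    fmt = "<%dh" % (len(raw) // 2)
    return np.reshape(struct.unpack(fmt, raw), (frames, 100, 128))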
def OldReadRSD(InputPath):
with open( InputPath ,'rb') as F:
Size = os.path.getsize(InputPath[ItemIndex])
bar = pyprind.ProgBar(int(Size/1600),bar_char='░', title=f'loading VSD :{InputPath[ItemIndex]}')
IntData = []
byte = F.read(2) #Reading 16 bytes "short integer" Z (negative & positive) natural numbers data (centered around 0 : -32,768 to 32,767)
cnt = 1
while byte :
IntData.append(struct.unpack('h', byte))
byte = F.read(2)
if cnt % 800 == 0 :
bar.update()
cnt = cnt + 1
del bar
return np.resize(IntData,(FilesPerSequence,100,128))
def ReadRSD(InputPath):
imagespan = [20,120]
SignalsSpan = [0,80]
FrameSpan = [10,12]
AIn1Span = [12,14]
AIn2Span = [14,16]
Stim1Span = [8,10]
Stim2Span = [6,8]
FilesPerSequence = 256
for ItemIndex in range(len(InputPath)):
Data = FasterReadRSD(InputPath[ItemIndex])
if ItemIndex == 0 :
RawData = Data
Image0 = Data[0,:,imagespan[0]:imagespan[1]]
VarimagesImages = Data[1:,:,imagespan[0]:imagespan[1]]
AnalogIn1 = Data[:,SignalsSpan[0]:SignalsSpan[1],AIn1Span[0]:AIn1Span[1]]
AnalogIn2 = Data[:,SignalsSpan[0]:SignalsSpan[1],AIn2Span[0]:AIn2Span[1]]
Stim1 = Data[:,SignalsSpan[0]:SignalsSpan[1],Stim1Span[0]:Stim1Span[1]]
Stim2 = Data[:,SignalsSpan[0]:SignalsSpan[1],Stim2Span[0]:Stim2Span[1]]
Frames = Data[:,SignalsSpan[0]:SignalsSpan[1],FrameSpan[0]:FrameSpan[1]]
            Images = np.empty((len(InputPath) * FilesPerSequence, np.shape(VarimagesImages)[1], np.shape(VarimagesImages)[2]))
|
numpy.shape
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 17:56:24 2017
@author: sarah
"""
import numpy as np
from misc import *
import world
import environment as env
import agent as agt
import perception as prc
import action_selection as asl
import itertools
import matplotlib.pylab as plt
from multiprocessing import Pool
from matplotlib.colors import LinearSegmentedColormap
import jsonpickle as pickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import json
import seaborn as sns
import os
import pandas as pd
import gc
import pickle
np.set_printoptions(threshold = 100000, precision = 5)
plt.style.use('seaborn-whitegrid')
# always pass a list of classes
def save_data(file_name, objects):
with open(file_name, 'wb') as output_file:
pickle.dump(objects, output_file)
def load_data(file_name):
with open(file_name, 'rb') as file:
objects = pickle.load(file)
return objects
def extract_object(obj):
keys = []
obj_dict = obj.__dict__
for key in obj_dict:
keys.append(key)
return keys, obj_dict
"""
run function
"""
save = True
data_folder = os.path.join('C:\\Users\\admin\\Desktop\\project\\BalancingControl','data')
const = 0#1e-10
trials = 200 #number of trials
T = 5 #number of time steps in each trial
Lx = 4 #grid length
Ly = 5
no = Lx*Ly #number of observations
ns = Lx*Ly #number of states
na = 3 #number of actions
npi = na**(T-1)
nr = 2
nc = ns
actions = np.array([[0,-1], [1,0], [0,1]])
g1 = 14
g2 = 10
start = 2
print("start", start)
print("g2", g2)
print("g1", g1)
print("nc", nc)
print("nr", nr)
print("npi", npi)
print("na", na)
print("ns", ns)
print("no", no)
print("trials", trials)
print("data_folder", data_folder)
print("save", save)
print('\n\nrunning simulations\n\n')
print('-------------------------')
def run_agent(par_list, trials=trials, T=T, Lx = Lx, Ly = Ly, ns=ns, na=na,var=0.1,run=0,\
sample_post = False, sample_other = False, prior_start = True):
#set parameters:
#obs_unc: observation uncertainty condition
#state_unc: state transition uncertainty condition
#goal_pol: evaluate only policies that lead to the goal
#utility: goal prior, preference p(o)
# over_actions -> ddm uses prior and likelihood over actions or policies
obs_unc, state_unc, goal_pol, selector, context, utility, over_actions, h, q = par_list
print("q", q)
print("h", h)
name_str = selector + '_s'+ str(var)+'_context_' + str(context) + '_over-actions_'+ str(over_actions)+'_h'+str(h) + '_'+str(run)
"""
create matrices
"""
vals = np.array([1., 2/3., 1/2., 1./2.])
#generating probability of observations in each state
A = np.eye(ns) + const
np.fill_diagonal(A, 1-(ns-1)*const)
#state transition generative probability (matrix)
B = np.zeros((ns, ns, na)) + const
cert_arr = np.zeros(ns)
for s in range(ns):
x = s//Ly
y = s%Ly
#state uncertainty condition
if state_unc:
if (x==0) or (y==3):
c = vals[0]
elif (x==1) or (y==2):
c = vals[1]
elif (x==2) or (y==1):
c = vals[2]
else:
c = vals[3]
condition = 'state'
else:
c = 1.
cert_arr[s] = c
for u in range(na):
x = s//Ly+actions[u][0]
y = s%Ly+actions[u][1]
#check if state goes over boundary
if x < 0:
x = 0
elif x == Lx:
x = Lx-1
if y < 0:
y = 0
elif y == Ly:
y = Ly-1
s_new = Ly*x + y
if s_new == s:
B[s, s, u] = 1 - (ns-1)*const
else:
B[s, s, u] = 1-c + const
B[s_new, s, u] = c - (ns-1)*const
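                # Worked example: with Lx=4, Ly=5, state s=7 lies at (x, y) = (7//5, 7%5) = (1, 2);
                # action [1, 0] moves to x=2, giving s_new = Ly*x + y = 5*2 + 2 = 12. Moves that
                # would leave the grid are clamped to the border, so probability mass stays inside.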
B_c = np.broadcast_to(B[:,:,:,np.newaxis], (ns, ns, na, nc))
"""
create environment (grid world)
"""
Rho = np.zeros((nr,ns)) + const
Rho[0,:] = 1 - (nr-1)*const
Rho[:,np.argmax(utility)] = [0+const, 1-(nr-1)*const]
util = np.array([1-np.amax(utility), np.amax(utility)])
environment = env.GridWorld(A, B, Rho, trials = trials, T = T, initial_state=start)
Rho_agent = np.ones((nr,ns,nc))/ nr
if True:
templates = np.ones_like(Rho_agent)
templates[0] *= 100
assert ns == nc
for s in range(ns):
templates[0,s,s] = 1
templates[1,s,s] = 100
dirichlet_rew_params = templates
else:
dirichlet_rew_params = np.ones_like(Rho_agent)
"""
create policies
"""
pol = np.array(list(itertools.product(list(range(na)), repeat=T-1)))
#pol = pol[np.where(pol[:,0]>1)]
npi = pol.shape[0]
prior_policies = np.ones((npi,nc)) / npi
dirichlet_pol_param = np.zeros_like(prior_policies) + h
"""
set state prior (where agent thinks it starts)
"""
state_prior = np.zeros((ns))
state_prior[start] = 1
"""
set context prior and matrix
"""
context_prior = np.ones(nc)
trans_matrix_context = np.ones((nc,nc))
if nc > 1:
# context_prior[0] = 0.9
# context_prior[1:] = 0.1 / (nc-1)
context_prior /= nc
trans_matrix_context[:] = (1-q) / (nc-1)
np.fill_diagonal(trans_matrix_context, q)
"""
set action selection method
"""
if selector == 'dir':
ac_sel = asl.DirichletSelector(trials = trials, T = T, factor=0.5,
number_of_actions = na, calc_entropy=False, calc_dkl=False, draw_true_post=True)
elif selector == 'ddm':
pass
# sel = 'max'
# ac_sel = asl.MaxSelector(trials = trials, T = T,
# number_of_actions = na)
elif selector == 'rdm':
ac_sel = asl.RacingDiffusionSelector(trials = trials, T=T, s=var, number_of_actions=na, over_actions = over_actions)
ac_sel.sample_other = sample_other
ac_sel.sample_posterior = sample_post
ac_sel.prior_as_starting_point = prior_start
ac_sel.b = 2
ac_sel.wd = 1.809
elif selector == 'ardm':
ac_sel = asl.AdvantageRacingDiffusionSelector(trials = trials, T=T, s=var, number_of_actions=na, over_actions = over_actions)
ac_sel.sample_other = sample_other
ac_sel.sample_posterior = sample_post
ac_sel.prior_as_starting_point = prior_start
ac_sel.b = 2
ac_sel.wd = 1.809
else:
print('nothing selected')
"""
set up agent
"""
#bethe agent
if agent == 'bethe':
agnt = 'bethe'
# perception and planning
bayes_prc = prc.HierarchicalPerception(A, B_c, Rho_agent, trans_matrix_context, state_prior,
util, prior_policies,
dirichlet_pol_params = dirichlet_pol_param,
dirichlet_rew_params = dirichlet_rew_params)
bayes_pln = agt.BayesianPlanner(bayes_prc, ac_sel, pol,
trials = trials, T = T,
prior_states = state_prior,
prior_policies = prior_policies,
prior_context = context_prior,
number_of_states = ns,
learn_habit = True,
learn_rew = True,
#save_everything = True,
number_of_policies = npi,
number_of_rewards = nr)
#MF agent
else:
agnt = 'mf'
# perception and planning
bayes_prc = prc.MFPerception(A, B, state_prior, utility, T = T)
bayes_pln = agt.BayesianMFPlanner(bayes_prc, [], ac_sel,
trials = trials, T = T,
prior_states = state_prior,
policies = pol,
number_of_states = ns,
number_of_policies = npi)
"""
create world
"""
w = world.World(environment, bayes_pln, trials = trials, T = T)
"""
simulate experiment
"""
if not context:
w.simulate_experiment()
print("Rho", Rho)
else:
w.simulate_experiment(curr_trials=range(0, trials//2))
Rho_new = np.zeros((nr,ns)) + const
Rho_new[0,:] = 1 - (nr-1)*const
Rho_new[:,g2] = [0+const, 1-(nr-1)*const]
print("Rho_new", Rho_new)
w.environment.Rho[:] = Rho_new
#w.agent.perception.generative_model_rewards = Rho_new
w.simulate_experiment(curr_trials=range(trials//2, trials))
"""
plot and evaluate results
"""
plt.close()
#find successful and unsuccessful runs
#goal = np.argmax(utility)
successfull_g1 = np.where(environment.hidden_states[:,-1]==g1)[0]
if context:
successfull_g2 = np.where(environment.hidden_states[:,-1]==g2)[0]
unsuccessfull1 = np.where(environment.hidden_states[:,-1]!=g1)[0]
unsuccessfull2 = np.where(environment.hidden_states[:,-1]!=g2)[0]
unsuccessfull = np.intersect1d(unsuccessfull1, unsuccessfull2)
else:
unsuccessfull = np.where(environment.hidden_states[:,-1]!=g1)[0]
#total = len(successfull)
#plot start and goal state
start_goal = np.zeros((Lx,Ly))
x_y_start = (start//Ly, start%Ly)
start_goal[x_y_start] = 1.
x_y_g1 = (g1//Ly, g1%Ly)
start_goal[x_y_g1] = -1.
x_y_g2 = (g2//Ly, g2%Ly)
start_goal[x_y_g2] = -2.
palette = [(159/255, 188/255, 147/255),
(135/255, 170/255, 222/255),
(242/255, 241/255, 241/255),
(242/255, 241/255, 241/255),
(199/255, 174/255, 147/255),
(199/255, 174/255, 147/255)]
#set up figure params
factor = 3
grid_plot_kwargs = {'vmin': -2, 'vmax': 2, 'center': 0, 'linecolor': '#D3D3D3',
'linewidths': 7, 'alpha': 1, 'xticklabels': False,
'yticklabels': False, 'cbar': False,
'cmap': palette}#sns.diverging_palette(120, 45, as_cmap=True)} #"RdBu_r",
# plot grid
fig = plt.figure(figsize=[factor*5,factor*4])
ax = fig.gca()
annot = np.zeros((Lx,Ly))
for i in range(Lx):
for j in range(Ly):
annot[i,j] = i*Ly+j
u = sns.heatmap(start_goal, ax = ax, **grid_plot_kwargs, annot=annot, annot_kws={"fontsize": 40})
ax.invert_yaxis()
plt.savefig('grid.svg', dpi=600)
#plt.show()
# set up paths figure
fig = plt.figure(figsize=[factor*5,factor*4])
ax = fig.gca()
u = sns.heatmap(start_goal, zorder=2, ax = ax, **grid_plot_kwargs)
ax.invert_yaxis()
#find paths and count them
n1 = np.zeros((ns, na))
for i in successfull_g1:
for j in range(T-1):
d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
if d not in [1,-1,Ly,-Ly,0]:
print("ERROR: beaming")
if d == 1:
n1[environment.hidden_states[i, j],0] +=1
if d == -1:
n1[environment.hidden_states[i, j]-1,0] +=1
if d == Ly:
n1[environment.hidden_states[i, j],1] +=1
if d == -Ly:
n1[environment.hidden_states[i, j]-Ly,1] +=1
n2 = np.zeros((ns, na))
if context:
for i in successfull_g2:
for j in range(T-1):
d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
if d not in [1,-1,Ly,-Ly,0]:
print("ERROR: beaming")
if d == 1:
n2[environment.hidden_states[i, j],0] +=1
if d == -1:
n2[environment.hidden_states[i, j]-1,0] +=1
if d == Ly:
n2[environment.hidden_states[i, j],1] +=1
if d == -Ly:
n2[environment.hidden_states[i, j]-Ly,1] +=1
un = np.zeros((ns, na))
for i in unsuccessfull:
for j in range(T-1):
d = environment.hidden_states[i, j+1] - environment.hidden_states[i, j]
if d not in [1,-1,Ly,-Ly,0]:
print("ERROR: beaming")
if d == 1:
un[environment.hidden_states[i, j],0] +=1
if d == -1:
un[environment.hidden_states[i, j]-1,0] +=1
if d == Ly:
un[environment.hidden_states[i, j],1] +=1
if d == -Ly:
                un[environment.hidden_states[i, j]-Ly,1] +=1
total_num = n1.sum() + n2.sum() + un.sum()
if np.any(n1 > 0):
n1 /= total_num
if np.any(n2 > 0):
n2 /= total_num
if np.any(un > 0):
un /= total_num
#plotting
for i in range(ns):
x = [i%Ly + .5]
y = [i//Ly + .5]
#plot uncertainties
if obs_unc:
plt.plot(x,y, 'o', color=(219/256,122/256,147/256), markersize=factor*12/(A[i,i])**2, alpha=1.)
if state_unc:
plt.plot(x,y, 'o', color=(100/256,149/256,237/256), markersize=factor*12/(cert_arr[i])**2, alpha=1.)
#plot unsuccessful paths
for j in range(2):
if un[i,j]>0.0:
if j == 0:
xp = x + [x[0] + 1]
yp = y + [y[0] + 0]
if j == 1:
xp = x + [x[0] + 0]
yp = y + [y[0] + 1]
plt.plot(xp,yp, '-', color='#D5647C', linewidth=factor*75*un[i,j],
zorder = 9, alpha=1)
#set plot title
#plt.title("Planning: successful "+str(round(100*total/trials))+"%", fontsize=factor*9)
#plot successful paths on top
for i in range(ns):
x = [i%Ly + .5]
y = [i//Ly + .5]
for j in range(2):
if n1[i,j]>0.0:
if j == 0:
xp = x + [x[0] + 1]
yp = y + [y[0]]
if j == 1:
xp = x + [x[0] + 0]
yp = y + [y[0] + 1]
plt.plot(xp,yp, '-', color='#4682B4', linewidth=factor*75*n1[i,j],
zorder = 10, alpha=1)
#plot successful paths on top
if context:
for i in range(ns):
x = [i%Ly + .5]
y = [i//Ly + .5]
for j in range(2):
if n2[i,j]>0.0:
if j == 0:
xp = x + [x[0] + 1]
yp = y + [y[0]]
if j == 1:
xp = x + [x[0] + 0]
yp = y + [y[0] + 1]
plt.plot(xp,yp, '-', color='#55ab75', linewidth=factor*75*n2[i,j],
zorder = 10, alpha=1)
#print("percent won", total/trials, "state prior", np.amax(utility))
name = 'chosen_path'
if over_actions:
name += '_actions'
else:
name += '_policies'
if context:
name += '_cont-1'
else:
name += '_cont-0'
if prior_start:
name += '_prior-1'
else:
name += '_prior-0'
if sample_post:
name += '_post'
elif sample_other:
name += '_like'
else:
name += '_stand'
b = w.agent.action_selection.b
name += '_h'+str(h) + '_s' + str(var) + '_b' + str(b) + '_' + str(run) + '.png'
plt.savefig(name)
# plt.savefig('chosen_paths_'+name_str+'h'+str(h)+'.svg')
# plt.show()
max_RT = np.amax(w.agent.action_selection.RT[:,0])
plt.figure()
plt.plot(w.agent.action_selection.RT[:,0], '.')
plt.ylim([0,1.05*max_RT])
plt.xlim([0,trials])
plt.savefig("Gridworld_Dir_h"+str(h)+".svg")
# plt.show()
return w
"""
set up condition-dependent parameters
"""
def run_gridworld_simulations(repetitions, s, over_actions, selector, context,\
sample_post=False, sample_other=False, prior_start = True):
# prior over outcomes: encodes utility
utility = []
#ut = [0.5, 0.6, 0.7, 0.8, 0.9, 1-1e-3]
u = 0.999
utility = np.zeros(ns)
utility[g1] = u
utility[:g1] = (1-u)/(ns-1)
utility[g1+1:] = (1-u)/(ns-1)
    # action selection: averaged or max selection
tendencies = [1,1000]
if context:
name_str = "context_"
else:
name_str = ""
l = [] # parameter list
# uncertainty observation, state,
l.append([False, False, False, selector, context, utility, over_actions])
par_list = []
for p in itertools.product(l, tendencies):
par_list.append(p[0]+[p[1]])
qs = [0.97, 0.97]
for n,pars in enumerate(par_list):
h = pars[-1]
q = qs[n]
worlds = []
for i in range(repetitions):
print("i", i)
w = run_agent(pars+[q],var=s,run=i, sample_post=sample_post,\
sample_other=sample_other,\
prior_start=prior_start)
# plot agent posterior over context
if False:
# if context:
plt.figure()
plt.plot(w.agent.posterior_context[:,0,:])
#plt.plot(w.agent.posterior_context[:,0,g2])
plt.title('w.agent.posterior_context[:,0,:]')
plt.show()
# plot reward probabilities
plt.figure()
            rew_prob = np.einsum('tsc,tc->ts', w.agent.posterior_dirichlet_rew[:,0,1,:,:], w.agent.posterior_context[:,0])
|
numpy.einsum
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
from mars.executor import register, Executor
from mars.tensor.core import TensorOrder
from mars.tensor.datasource import ArrayDataSource
from mars.tiles import get_tiled
from mars.session import new_session, Session
class Test(unittest.TestCase):
def setUp(self):
new_session().as_default()
def testSessionExecute(self):
a = mt.random.rand(10, 20)
res = a.sum().to_numpy()
self.assertTrue(np.isscalar(res))
self.assertLess(res, 200)
def testSessionAsyncExecute(self):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
self.assertEqual(expected, res)
res = a.sum().execute(wait=False)
res = res.result().fetch()
self.assertEqual(expected, res)
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
res = mt.ExecutableTuple(t).execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
def testMultipleOutputExecute(self):
data = np.random.random((5, 9))
# test multiple outputs
arr1 = mt.tensor(data.copy(), chunk_size=3)
result = mt.modf(arr1).execute().fetch()
expected = np.modf(data)
np.testing.assert_array_equal(result[0], expected[0])
np.testing.assert_array_equal(result[1], expected[1])
# test 1 output
arr2 = mt.tensor(data.copy(), chunk_size=3)
result = ((arr2 + 1) * 2).to_numpy()
expected = (data + 1) * 2
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
arr3 = mt.tensor(data.copy(), chunk_size=3)
arrs = mt.split(arr3, 3, axis=1)
result = arrs[0].to_numpy()
expected = np.split(data, 3, axis=1)[0]
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
data = np.random.randint(0, 10, (5, 5))
arr3 = (mt.tensor(data) + 1) * 2
arrs = mt.linalg.qr(arr3)
result = (arrs[0] + 1).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 1
np.testing.assert_array_almost_equal(result, expected)
result = (arrs[0] + 2).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 2
np.testing.assert_array_almost_equal(result, expected)
s = mt.shape(0)
result = s.execute().fetch()
expected = np.shape(0)
self.assertEqual(result, expected)
def testReExecuteSame(self):
data = np.random.random((5, 9))
# test run the same tensor
arr4 = mt.tensor(data.copy(), chunk_size=3) + 1
result1 = arr4.to_numpy()
expected = data + 1
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test run the same tensor with single chunk
arr4 = mt.tensor(data.copy())
result1 = arr4.to_numpy()
expected = data
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr4).chunks[0].key] = data + 2
result3 = arr4.to_numpy()
np.testing.assert_array_equal(result3, data + 2)
# test run same key tensor
arr5 = mt.ones((10, 10), chunk_size=3)
result1 = arr5.to_numpy()
del arr5
arr6 = mt.ones((10, 10), chunk_size=3)
result2 = arr6.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test copy, make sure it will not let the execution cache missed
df = md.DataFrame(mt.ones((10, 3), chunk_size=5))
executed = [False]
def add_one(x):
if executed[0]: # pragma: no cover
raise ValueError('executed before')
return x + 1
df2 = df.apply(add_one)
pd.testing.assert_frame_equal(df2.to_pandas(), pd.DataFrame(np.ones((10, 3)) + 1))
executed[0] = True
df3 = df2.copy()
df4 = df3 * 2
pd.testing.assert_frame_equal(df4.to_pandas(), pd.DataFrame(np.ones((10, 3)) * 4))
def testExecuteBothExecutedAndNot(self):
data = np.random.random((5, 9))
arr1 = mt.tensor(data, chunk_size=4) * 2
arr2 = mt.tensor(data) + 1
np.testing.assert_array_equal(arr2.to_numpy(), data + 1)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr2).chunks[0].key] = data + 2
results = sess.run(arr1, arr2)
np.testing.assert_array_equal(results[0], data * 2)
np.testing.assert_array_equal(results[1], data + 2)
def testTensorExecuteNotFetch(self):
data = np.random.random((5, 9))
sess = Session.default_or_local()
arr1 = mt.tensor(data, chunk_size=2) * 2
with self.assertRaises(ValueError):
sess.fetch(arr1)
self.assertIs(arr1.execute(), arr1)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr1).chunks[0].key] = data[:2, :2] * 3
expected = data * 2
expected[:2, :2] = data[:2, :2] * 3
np.testing.assert_array_equal(arr1.to_numpy(), expected)
def testDataFrameExecuteNotFetch(self):
data1 = pd.DataFrame(np.random.random((5, 4)), columns=list('abcd'))
sess = Session.default_or_local()
df1 = md.DataFrame(data1, chunk_size=2)
with self.assertRaises(ValueError):
sess.fetch(df1)
self.assertIs(df1.execute(), df1)
self.assertEqual(len(df1[df1['a'] > 1].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
self.assertEqual(len(df1[df1['a'] > 1]['a'].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(df1).chunks[0].key] = data1.iloc[:2, :2] * 3
expected = data1
expected.iloc[:2, :2] = data1.iloc[:2, :2] * 3
pd.testing.assert_frame_equal(df1.to_pandas(), expected)
pd.testing.assert_frame_equal(df1.to_pandas(fetch_kwargs={'batch_size': 2}), expected)
def testClosedSession(self):
session = new_session()
arr = mt.ones((10, 10))
result = session.run(arr)
np.testing.assert_array_equal(result, np.ones((10, 10)))
session.close()
with self.assertRaises(RuntimeError):
session.run(arr)
with self.assertRaises(RuntimeError):
session.run(arr + 1)
def testBoolIndexing(self):
arr = mt.random.rand(10, 10, chunk_size=5)
arr[3:8, 3:8] = mt.ones((5, 5))
arr2 = arr[arr == 1]
self.assertEqual(arr2.shape, (np.nan,))
arr2.execute()
self.assertEqual(arr2.shape, (25,))
arr3 = arr2.reshape((5, 5))
expected = np.ones((5, 5))
np.testing.assert_array_equal(arr3.to_numpy(), expected)
def testArrayProtocol(self):
arr = mt.ones((10, 20))
result = np.asarray(arr)
np.testing.assert_array_equal(result, np.ones((10, 20)))
arr2 = mt.ones((10, 20))
        result = np.asarray(arr2, mt.bool_)
|
numpy.asarray
|
import unittest
from collections import Counter
from numpy import array
from newvelles.utils.text import process_content
from newvelles.utils.text import remove_subsets, remove_similar_subsets, _remove_duplicates
from newvelles.utils.text import get_top_words_spacy
TEST_CASES = {
'Limbic is a package.': ['limbic', 'package'],
'a random number 111': ['random', 'number'],
"something I didn't expected to test with l'huillier.":
['didnt', 'expected', 'test', 'lhuillier'],
"l'huillier is a last name a will not change.": ["l'huillier", "change"],
"didn't will be removed (stopword).": ["removed", 'stopword'],
'': ['']
}
TERMS_MAPPING = {'dog': 'cat'}
TEST_CASES_TERMS_MAPPING = {'this is a dog': 'this is a cat'}
TEST_SET_CASES = [
([[0, 1, 3], [0, 1], [2], [0, 3], [4], [0, 3, 2]], {(0, 1, 2, 3), (4, )}), ([[0]], [[0]]),
([], set()), ([[0, 1], [0, 1]], {(0, 1)}),
([[0, 1], [1, 2], [2, 4], [3, 4], [9, 8, 7, 6, 5, 4]], {(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)}),
([
array([0, 19]),
array([1]),
array([2, 22]),
array([3, 19]),
array([4, 17, 22, 36]),
array([5]),
array([6]),
array([7, 11, 19, 46, 321]),
array([8]),
array([9]),
array([
10, 34, 73, 99, 122, 148, 170, 174, 209, 216, 217, 221, 223, 252, 260, 264, 276, 282,
301, 309, 310, 311, 330
]),
array([7, 11, 46, 321]),
array([12]),
array([13, 18]),
array([14]),
array([15]),
array([16]),
array([4, 17]),
array([13, 18, 22]),
array([0, 3, 7, 19, 321]),
array([20, 21]),
array([20, 21]),
array([2, 4, 18, 22, 37, 38]),
array([23]),
array([24, 35]),
array([25]),
array([26]),
array([27]),
array([28]),
array([29]),
array([30]),
array([31]),
array([32, 321]),
array([33]),
array([10, 34]),
array([24, 35]),
array([4, 36]),
array([22, 37]),
array([22, 38]),
array([39]),
array([40]),
array([41]),
array([42]),
array([43]),
array([44]),
array([45]),
array([7, 11, 46, 321]),
array([47]),
array([48]),
array([49]),
array([50, 243]),
array([51, 185, 195, 198, 201, 202]),
array([52]),
array([53]),
array([54, 105, 115, 177, 214, 220]),
array([55, 120, 139, 151, 344]),
array([56, 151, 164]),
array([57]),
array([58]),
array([59, 273]),
array([60, 103, 272, 273]),
array([61]),
array([62]),
array([63]),
array([64]),
array([65]),
array([66]),
array([67, 72, 170, 174, 276, 301, 330]),
array([68]),
array([69]),
array([70]),
array([71]),
array([67, 72, 99, 170, 215, 217, 276, 278, 282, 297, 301]),
array([10, 73, 209, 217, 260, 267, 311]),
array([74]),
array([75]),
array([76]),
array([77]),
array([78]),
array([79]),
array([80]),
array([81]),
array([82]),
array([83]),
array([84]),
array([85]),
array([86]),
array([87]),
array([88, 381]),
array([89]),
array([90]),
array([91]),
array([92]),
array([93]),
array([94]),
array([95]),
array([96]),
array([97]),
array([98]),
array([10, 72, 99, 122, 170, 216, 217, 221, 223, 282, 301, 309, 330]),
array([100]),
array([101]),
array([102]),
array([60, 103, 272, 273]),
array([104]),
array([54, 105, 115, 177, 214, 220]),
array([106]),
array([107]),
array([108]),
array([109]),
array([110]),
array([111, 113]),
array([112]),
array([111, 113]),
array([114, 212]),
array([54, 105, 115, 177, 214, 220]),
array([116, 323]),
array([117]),
array([118, 159, 165]),
array([119]),
array([55, 120, 163, 344]),
array([121]),
array([
10, 99, 122, 148, 170, 174, 193, 215, 216, 217, 221, 223, 252, 260, 264, 282, 301, 311,
330
]),
array([123]),
array([124]),
array([125]),
array([126]),
array([127]),
array([128]),
array([129]),
array([130]),
array([131]),
array([132]),
array([133, 316]),
array([134]),
array([135]),
array([136]),
array([137, 323, 325, 346]),
array([138]),
array([55, 139, 344]),
array([140]),
array([141, 151, 344]),
array([142]),
array([143]),
array([144]),
array([145, 196]),
array([146]),
array([147, 343]),
array([10, 122, 148, 221, 252, 260, 282, 301, 309, 311]),
array([149, 167]),
array([150, 343, 346, 375]),
array([55, 56, 141, 151, 344]),
array([152, 300, 302]),
array([153]),
array([154]),
array([155]),
array([156]),
array([157, 318]),
array([158]),
array([118, 159, 165]),
array([160]),
array([161]),
array([162]),
array([120, 163]),
array([56, 164, 345]),
array([118, 159, 165, 383]),
array([166, 300]),
array([149, 167, 326]),
array([168]),
array([169]),
array([
10, 67, 72, 99, 122, 170, 174, 193, 215, 216, 217, 221, 223, 233, 252, 260, 276, 282,
301, 309, 310, 311, 330
]),
array([171]),
array([172]),
array([173]),
array([10, 67, 122, 170, 174, 217, 223, 264, 301, 330]),
array([175]),
array([176]),
array([54, 105, 115, 177, 214, 220]),
array([178]),
array([179]),
array([180]),
array([181]),
array([182]),
array([183]),
array([184]),
array([51, 185, 195, 197, 198, 202, 373]),
array([186]),
array([187]),
array([188]),
array([189]),
array([190, 193]),
array([191]),
array([192]),
array([122, 170, 190, 193, 215]),
array([194, 195, 198]),
array([51, 185, 194, 195, 197, 198, 201, 202]),
array([145, 196]),
array([185, 195, 197, 198, 201, 202]),
array([51, 185, 194, 195, 197, 198, 201, 202]),
array([199]),
array([200]),
array([51, 195, 197, 198, 201, 202]),
array([51, 185, 195, 197, 198, 201, 202, 373]),
array([203]),
array([204]),
array([205, 215, 233, 249, 278, 282]),
array([206]),
array([207]),
array([208]),
array([10, 73, 209, 210, 215, 221, 252, 260, 311]),
array([209, 210]),
array([211]),
array([114, 212]),
array([213]),
array([54, 105, 115, 177, 214, 220]),
array([72, 122, 170, 193, 205, 209, 215, 221, 252, 276, 301]),
array([10, 99, 122, 170, 216, 217, 221, 223, 233, 282, 301, 309, 330]),
array(
[10, 72, 73, 99, 122, 170, 174, 216, 217, 221, 223, 276, 278, 282, 301, 309, 311, 330]),
array([218, 316]),
array([219, 224]),
array([54, 105, 115, 177, 214, 220]),
array([10, 99, 122, 148, 170, 209, 215, 216, 217, 221, 223, 252, 260, 276, 301, 330]),
array([222]),
array([10, 99, 122, 170, 174, 216, 217, 221, 223, 233, 276, 278, 282, 301, 310]),
array([219, 224]),
array([225]),
array([226]),
array([227, 228]),
array([227, 228, 318]),
array([229]),
array([230]),
array([231]),
array([232]),
array([170, 205, 216, 223, 233, 282, 309, 311, 330]),
array([234]),
array([235]),
array([236]),
array([237, 316]),
array([238]),
array([239]),
array([240]),
array([241]),
array([242]),
array([50, 243]),
array([244]),
array([245]),
array([246]),
array([247]),
array([248, 270]),
array([205, 249, 264]),
array([250]),
array([251, 260]),
array([10, 122, 148, 170, 209, 215, 221, 252, 260, 264, 282, 301, 311]),
array([253]),
array([254, 257]),
array([255]),
array([256, 271]),
array([254, 257, 263]),
array([258]),
array([259, 270]),
array([10, 73, 122, 148, 170, 209, 221, 251, 252, 260, 267, 282, 301, 310, 311]),
array([261]),
array([262]),
array([257, 263]),
array([10, 122, 174, 249, 252, 264, 278, 282, 301, 311]),
array([265]),
array([266]),
array([73, 260, 267, 311]),
array([268]),
array([269]),
array([248, 259, 270]),
array([256, 271, 275]),
        array([60, 103, 272, 273]),
|
numpy.array
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class GenerateRNBinaryDouble():
def __init__(self,
total_data_points: int,
deviation: float):
self.total_data_points = total_data_points
self.deviation = deviation
def gen(self):
label_1 = []
label_0 = []
label_12 = []
s_1 = np.random.default_rng().normal(-1.5, self.deviation, int(self.total_data_points / 4))
s_0 = np.random.default_rng().normal(0, self.deviation, int(self.total_data_points / 2))
s_12 = np.random.default_rng().normal(1.5, self.deviation, int(self.total_data_points / 4))
for i in range(len(s_1)):
label_1.append(-1)
label_12.append(-1)
for i in range(len(s_0)):
label_0.append(1)
x = np.append(s_1, s_12)
x = np.append(x, s_0)
        x_label = np.append(label_1, label_12)
|
numpy.append
|
import numpy
import matplotlib.pyplot as plt
import tellurium as te
from rrplugins import Plugin
auto = Plugin("tel_auto2000")
from te_bifurcation import model2te, run_bf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import ScalarFormatter
sf = ScalarFormatter()
sf.set_scientific(False)
import re
import seaborn as sns
import os
from pickle import dump, load
from sympy import *
import sobol_seq
import pickle
from matplotlib import ticker as mticker
# Define symbolic variables for symbolic Jacobian
R, r, C1, C2, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, sR, a1, a2, b1, b2, A = symbols('R r C1 C2 mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k s_R a1 a2 b1 b2 A', positive=True, real=True)
c1A, c1B, c2, rev, koff, kR, sR0, sR, g, s, C = symbols('c1A c1B c2 rev koff kR sR0 sR g s C', positive=True, real=True)
R, r, C, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, kR, A, g = \
symbols('R r C mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k k_R A g', positive=True, real=True)
# Samples of parameter values
n = int(1E2) # Production run 1E5
ss = sobol_seq.i4_sobol_generate(6, int(n))
l = np.power(2, -3 + (4+3)*ss[:,:4])
a1sp, a2sp, b1sp, b2sp = l[:,0], l[:,1], l[:,2], l[:,3]
K1sp = 10**(ss[:,4]*(np.log10(70000)-np.log10(7)) + np.log10(7))  # log-uniform sample in [7, 7e4]
|
numpy.log10
|
'''
Created on Sep 29, 2017
@author: Michal.Busta at gmail.com
'''
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import os
f = open('codec.txt', 'r')
codec = f.readlines()[0]
f.close()
print(len(codec))
import torch
import net_utils
import argparse
import ocr_gen
import time
import collections
# from warpctc_pytorch import CTCLoss
import torch.nn as nn
from torch.autograd import Variable
from utils import E2Ecollate,E2Edataset,alignCollate,ocrDataset
from models import ModelResNetSep_final
from ocr_test_utils import print_seq_ext
from net_eval import eval_ocr
import random
class strLabelConverter(object):
"""Convert between str and label.
NOTE:
Insert `blank` to the alphabet for CTC.
Args:
alphabet (str): set of the possible characters.
        ignore_case (bool, default=False): whether or not to ignore character case.
"""
def __init__(self, alphabet, ignore_case=False):
self._ignore_case = ignore_case
if self._ignore_case:
alphabet = alphabet.lower()
self.alphabet = alphabet + '-' # for `-1` index
self.dict = {}
index = 4
for char in (alphabet):
# NOTE: 0 is reserved for 'blank' required by wrap_ctc
self.dict[char] = index
index += 1
def encode(self, text):
"""Support batch or single str.
Args:
text (str or list of str): texts to convert.
Returns:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
"""
if isinstance(text, str):
texts = []
for char in text:
if char in self.dict:
texts.append(self.dict[char.lower() if self._ignore_case else char])
else:
texts.append(3)
length = [len(text)]
elif isinstance(text, collections.Iterable):
length = [len(s) for s in text]
text = ''.join(text)
texts, _ = self.encode(text)
return (torch.IntTensor(texts), torch.IntTensor(length))
def decode(self, t, length, raw=False):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
if length.numel() == 1:
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(),
length)
if raw:
return ''.join([self.alphabet[i - 4] for i in t])
else:
char_list = []
for i in range(length):
if t[i] > 3 and t[i] < (len(self.alphabet) + 4) and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 4])
elif t[i] == 3 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(' ')
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(
t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
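# Worked example (hypothetical two-letter alphabet 'ab'): indices start at 4, so 'a' -> 4 and
# 'b' -> 5; encode('ab') returns (IntTensor([4, 5]), IntTensor([2])). decode() collapses
# repeated indices, maps index 3 back to a space, and skips indices 0-2, which are reserved
# (including the CTC blank).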
import cv2
base_lr = 0.0001
lr_decay = 0.99
momentum = 0.9
weight_decay = 0.0005
batch_per_epoch = 1000
disp_interval = 200
converter = strLabelConverter(codec)
def main(opts):
model_name = 'E2E-MLT'
net = ModelResNetSep_final(attention=True)
acc = []
ctc_loss = nn.CTCLoss()
if opts.cuda:
net.cuda()
ctc_loss.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.0005, max_lr=0.001, step_size_up=3000,
cycle_momentum=False)
step_start = 0
if os.path.exists(opts.model):
        print('loading model from %s' % opts.model)
        step_start, learning_rate = net_utils.load_net(opts.model, net, optimizer)
else:
learning_rate = base_lr
for param_group in optimizer.param_groups:
param_group['lr'] = base_lr
learning_rate = param_group['lr']
print(param_group['lr'])
step_start = 0
net.train()
#acc_test = test(net, codec, opts, list_file=opts.valid_list, norm_height=opts.norm_height)
#acc.append([0, acc_test])
# ctc_loss = CTCLoss()
ctc_loss = nn.CTCLoss()
data_generator = ocr_gen.get_batch(num_workers=opts.num_readers,
batch_size=opts.batch_size,
train_list=opts.train_list, in_train=True, norm_height=opts.norm_height, rgb = True, normalize= True)
val_dataset = ocrDataset(root=opts.valid_list, norm_height=opts.norm_height , in_train=False,is_crnn=False)
val_generator = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False,
collate_fn=alignCollate())
# val_generator1 = torch.utils.data.DataLoader(val_dataset, batch_size=2, shuffle=False,
# collate_fn=alignCollate())
cnt = 1
cntt = 0
train_loss_lr = 0
time_total = 0
train_loss = 0
now = time.time()
for step in range(step_start, 300000):
# batch
images, labels, label_length = next(data_generator)
im_data = net_utils.np_to_variable(images, is_cuda=opts.cuda).permute(0, 3, 1, 2)
features = net.forward_features(im_data)
labels_pred = net.forward_ocr(features)
# backward
'''
acts: Tensor of (seqLength x batch x outputDim) containing output from network
labels: 1 dimensional Tensor containing all the targets of the batch in one sequence
act_lens: Tensor of size (batch) containing size of each output sequence from the network
    label_lens: Tensor of (batch) containing label length of each example
'''
probs_sizes = torch.IntTensor([(labels_pred.permute(2, 0, 1).size()[0])] * (labels_pred.permute(2, 0, 1).size()[1])).long()
label_sizes = torch.IntTensor(torch.from_numpy(np.array(label_length)).int()).long()
        labels = torch.IntTensor(torch.from_numpy(np.array(labels)).int()).long()
|
numpy.array
|
"""
This module contains tests for tofu.geom in its structured version
"""
# Built-in
import os
import shutil
import itertools as itt
import warnings
# Standard
import numpy as np
import matplotlib.pyplot as plt
# tofu-specific
from tofu import __version__
import tofu as tf
_here = os.path.abspath(os.path.dirname(__file__))
VerbHead = 'tofu.mesh.test_01_checks'
_TOFU_USER = os.path.join(os.path.expanduser("~"), '.tofu')
_CUSTOM = os.path.dirname(os.path.dirname(os.path.dirname(_here)))
_CUSTOM = os.path.join(_CUSTOM, 'scripts', 'tofucustom.py')
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
# Recreating clean .tofu
# out = subprocess.run(_CUSTOM, stdout=PIPE, stderr=PIPE)
os.system('python '+_CUSTOM)
def teardown_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
#######################################################
#
# checking routines
#
#######################################################
class Test01_Inversions():
@classmethod
def setup_class(cls):
pass
def setup(self):
# create conf and cam
conf0 = tf.load_config('WEST-V0')
cam = tf.geom.utils.create_CamLOS1D(
pinhole=[3.0, 1., 0.3],
focal=0.1,
sensor_size=0.1,
sensor_nb=30,
orientation=[-5*np.pi/6, 0, 0],
config=conf0,
Name='camH',
Exp='WEST',
Diag='SXR',
)
# mesh deg 1 and 2
mesh = tf.data.Mesh2D()
mesh.add_mesh(
crop_poly=conf0,
key='try1',
res=0.10,
deg=1,
)
mesh.add_bsplines(deg=2)
# add geometry matrices
chan = np.arange(0, 30)
mesh.add_ref(key='chan', data=chan, group='chan')
mesh.add_geometry_matrix(cam=cam, key='try1-bs1', key_chan='chan')
mesh.add_geometry_matrix(cam=cam, key='try1-bs2', key_chan='chan')
# add data
t0 = np.array([0])
        t1 = np.array([0, 1.])
|
numpy.array
|
# Copyright 2021 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module on the discrete time evolution of a density matrix.
"""
from typing import Dict, List, Optional, Text, Tuple
from copy import copy
import numpy as np
from numpy import ndarray
from time_evolving_mpo.base_api import BaseAPIClass
from time_evolving_mpo.config import NpDtype, NpDtypeReal
from time_evolving_mpo.file_formats import assert_tempo_dynamics_dict
from time_evolving_mpo.util import save_object, load_object
class Dynamics(BaseAPIClass):
"""
Represents a specific time evolution of a density matrix.
Parameters
----------
times: List[float] (default = None)
A list of points in time.
states: List[ndarray] (default = None)
A list of states at the times `times`.
name: str
An optional name for the dynamics.
description: str
An optional description of the dynamics.
description_dict: dict
An optional dictionary with descriptive data.
"""
def __init__(
self,
times: Optional[List[float]] = None,
states: Optional[List[ndarray]] = None,
name: Optional[Text] = None,
description: Optional[Text] = None,
description_dict: Optional[Dict] = None) -> None:
"""Create a Dynamics object. """
# input check times and states
if times is None:
times = []
if states is None:
states = []
assert isinstance(times, list), \
"Argument `times` must be a list."
assert isinstance(states, list), \
"Argument `states` must be a list."
assert len(times) == len(states), \
"Lists `times` and `states` must have the same length."
self._times = []
self._states = []
self._expectation_operators = []
self._expectation_lists = []
self._shape = None
for time, state in zip(times, states):
self.add(time, state)
super().__init__(name, description, description_dict)
def __str__(self) -> Text:
ret = []
ret.append(super().__str__())
ret.append(" length = {} timesteps \n".format(len(self)))
if len(self) > 0:
ret.append(" min time = {} \n".format(
np.min(self._times)))
ret.append(" max time = {} \n".format(
np.max(self._times)))
return "".join(ret)
def __len__(self) -> int:
return len(self._times)
def _sort(self) -> None:
"""Sort the time evolution (chronologically). """
tuples = zip(self._times, self._states)
__times, __states = zip(*sorted(tuples)) # ToDo: make more elegant
self._times = list(__times)
self._states = list(__states)
@property
def times(self) -> ndarray:
"""Times of the dynamics. """
return np.array(self._times, dtype=NpDtypeReal)
@property
def states(self) -> ndarray:
"""States of the dynamics. """
return np.array(self._states, dtype=NpDtype)
@property
def shape(self) -> ndarray:
"""Numpy shape of the states. """
return copy(self._shape)
def add(
self,
time: float,
state: ndarray) -> None:
"""
Append a state at a specific time to the time evolution.
Parameters
----------
time: float
The point in time.
state: ndarray
The state at the time `time`.
"""
try:
__time = float(time)
except Exception as e:
raise AssertionError("Argument `time` must be float.") from e
try:
__state = np.array(state, dtype=NpDtype)
except Exception as e:
raise AssertionError("Argument `state` must be ndarray.") from e
if self._shape is None:
__shape = __state.shape
assert len(__shape) == 2, \
"State must be a square matrix. " \
+ "But the dimensions are {}.".format(__shape)
assert __shape[0] == __shape[1], \
"State must be a square matrix. " \
+ "But the dimensions are {}.".format(__shape)
self._shape = __shape
else:
assert __state.shape == self._shape, \
"Appended state doesn't have the same shape as previous " \
+ "states ({}, but should be {})".format(__state.shape,
self._shape)
self._times.append(__time)
self._states.append(__state)
        # ToDo: do this more elegantly and with less resource use.
if len(self) > 1 and (self._times[-1] < np.max(self._times[:-1])):
self._sort()
def export(
self,
filename: Text,
overwrite: bool = False) -> None:
"""
Save dynamics to a file (format TempoDynamicsFormat version 1.0).
Parameters
----------
filename: str
Path and filename to file that should be created.
overwrite: bool (default = False)
If set `True` then file is overwritten in case it already exists.
"""
dyn = {"version": "1.0",
"name": self.name,
"description": self.description,
"description_dict": self.description_dict,
"times": self.times,
"states": self.states}
assert_tempo_dynamics_dict(dyn)
save_object(dyn, filename, overwrite)
def expectations(
self,
operator: Optional[ndarray] = None,
real: Optional[bool] = False) -> Tuple[ndarray, ndarray]:
r"""
Return the time evolution of the expectation value of specific
operator. The expectation for :math:`t` is
.. math::
\langle \hat{O}(t) \rangle = \mathrm{Tr}\{ \hat{O} \rho(t) \}
with `operator` :math:`\hat{O}`.
Parameters
----------
operator: ndarray (default = None)
The operator :math:`\hat{O}`. If `operator` is `None` then the
trace of :math:`\rho(t)` is returned.
real: bool (default = False)
If set True then only the real part of the expectation is returned.
Returns
-------
times: ndarray
The points in time :math:`t`.
expectations: ndarray
Expectation values :math:`\langle \hat{O}(t) \rangle`.
"""
if len(self) == 0:
return None, None
if operator is None:
__operator = np.identity(self._shape[0], dtype=NpDtype)
else:
try:
__operator = np.array(operator, dtype=NpDtype)
except Exception as e:
raise AssertionError("Argument `operator` must be ndarray.") \
from e
assert __operator.shape == self._shape, \
"Argument `operator` must have the same shape as the " \
+ "states. Has shape {}, ".format(__operator.shape) \
+ "but should be {}.".format(self._shape)
operator_index = next((i for i, op in \
enumerate(self._expectation_operators) if \
np.array_equal(op, __operator)), -1)
if operator_index == -1: # Operator not seen before
self._expectation_operators.append(__operator)
self._expectation_lists.append([])
expectations_list = self._expectation_lists[operator_index]
for state in self._states[len(expectations_list):]:
expectations_list.append(np.trace(__operator @ state))
self._expectation_lists[operator_index] = expectations_list
times = np.array(self._times)
if real:
            expectations = np.real(np.array(expectations_list))
|
numpy.array
|
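# A minimal NumPy-only sketch (hypothetical operator and state) of the expectation rule
# documented above: <O(t)> = Tr{ O rho(t) }.
import numpy as np

rho = np.array([[0.75, 0.0], [0.0, 0.25]])   # example density matrix at a single time step
sigma_z = np.diag([1.0, -1.0])               # example observable
expectation = np.trace(sigma_z @ rho)        # 0.75 - 0.25 = 0.5
print(expectation)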
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
from mpl_toolkits import mplot3d
def read_data(filename,dim):
with open(filename, 'r') as f:
lines = f.readlines()
num_points = len(lines)
dim_points = dim
data = np.empty((num_points, dim_points))
labels = np.empty(num_points)
for ind, line in enumerate(lines):
num = line.split(',')
labels[ind] = num[0]
data[ind] = num[1:]
return (data, labels)
dim = 13
wine_data, wine_labels = read_data("wine_data.csv",dim)
rescaling_data = wine_data.T
for i in range(dim):
column = rescaling_data[i]
print(column.shape)
mean = np.mean(column)
var = np.std(column)
print('Before doing ::', np.mean(rescaling_data[i]), '... var is :' , np.std(rescaling_data[i]))
rescaling_data[i] = (rescaling_data[i]-mean) / var
print('After doing ::', np.mean(rescaling_data[i]), '... var is :' , np.std(rescaling_data[i]))
rescaling_data = rescaling_data.T
P = np.dot(rescaling_data.T, rescaling_data)
|
numpy.dot
|
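# A minimal follow-on sketch (not part of the original script, hypothetical stand-in data):
# after the z-scoring above, P divided by the number of samples is the feature correlation
# matrix, and its eigen-decomposition yields the principal component directions.
import numpy as np

X = np.random.randn(178, 13)                  # stand-in for the standardized wine data
P_demo = np.dot(X.T, X)
evals, evecs = np.linalg.eigh(P_demo / X.shape[0])
print(evals[::-1][:3])                        # three largest eigenvalues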
import numpy as np
import time
import copy
from foolbox.utils import crossentropy, softmax
import torch
def sigmoid(inX):
return 1.0 / (1 + np.exp(-inX))
def l2_distance(a, b):
return (np.sum((a/255.0 - b/255.0) ** 2))**0.5
def ortho(noise):
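    # Descriptive note: draws a random vector, removes its component along `noise`
    # (a Gram-Schmidt style projection), zero-centers the result and reshapes it to a
    # (1, 28, 28) image-shaped perturbation.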
noise_dim=noise.shape
xr=(np.random.rand(noise_dim[0]))
xo=xr-(np.sum(noise*xr)/np.sum(noise**2))*noise
xo -= np.mean(xo)
xo=np.reshape(xo, (1, 28, 28))
return xo
def cw_loss_calculator(label, inputs):
return np.max(inputs) - inputs[label]
def griddle(noise, rate):
noise_temp = np.round(noise)
noise_temp = np.abs(noise_temp)
negative_rate = 1 - rate
perturbed_num = np.sum(noise_temp != 0)
deleted = 0
for i in range(1, 256):
if np.sum(noise_temp == i) != 0:
temp_deleted = deleted + np.sum(noise_temp == i)
if temp_deleted/(perturbed_num * 1.0) >= negative_rate:
lottery_rate = (negative_rate*perturbed_num*1.0 - deleted)/(np.sum(noise_temp == i))
temp_A = copy.deepcopy(noise_temp)
temp_A[temp_A != i] = 0
temp_B = np.random.uniform(0, 1, np.shape(temp_A))
temp_B[temp_B<lottery_rate] = 0
temp_B[temp_B>=lottery_rate] = 1
noise_temp = noise_temp - temp_A + temp_A*temp_B
break
else:
noise_temp[noise_temp == i] = 0
deleted = temp_deleted
mask = copy.deepcopy(noise_temp)
mask[mask != 0] = 1
return mask
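# --- Added sanity check (not part of the original module): `griddle` returns a binary
# mask that keeps roughly `rate` of the (rounded, nonzero) noise entries, preferring
# the larger-magnitude ones. ---
_demo_noise = np.random.randint(-5, 6, size=(8, 8)).astype(np.float64)
_demo_mask = griddle(_demo_noise, rate=0.3)
print(int(_demo_mask.sum()), int(np.count_nonzero(np.round(np.abs(_demo_noise)))))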
def clip(x, min_x=-1, max_x=1):
x[x < min_x] = min_x
x[x > max_x] = max_x
return x
def l2_distance(a, b):
return (np.sum((np.round(a)/255.0 - np.round(b)/255.0) ** 2))**0.5
class Attacker:
def __init__(self, model):
self.model = model
def attack(self, inputs):
return NotImplementedError
def attack_target(self, inputs, targets):
return NotImplementedError
class EvolutionaryAttack(Attacker):
def __init__(self, model):
self.model = model
def ce_and_cw_loss(self, inputs, label):
logits = self.model.forward_one(np.round(inputs).astype(np.float32))
ce_loss = crossentropy(label, logits)
cw_loss = cw_loss_calculator(label, logits)
return ce_loss, cw_loss
def cw_prob_calculator(self, logits, label):
predict_label = np.argmax(logits)
exp_logits = np.exp(logits)
prob = exp_logits/np.sum(exp_logits)
if predict_label != label:
cw_prob = np.max(prob) - prob[label]
else:
temp_prob = copy.deepcopy(prob)
temp_prob[label] = -9999
near_label = np.argmax(temp_prob)
cw_prob = prob[near_label] - prob[label]
return cw_prob
def predictions(self, inputs):
logits = self.model.forward_one(np.round(inputs).astype(np.float32))
return np.argmax(logits), logits
def distance(self, input1, input2, min_, max_):
return np.mean((input1 - input2) ** 2) / ((max_ - min_) ** 2)
def print_distance(self, distance):
return np.sqrt(distance * 1*28*28)
def log_step(self, step, distance, spherical_step, source_step, message=''):
print('Step {}: {:.5f}, stepsizes = {:.1e}/{:.1e}: {}'.format(
step,
self.print_distance(distance),
spherical_step,
source_step,
message))
def evolutionary_attack(
self,
original,
label,
starting_point,
initial_time,
time_limit=10,
iterations=1000,
spherical_step=3e-2,
source_step=1e-2,
min_=0.0,
max_=255.0,
mode='targeted',
rescale_or_not = False,
rate = 0.2,
step_decay_factor=0.99,
big_size = 64,
center_size = 40):
from numpy.linalg import norm
from scipy import interpolate
import collections
resize_factor = 4
perturbed = starting_point
dis = self.distance(perturbed, original, min_, max_)
shape = [center_size, center_size]
big_shape = [big_size, big_size, 3]
decay_factor = 0.99
init_source_step = copy.deepcopy(source_step)
init_spherical_step = copy.deepcopy(spherical_step)
center_shape = [center_size, center_size, 3]
pert_shape = [int(shape[0]/resize_factor), int(shape[1]/resize_factor), 3]
if rescale_or_not == 1 or rescale_or_not == 5 or rescale_or_not == 55 or rescale_or_not == 29:
evolution_path = np.zeros(pert_shape , dtype=original.dtype)
diagonal_covariance = np.ones(pert_shape, dtype=original.dtype)
elif rescale_or_not == 4:
evolution_path = np.zeros(center_shape, dtype=original.dtype)
diagonal_covariance = np.ones(center_shape, dtype=original.dtype)
else:
evolution_path = np.zeros(big_shape, dtype=original.dtype)
diagonal_covariance = np.ones(big_shape, dtype=original.dtype)
c = 0.001
stats_step_adversarial = collections.deque(maxlen=20)
neg_improve_num = 0
evolutionary_doc = np.zeros(iterations)
best_dis = 0
success_num = 0
if rescale_or_not == 15 or rescale_or_not == 22 or rescale_or_not == 39 or rescale_or_not == 23 or rescale_or_not == 29 or rescale_or_not == 31 or rescale_or_not == 33 or rescale_or_not == 34 or rescale_or_not == 35: # amend the mean; flag controls whether the correction is learned
amend_flag = False
amend_list = []
if rescale_or_not == 16:
amend_list = []
if rescale_or_not == 17:
amend = 0
if rescale_or_not == 18 or rescale_or_not == 19:
success_list = []
fail_list = []
if rescale_or_not == 37:
last_50_success = 0
if rescale_or_not == 21 or rescale_or_not == 24 or rescale_or_not == 25 or rescale_or_not == 26 or rescale_or_not == 27 or rescale_or_not == 28:
success_noise_list = [perturbed - original]
fail_noise_list = []
if rescale_or_not == 28:
temp_result, temp_logits = self.predictions(perturbed)
success_prob = [self.cw_prob_calculator(temp_logits, label)]
if rescale_or_not == 30 or rescale_or_not == 31:
temp_result, temp_logits = self.predictions(perturbed)
noise_list = [perturbed - original]
prob_list = [self.cw_prob_calculator(temp_logits, label)]
prob_saver = []
sample_num = 10
backup_perturbation = []
backup_prob = []
if rescale_or_not == 33:
prob_est = 0
for step in range(1, iterations + 1):
unnormalized_source_direction = original - perturbed
source_norm = norm(unnormalized_source_direction)
clipper_counter = 0
if rescale_or_not == 1:
perturbation = np.random.normal(0, 1, pert_shape)
perturbation *= np.sqrt(diagonal_covariance)
x = np.array(range(pert_shape[1]))
y = np.array(range(pert_shape[2]))
f1 = interpolate.interp2d(y, x, perturbation[0,:,:], kind='linear')
newx = np.linspace(0, pert_shape[1], shape[1])
newy = np.linspace(0, pert_shape[0], shape[0])
perturbation_mid = f1(newx, newy).reshape(shape[0], shape[1], 1)
perturbation_large = np.zeros([big_size, big_size, 1])
starting_pos = int((big_size - center_size) / 2)
perturbation_large[starting_pos:(starting_pos+center_size), starting_pos:(starting_pos+center_size), :] = perturbation_mid
elif rescale_or_not == 2:
perturbation_large = np.random.normal(0, 1, big_shape)
elif rescale_or_not == 3:
perturbation_large = ortho(np.reshape(unnormalized_source_direction, (-1)))
elif rescale_or_not == 4:
perturbation = np.random.normal(0, 1, center_shape)
perturbation *= np.sqrt(diagonal_covariance)
starting_pos = int((big_size - center_size) / 2)
perturbation_large = np.zeros([big_size, big_size, 3])
perturbation_large[starting_pos:(starting_pos+center_size), starting_pos:(starting_pos+center_size), :] = perturbation
elif rescale_or_not ==5:
perturbation = np.random.normal(0, 1, pert_shape)
perturbation *= np.sqrt(diagonal_covariance)
x = np.array(range(pert_shape[1]))
y = np.array(range(pert_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='linear')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='linear')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='linear')
newx = np.linspace(0, pert_shape[1], big_shape[1])
newy = np.linspace(0, pert_shape[0], big_shape[0])
perturbation_large = np.concatenate([f1(newx, newy).reshape(big_shape[0], big_shape[1], 1),
f2(newx, newy).reshape(big_shape[0], big_shape[1], 1),
f3(newx, newy).reshape(big_shape[0], big_shape[1], 1)], axis=2)
elif rescale_or_not == 6:
perturbation_large = np.random.normal(0, 1, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 7:
perturbation_large = np.random.normal(0, 1, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
perturbation_large *= np.sqrt(diagonal_covariance)
elif rescale_or_not ==55:
perturbation = np.random.normal(0, 1, pert_shape)
x = np.array(range(pert_shape[1]))
y = np.array(range(pert_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='linear')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='linear')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='linear')
newx = np.linspace(0, pert_shape[1], big_shape[1])
newy = np.linspace(0, pert_shape[0], big_shape[0])
perturbation_large = np.concatenate([f1(newx, newy).reshape(big_shape[0], big_shape[1], 1),
f2(newx, newy).reshape(big_shape[0], big_shape[1], 1),
f3(newx, newy).reshape(big_shape[0], big_shape[1], 1)], axis=2)
elif rescale_or_not == 8:
perturbation = np.random.normal(0, 1, big_shape)
x = np.array(range(big_shape[1]))
y = np.array(range(big_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='linear')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='linear')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='linear')
perturbation_large = np.concatenate([f1(x, y).reshape(big_shape[0], big_shape[1], 1),
f2(x, y).reshape(big_shape[0], big_shape[1], 1),
f3(x, y).reshape(big_shape[0], big_shape[1], 1)], axis=2)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 9:
perturbation = np.random.normal(0, 1, big_shape)
perturbation *= np.sqrt(diagonal_covariance)
x = np.array(range(big_shape[1]))
y = np.array(range(big_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='linear')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='linear')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='linear')
perturbation_large = np.concatenate([f1(x, y).reshape(big_shape[0], big_shape[1], 1),
f2(x, y).reshape(big_shape[0], big_shape[1], 1),
f3(x, y).reshape(big_shape[0], big_shape[1], 1)], axis=2)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 10:
perturbation = np.random.normal(0, 1, big_shape)
x = np.array(range(big_shape[1]))
y = np.array(range(big_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='cubic')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='cubic')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='cubic')
perturbation_large = np.concatenate([f1(x, y).reshape(big_shape[0], big_shape[1], 1),
f2(x, y).reshape(big_shape[0], big_shape[1], 1),
f3(x, y).reshape(big_shape[0], big_shape[1], 1)], axis=2)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 11:
perturbation = np.random.normal(0, 1, big_shape)
x = np.array(range(big_shape[1]))
y = np.array(range(big_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='quintic')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='quintic')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='quintic')
perturbation_large = np.concatenate([f1(x, y).reshape(big_shape[0], big_shape[1], 1),
f2(x, y).reshape(big_shape[0], big_shape[1], 1),
f3(x, y).reshape(big_shape[0], big_shape[1], 1)], axis=2)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 12:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 13:
while 1:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
line_candidate = perturbed + source_step * unnormalized_source_direction
candidate = line_candidate + spherical_step * source_norm * perturbation_large / max(norm(perturbation_large), 1e-6)
candidate = original - (original - candidate) / norm(original - candidate) * norm(original - line_candidate)
candidate = clip(candidate, min_, max_)
if l2_distance(original, candidate) < l2_distance(original, perturbed):
break
else:
clipper_counter += 1
elif rescale_or_not == 14:
var = np.abs(perturbed - original)
# print(np.max(var))
var+=1
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 15:
# print(amend_flag)
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
# print("mean", np.mean(mean))
perturbation_large = np.random.normal(mean, 1, big_shape)
else:
perturbation_large = np.random.normal(0, 1, big_shape)
elif rescale_or_not == 16:
# print(amend_flag)
if len(amend_list) != 0:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
# print("mean", np.mean(mean))
perturbation_large = np.random.normal(mean, 1, big_shape)
else:
perturbation_large = np.random.normal(0, 1, big_shape)
elif rescale_or_not == 17:
mean = -1*amend
perturbation_large = np.random.normal(mean, 1, big_shape)
elif rescale_or_not == 18:
if len(fail_list) >= 1 and len(success_list) >= 1:
fail_array = np.array(fail_list)
success_array = np.array(success_list)
while 1:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
temp_fail_dist = np.sqrt(np.sum((fail_array - perturbation_large)**2)/len(fail_list))
temp_success_dist = np.sqrt(np.sum((success_array - perturbation_large)**2)/len(success_list))
if temp_success_dist < 1.1*temp_fail_dist:
break
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 19:
if len(fail_list) >= 1 and len(success_list) >= 1:
fail_array = np.array(fail_list)
success_array = np.array(success_list)
while 1:
perturbation_large = np.random.normal(0, 1, big_shape)
temp_fail_dist = np.sqrt(np.sum((fail_array - perturbation_large)**2)/len(fail_list))
temp_success_dist = np.sqrt(np.sum((success_array - perturbation_large)**2)/len(success_list))
if temp_success_dist < 1.1*temp_fail_dist:
break
else:
perturbation_large = np.random.normal(0, 1, big_shape)
elif rescale_or_not == 20:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 21:
if len(fail_noise_list) >= 1 and len(success_noise_list) >= 1:
temp_line_candidate = perturbed + source_step * unnormalized_source_direction
while 1:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
temp_candidate = temp_line_candidate + spherical_step * source_norm * perturbation_large / max(norm(perturbation_large), 1e-6)
temp_candidate = original - (original - temp_candidate) / norm(original - temp_candidate) * norm(original - temp_line_candidate)
temp_candidate = clip(temp_candidate, min_, max_)
temp_input_noise = temp_candidate - original
fail_noise_dist = np.sqrt(np.sum((np.array(fail_noise_list) - temp_input_noise)**2)/len(fail_noise_list))
success_noise_dist = np.sqrt(np.sum((np.array(success_noise_list) - temp_input_noise)**2)/len(success_noise_list))
print(fail_noise_dist, success_noise_dist)
if success_noise_dist < 1.5*fail_noise_dist:
print(len(fail_noise_list), len(success_noise_list))
break
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 22 or rescale_or_not == 39:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 33:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 34:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 35:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = var / np.max(var)
perturbation_large *= mask
elif rescale_or_not == 23:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
# print("mean", np.mean(mean))
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 24:
success_noise_mean = np.mean(np.array(success_noise_list), axis=0)
# print(np.shape(success_noise_mean))
# print(np.max(success_noise_mean), np.min(success_noise_mean), np.mean(success_noise_mean))
var = np.abs(success_noise_mean)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 25:
total_noise_array = np.append(np.array(success_noise_list), -1*np.array(fail_noise_list))
total_noise_mean = np.mean(total_noise_array, axis=0)
var = np.abs(total_noise_mean)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 26:
success_noise_mean = np.mean(np.array(success_noise_list), axis=0)
var = np.abs(success_noise_mean)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 27:
total_noise_array = np.append(np.array(success_noise_list), np.array(fail_noise_list))
total_noise_mean = np.mean(total_noise_array, axis=0)
var = np.abs(total_noise_mean)
perturbation_large = np.random.normal(0, var, big_shape)
elif rescale_or_not == 28:
if len(success_noise_list)==1:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
else:
var_bias = np.array(success_noise_list[-1]) - np.array(success_noise_list[-2])
var_bias /= norm(var_bias)
var = np.abs(perturbed - original)
var /= norm(var)
if success_prob[-1] < success_prob[-2]:
# print("down")
var -= var_bias
var[var < 0] = 0
perturbation_large = np.random.normal(0, var, big_shape)
else:
var += var_bias
var[var < 0] = 0
perturbation_large = np.random.normal(0, var, big_shape)
# print("mean of var and var_bias", np.mean(np.abs(var)), np.mean(var_bias))
elif rescale_or_not == 29:
starting_pos = int((big_size - center_size) / 2)
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
else:
mean = 0
down_sample_noise = perturbed - original
down_sample_mid = down_sample_noise[starting_pos:(starting_pos+center_size), starting_pos:(starting_pos+center_size), :]
center_trans_shape = [1, 3, center_size, center_size]
down_sampler = torch.nn.AvgPool2d(resize_factor, stride=resize_factor)
trans_down_sample_mid = torch.from_numpy(np.reshape(np.transpose(down_sample_mid, (2, 0, 1)), center_trans_shape))
down_sample_small = down_sampler(trans_down_sample_mid).numpy()[0]
down_sample_small_noise = np.transpose(down_sample_small, (1,2,0))
perturbation = np.random.normal(mean, np.abs(down_sample_small_noise), pert_shape)
x = np.array(range(pert_shape[1]))
y = np.array(range(pert_shape[0]))
f1 = interpolate.interp2d(y, x,perturbation[:,:,0], kind='linear')
f2 = interpolate.interp2d(y, x,perturbation[:,:,1], kind='linear')
f3 = interpolate.interp2d(y, x,perturbation[:,:,2], kind='linear')
newx = np.linspace(0, pert_shape[1], shape[1])
newy = np.linspace(0, pert_shape[0], shape[0])
perturbation_mid = np.concatenate([f1(newx, newy).reshape(shape[0], shape[1], 1),
f2(newx, newy).reshape(shape[0], shape[1], 1),
f3(newx, newy).reshape(shape[0], shape[1], 1)], axis=2)
perturbation_large = np.zeros([big_size, big_size, 3])
perturbation_large[starting_pos:(starting_pos+center_size), starting_pos:(starting_pos+center_size), :] = perturbation_mid
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
if rescale_or_not == 32:
perturbation_large = np.random.normal(0, 1, big_shape)
elif rescale_or_not == 30:
prob_array = np.array(prob_list)
noise_array = np.array(noise_list)
if len(prob_array[prob_array>0]) >= sample_num:
if amend_flag == True:
temp_amend_array = np.array(amend_list)
mean = -1*np.mean(temp_amend_array, axis=0)
else:
mean = 0
temp_line_candidate = perturbed + source_step * unnormalized_source_direction
for temp_counter in range(5):
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(mean, var, big_shape)
temp_candidate = temp_line_candidate + spherical_step * source_norm * perturbation_large / max(norm(perturbation_large), 1e-6)
temp_candidate = original - (original - temp_candidate) / norm(original - temp_candidate) * norm(original - temp_line_candidate)
temp_candidate = clip(temp_candidate, min_, max_)
temp_input_noise = temp_candidate - original
dist_array = np.sqrt(np.sum((noise_array/255.0 - temp_input_noise/255.0)**2, axis=(1,2,3))) # distance array
index = np.argsort(dist_array)
near_prob = prob_array.take(index[:sample_num])
mean_prob = np.mean(near_prob)
if mean_prob > np.mean(prob_array) or mean_prob > 0:
break
else:
backup_perturbation.append(perturbation_large)
backup_prob.append(mean_prob)
if temp_counter==4:
backup_prob_array = np.array(backup_prob)
backup_index = np.argsort(backup_prob_array)[-1]
perturbation_large = backup_perturbation[backup_index]
backup_prob[backup_index] = -9999
else:
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
mask = griddle(perturbed - original, rate)
perturbation_large *= mask
elif rescale_or_not == 31:
prob_array = np.array(prob_list)
noise_array = np.array(noise_list)
if len(prob_array[prob_array>0]) >= sample_num:
temp_line_candidate = perturbed + source_step * unnormalized_source_direction
for temp_counter in range(5):
var = np.abs(perturbed - original)
perturbation_large = np.random.normal(0, var, big_shape)
temp_candidate = temp_line_candidate + spherical_step * source_norm * perturbation_large / max(norm(perturbation_large), 1e-6)
temp_candidate = original - (original - temp_candidate) / norm(original - temp_candidate) * norm(original - temp_line_candidate)
temp_candidate = clip(temp_candidate, min_, max_)
temp_input_noise = temp_candidate - original
dist_array = np.sqrt(np.sum((noise_array/255.0 - temp_input_noise/255.0)**2, axis=(1,2,3))) # distance array
index = np.argsort(dist_array)
near_prob = prob_array.take(index[:sample_num])
# print("near_prob", near_prob)
mean_prob = np.mean(near_prob)
if mean_prob >
|
np.mean(prob_array)
|
numpy.mean
|
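# --- Added standalone sketch (not part of the original module) of the candidate update
# used inside the loop above: step toward the original along the source direction, add a
# spherical perturbation, project back to the line candidate's distance, and clip to the
# valid pixel range. Toy data only. ---
import numpy as np
from numpy.linalg import norm
rng = np.random.default_rng(0)
original = rng.uniform(0.0, 255.0, (64, 64, 3))
perturbed = np.clip(original + rng.normal(0.0, 20.0, original.shape), 0.0, 255.0)
spherical_step, source_step = 3e-2, 1e-2
unnormalized_source_direction = original - perturbed
source_norm = norm(unnormalized_source_direction)
perturbation = rng.normal(0.0, 1.0, original.shape)
line_candidate = perturbed + source_step * unnormalized_source_direction
candidate = line_candidate + spherical_step * source_norm * perturbation / max(norm(perturbation), 1e-6)
candidate = original - (original - candidate) / norm(original - candidate) * norm(original - line_candidate)
candidate = np.clip(candidate, 0.0, 255.0)
print(norm(original - perturbed), norm(original - candidate))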
import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import astropy.units as u
from astropy.coordinates import angles
#import AngularSeparation
from astropy import coordinates as coord
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
import ClusterScalingRelations as clsr
from scipy.interpolate import interp1d
import StellarMass as sm
smhmr = sm.StellarMass()
scl = clsr.ClusterScalingRelations_Mantz2016()
cat = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1.fits.gz'))[1].data
spm = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1_spm.fits'))[1].data
volume_rough = aa.comoving_volume(0.5)*2200.*n.pi/129600
volume = volume_rough.value
# get cluster center
# distance to center
# rescale to r200c_deg
# get the latest min(ages) of the ssp
# compute SFR
# now looks at individual galaxies
# and gets the highest SFR for each galaxy
# youngest age
highest_sfrs = []
youngest_ages = []
sep_r200c = []
for cc in cat:
center = coord.ICRS(ra=cc['RA_OPT']*u.degree, dec=cc['DEC_OPT']*u.degree)
gal = (spm['CLUS_ID']==cc['CLUS_ID'])
#all_members = coord.ICRS()
#separations = center.separation(all_members)/(cc['R200C_DEG']*u.degree)).value
for id_cc, (pla, mjd, fib) in enumerate(zip(cc['ALLPLATE'][:len(gal.nonzero()[0])], cc['ALLMJD'][:len(gal.nonzero()[0])], cc['ALLFIBERID'][:len(gal.nonzero()[0])])):
sel = (gal) & (spm['PLATE']==pla) & (spm['MJD']==mjd) & (spm['FIBERID']==fib)
if len(sel.nonzero()[0])>0 :
n_cp = spm['Chabrier_MILES_nComponentsSSP'][sel].astype('int')[0]
if n_cp > 0 :
all_ages = n.array([ spm['Chabrier_MILES_age_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ])
all_masses = n.array([ spm['Chabrier_MILES_stellar_mass_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ])
sfr_inst = all_masses / all_ages
youngest_ages.append(n.min(all_ages))
highest_sfrs.append(n.max(sfr_inst))
position = coord.ICRS(cc['ALLRA'][id_cc]*u.degree, cc['ALLDEC'][id_cc]*u.degree)
sep_r200c.append( (center.separation(position)/(cc['R200C_DEG']*u.degree)).value )
highest_sfrs = n.array(highest_sfrs)
youngest_ages = n.array(youngest_ages)
sep_r200c = n.array(sep_r200c)
p.figure(1, (5,5))
p.title('SPIDERS')
p.plot(sep_r200c, highest_sfrs, 'r+')
p.xlabel('r/r200c')
p.ylabel('SFR [Msun/yr]')
#p.xscale('log')
p.yscale('log')
p.xlim((0.08,1.5))
p.grid()
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR.png'))
p.clf()
dx = ( n.max(sep_r200c) - n.min(sep_r200c) ) /3.
r_b = n.arange(n.min(sep_r200c), n.max(sep_r200c) + dx, dx)
p.figure(1, (5,5))
for ii,bb in enumerate(r_b[:-1]):
sub = (sep_r200c>bb)&(sep_r200c<r_b[ii+1])
p.hist(highest_sfrs[sub], label=str(n.round(bb,3))+"<"+str(n.round(r_b[ii+1],3)), cumulative=True, normed=True, histtype='step')
p.ylabel('normed cumulative distribution')
p.xlabel('SFR [Msun/yr]')
p.xscale('log')
p.ylim((-0.01, 1.01))
p.grid()
p.legend(frameon=False, loc=0)
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR-histograms.png'))
p.clf()
p.figure(1, (5,5))
p.title('SPIDERS')
p.plot(sep_r200c, youngest_ages, 'r+')
p.xlabel('r/r200c')
p.ylabel('age [yr]')
p.xscale('log')
p.yscale('log')
p.xlim((0.1,5))
p.grid()
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-AGE.png'))
p.clf()
p.figure(1, (5,5))
p.title('SPIDERS DR14 galaxies')
p.plot(spm['Z'], spm["Chabrier_MILES_stellar_mass"], 'b,', label='targets')
p.plot(z, y, 'r,', label='cluster members')
p.xlabel('redshift')
p.ylabel('stellar mass [Msun]')
#p.xscale('log')
p.yscale('log')
p.xlim((0,0.7))
p.ylim((1e9,1e12))
p.grid()
p.legend(frameon=False, loc=0)
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'redshift-mass.png'))
p.clf()
logm2x = n.hstack((m2x))
bins=n.arange(-7, 0.5, 0.1)
basis = (n.isnan(logm2x)==False)&(logm2x != -n.inf)&(logm2x != n.inf)
arbitrary_factor = 5.
p.figure(1, (5,5))
ok = (basis)&(x>1e44)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44')
ok = (basis)&(x>10**44.5)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44.5')
ok = (basis)&(x>1e45)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2.,
|
n.log10(out/arbitrary_factor)
|
numpy.log10
|
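# --- Added sketch (not part of the original script): the center-distance scaling used
# above, i.e. the angular separation between the cluster centre and a member galaxy
# expressed in units of R200C. Coordinates and R200C are made-up values. ---
from astropy import units as u
from astropy import coordinates as coord
center = coord.ICRS(ra=150.00 * u.degree, dec=2.00 * u.degree)
member = coord.ICRS(ra=150.10 * u.degree, dec=2.05 * u.degree)
r200c_deg = 0.2
sep_over_r200c = (center.separation(member) / (r200c_deg * u.degree)).value
print(sep_over_r200c)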
# -*- coding: utf-8 -*-
__all__ = [
'MarkovChain'
]
###########
# IMPORTS #
###########
# Standard
from copy import (
deepcopy as _deepcopy
)
from inspect import (
getmembers as _ins_getmembers,
isfunction as _ins_isfunction,
stack as _ins_stack,
trace as _ins_trace
)
from itertools import (
chain as _it_chain
)
from math import (
gcd as _math_gcd
)
# Libraries
import networkx as _nx
import numpy as _np
import numpy.linalg as _npl
# Internal
from .base_class import (
BaseClass as _BaseClass
)
from .computations import (
calculate_periods as _calculate_periods,
eigenvalues_sorted as _eigenvalues_sorted,
find_cyclic_classes as _find_cyclic_classes,
find_lumping_partitions as _find_lumping_partitions,
gth_solve as _gth_solve,
rdl_decomposition as _rdl_decomposition,
slem as _slem
)
from .custom_types import (
oarray as _oarray,
ofloat as _ofloat,
oint as _oint,
ointerval as _ointerval,
olist_str as _olist_str,
onumeric as _onumeric,
ostate as _ostate,
ostates as _ostates,
ostatus as _ostatus,
otimes_out as _otimes_out,
owalk as _owalk,
tany as _tany,
tarray as _tarray,
tbcond as _tbcond,
tcache as _tcache,
tgraph as _tgraph,
tgraphs as _tgraphs,
tlist_array as _tlist_array,
tlist_int as _tlist_int,
tlist_str as _tlist_str,
tlists_int as _tlists_int,
tlists_str as _tlists_str,
tmc as _tmc,
tmc_dict as _tmc_dict,
tmc_dict_flex as _tmc_dict_flex,
tnumeric as _tnumeric,
tpart as _tpart,
tparts as _tparts,
trdl as _trdl,
tredists as _tredists,
tstate as _tstate,
tstates as _tstates,
ttfunc as _ttfunc,
ttimes_in as _ttimes_in,
twalk as _twalk,
tweights as _tweights
)
from .decorators import (
alias as _alias,
aliased as _aliased,
cached_property as _cached_property,
random_output as _random_output
)
from .exceptions import (
ValidationError as _ValidationError
)
from .files_io import (
read_csv as _read_csv,
read_json as _read_json,
read_txt as _read_txt,
read_xml as _read_xml,
write_csv as _write_csv,
write_json as _write_json,
write_txt as _write_txt,
write_xml as _write_xml
)
from .fitting import (
fit_function as _fit_function,
fit_walk as _fit_walk
)
from .generators import (
approximation as _approximation,
birth_death as _birth_death,
bounded as _bounded,
canonical as _canonical,
closest_reversible as _closest_reversible,
gamblers_ruin as _gamblers_ruin,
lazy as _lazy,
lump as _lump,
random as _random,
sub as _sub,
urn_model as _urn_model
)
from .measures import (
absorption_probabilities as _absorption_probabilities,
committor_probabilities as _committor_probabilities,
expected_rewards as _expected_rewards,
expected_transitions as _expected_transitions,
first_passage_reward as _first_passage_reward,
first_passage_probabilities as _first_passage_probabilities,
hitting_probabilities as _hitting_probabilities,
hitting_times as _hitting_times,
mean_absorption_times as _mean_absorption_times,
mean_first_passage_times_between as _mean_first_passage_times_between,
mean_first_passage_times_to as _mean_first_passage_times_to,
mean_number_visits as _mean_number_visits,
mean_recurrence_times as _mean_recurrence_times,
mixing_time as _mixing_time,
sensitivity as _sensitivity,
time_correlations as _time_correlations,
time_relaxations as _time_relaxations
)
from .simulations import (
predict as _predict,
redistribute as _redistribute,
simulate as _simulate,
walk_probability as _walk_probability
)
from .utilities import (
create_rng as _create_rng,
generate_validation_error as _generate_validation_error
)
from .validation import (
validate_boolean as _validate_boolean,
validate_boundary_condition as _validate_boundary_condition,
validate_dictionary as _validate_dictionary,
validate_enumerator as _validate_enumerator,
validate_file_path as _validate_file_path,
validate_float as _validate_float,
validate_graph as _validate_graph,
validate_hyperparameter as _validate_hyperparameter,
validate_integer as _validate_integer,
validate_interval as _validate_interval,
validate_mask as _validate_mask,
validate_matrix as _validate_matrix,
validate_partitions as _validate_partitions,
validate_rewards as _validate_rewards,
validate_state as _validate_state,
validate_state_names as _validate_state_names,
validate_states as _validate_states,
validate_status as _validate_status,
validate_time_points as _validate_time_points,
validate_transition_function as _validate_transition_function,
validate_transition_matrix as _validate_transition_matrix,
validate_vector as _validate_vector
)
###########
# CLASSES #
###########
@_aliased
class MarkovChain(metaclass=_BaseClass):
"""
Defines a Markov chain with a given transition matrix and state names.
:param p: the transition matrix.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
def __init__(self, p: _tnumeric, states: _olist_str = None):
caller = _ins_stack()[1][3]
sm = [x[1].__name__ for x in _ins_getmembers(MarkovChain, predicate=_ins_isfunction) if x[1].__name__[0] != '_' and isinstance(MarkovChain.__dict__.get(x[1].__name__), staticmethod)]
if caller not in sm:
try:
p = _validate_transition_matrix(p)
states = [str(i) for i in range(1, p.shape[0] + 1)] if states is None else _validate_state_names(states, p.shape[0])
except Exception as e: # pragma: no cover
raise _generate_validation_error(e, _ins_trace()) from None
size = p.shape[0]
graph = _nx.DiGraph(p)
graph = _nx.relabel_nodes(graph, dict(zip(range(size), states)))
self.__cache: _tcache = {}
self.__digraph: _tgraph = graph
self.__p: _tarray = p
self.__size: int = size
self.__states: _tlist_str = states
def __eq__(self, other) -> bool:
if isinstance(other, MarkovChain):
return _np.array_equal(self.p, other.p) and self.states == other.states
return False
def __hash__(self) -> int:
return hash((self.p.tobytes(), tuple(self.states)))
def __repr__(self) -> str:
return self.__class__.__name__
# noinspection PyListCreation
def __str__(self) -> str:
lines = ['']
lines.append('DISCRETE-TIME MARKOV CHAIN')
lines.append(f' SIZE: {self.size:d}')
lines.append(f' RANK: {self.rank:d}')
lines.append(f' CLASSES: {len(self.communicating_classes):d}')
lines.append(f' > RECURRENT: {len(self.recurrent_classes):d}')
lines.append(f' > TRANSIENT: {len(self.transient_classes):d}')
lines.append(f' ERGODIC: {("YES" if self.is_ergodic else "NO")}')
lines.append(f' > APERIODIC: {("YES" if self.is_aperiodic else "NO (" + str(self.period) + ")")}')
lines.append(f' > IRREDUCIBLE: {("YES" if self.is_irreducible else "NO")}')
lines.append(f' ABSORBING: {("YES" if self.is_absorbing else "NO")}')
lines.append(f' REGULAR: {("YES" if self.is_regular else "NO")}')
lines.append(f' REVERSIBLE: {("YES" if self.is_reversible else "NO")}')
lines.append(f' SYMMETRIC: {("YES" if self.is_symmetric else "NO")}')
lines.append('')
value = '\n'.join(lines)
return value
@_cached_property
def __absorbing_states_indices(self) -> _tlist_int:
indices = [index for index in range(self.__size) if _np.isclose(self.__p[index, index], 1.0)]
return indices
@_cached_property
def __classes_indices(self) -> _tlists_int:
indices = [sorted([self.__states.index(c) for c in scc]) for scc in _nx.strongly_connected_components(self.__digraph)]
return indices
@_cached_property
def __communicating_classes_indices(self) -> _tlists_int:
indices = sorted(self.__classes_indices, key=lambda x: (-len(x), x[0]))
return indices
@_cached_property
def _cyclic_classes_indices(self) -> _tlists_int:
if not self.is_irreducible:
return []
if self.is_aperiodic:
return self.__communicating_classes_indices.copy()
indices = _find_cyclic_classes(self.__p)
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@_cached_property
def __cyclic_states_indices(self) -> _tlist_int:
indices = sorted(_it_chain.from_iterable(self._cyclic_classes_indices))
return indices
@_cached_property
def __eigenvalues_sorted(self) -> _tarray:
ev = _eigenvalues_sorted(self.__p)
return ev
@_cached_property
def __rdl_decomposition(self) -> _trdl:
r, d, l = _rdl_decomposition(self.__p) # noqa
return r, d, l
@_cached_property
def __recurrent_classes_indices(self) -> _tlists_int:
indices = [index for index in self.__classes_indices if index not in self.__transient_classes_indices]
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@_cached_property
def __recurrent_states_indices(self) -> _tlist_int:
indices = sorted(_it_chain.from_iterable(self.__recurrent_classes_indices))
return indices
@_cached_property
def __slem(self) -> _ofloat:
if not self.is_ergodic:
value = None
else:
value = _slem(self.__p)
return value
@_cached_property
def __states_indices(self) -> _tlist_int:
indices = list(range(self.__size))
return indices
@_cached_property
def __transient_classes_indices(self) -> _tlists_int:
edges = {edge1 for (edge1, edge2) in _nx.condensation(self.__digraph).edges}
indices = [self.__classes_indices[edge] for edge in edges]
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@_cached_property
def __transient_states_indices(self) -> _tlist_int:
indices = sorted(_it_chain.from_iterable(self.__transient_classes_indices))
return indices
@_cached_property
def absorbing_states(self) -> _tlists_str:
"""
A property representing the absorbing states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__absorbing_states_indices)]
return states
@_cached_property
def accessibility_matrix(self) -> _tarray:
"""
A property representing the accessibility matrix of the Markov chain.
"""
a = self.adjacency_matrix
i = _np.eye(self.__size, dtype=int)
am = _npl.matrix_power(i + a, self.__size - 1)
am = (am > 0).astype(int)
return am
@_cached_property
def adjacency_matrix(self) -> _tarray:
"""
A property representing the adjacency matrix of the Markov chain.
"""
am = (self.__p > 0.0).astype(int)
return am
@_cached_property
def communicating_classes(self) -> _tlists_str:
"""
A property representing the communicating classes of the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self.__communicating_classes_indices]
return classes
@_cached_property
def communication_matrix(self) -> _tarray:
"""
A property representing the communication matrix of the Markov chain.
"""
cm = _np.zeros((self.__size, self.__size), dtype=int)
for index in self.__communicating_classes_indices:
cm[_np.ix_(index, index)] = 1
return cm
@_cached_property
def cyclic_classes(self) -> _tlists_str:
"""
A property representing the cyclic classes of the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self._cyclic_classes_indices]
return classes
@_cached_property
def cyclic_states(self) -> _tlists_str:
"""
A property representing the cyclic states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__cyclic_states_indices)]
return states
@_cached_property
def determinant(self) -> float:
"""
A property representing the determinant of the transition matrix of the Markov chain.
"""
d = _npl.det(self.__p)
return d
@_cached_property
def entropy_rate(self) -> _ofloat:
"""
| A property representing the entropy rate of the Markov chain.
| If the Markov chain has multiple stationary distributions, then :py:class:`None` is returned.
"""
if len(self.pi) > 1:
return None
pi = self.pi[0]
h = 0.0
for i in range(self.__size):
for j in range(self.__size):
if self.__p[i, j] > 0.0:
h += pi[i] * self.__p[i, j] * _np.log(self.__p[i, j])
if
|
_np.isclose(h, 0.0)
|
numpy.isclose
|
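# --- Added usage sketch (not part of the original module); illustrative values, relying
# only on the constructor and properties defined above. ---
# mc = MarkovChain([[0.7, 0.3], [0.0, 1.0]], ['A', 'B'])
# print(mc)                      # summary produced by __str__
# print(mc.absorbing_states)     # ['B'] for this matrix
# print(mc.adjacency_matrix)     # binary matrix of allowed transitions
# print(mc.communicating_classes)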
# ----------------------------------------------------------------------------
# - TanksAndTemples Website Toolbox -
# - http://www.tanksandtemples.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017
# <NAME> <<EMAIL> >
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ----------------------------------------------------------------------------
#
# This python script is for downloading dataset from www.tanksandtemples.org
# The dataset has a different license, please refer to
# https://tanksandtemples.org/license/
import json
import copy
import os
import numpy as np
import open3d as o3d
import matplotlib.pyplot as plt
def read_alignment_transformation(filename):
with open(filename) as data_file:
data = json.load(data_file)
return np.asarray(data["transformation"]).reshape((4, 4)).transpose()
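# Added note (not part of the original script): the alignment file is expected to look
# like {"transformation": [t00, t10, t20, t30, t01, ...]}, i.e. 16 numbers stored
# column-major, since the flat list is reshaped to 4x4 and then transposed.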
def write_color_distances(path, pcd, distances, max_distance):
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
# cmap = plt.get_cmap("afmhot")
cmap = plt.get_cmap("hot_r")
distances = np.array(distances)
colors = cmap(np.minimum(distances, max_distance) / max_distance)[:, :3]
pcd.colors = o3d.utility.Vector3dVector(colors)
o3d.io.write_point_cloud(path, pcd)
def EvaluateHisto(
source,
target,
trans,
crop_volume,
voxel_size,
threshold,
filename_mvs,
plot_stretch,
scene_name,
verbose=True,
):
print("[EvaluateHisto]")
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
s = copy.deepcopy(source)
s.transform(trans)
s = crop_volume.crop_point_cloud(s)
s = s.voxel_down_sample(voxel_size)
s.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=20))
print(filename_mvs + "/" + scene_name + ".precision.ply")
t = copy.deepcopy(target)
t = crop_volume.crop_point_cloud(t)
t = t.voxel_down_sample(voxel_size)
t.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=20))
print("[compute_point_cloud_to_point_cloud_distance]")
distance1 = s.compute_point_cloud_distance(t)
print("[compute_point_cloud_to_point_cloud_distance]")
distance2 = t.compute_point_cloud_distance(s)
# write the distances to bin files
# np.array(distance1).astype("float64").tofile(
# filename_mvs + "/" + scene_name + ".precision.bin"
# )
# np.array(distance2).astype("float64").tofile(
# filename_mvs + "/" + scene_name + ".recall.bin"
# )
# Colorize the point cloud files with the precision and recall values
# o3d.io.write_point_cloud(
# filename_mvs + "/" + scene_name + ".precision.ply", s
# )
# o3d.io.write_point_cloud(
# filename_mvs + "/" + scene_name + ".precision.ncb.ply", s
# )
# o3d.io.write_point_cloud(filename_mvs + "/" + scene_name + ".recall.ply", t)
source_n_fn = filename_mvs + "/" + scene_name + ".precision.ply"
target_n_fn = filename_mvs + "/" + scene_name + ".recall.ply"
print("[ViewDistances] Add color coding to visualize error")
# eval_str_viewDT = (
# OPEN3D_EXPERIMENTAL_BIN_PATH
# + "ViewDistances "
# + source_n_fn
# + " --max_distance "
# + str(threshold * 3)
# + " --write_color_back --without_gui"
# )
# os.system(eval_str_viewDT)
write_color_distances(source_n_fn, s, distance1, 3 * threshold)
print("[ViewDistances] Add color coding to visualize error")
# eval_str_viewDT = (
# OPEN3D_EXPERIMENTAL_BIN_PATH
# + "ViewDistances "
# + target_n_fn
# + " --max_distance "
# + str(threshold * 3)
# + " --write_color_back --without_gui"
# )
# os.system(eval_str_viewDT)
write_color_distances(target_n_fn, t, distance2, 3 * threshold)
# get histogram and f-score
[
precision,
recall,
fscore,
edges_source,
cum_source,
edges_target,
cum_target,
] = get_f1_score_histo2(
threshold, filename_mvs, plot_stretch, distance1, distance2
)
np.savetxt(filename_mvs + "/" + scene_name + ".recall.txt", cum_target)
np.savetxt(filename_mvs + "/" + scene_name + ".precision.txt", cum_source)
np.savetxt(
filename_mvs + "/" + scene_name + ".prf_tau_plotstr.txt",
np.array([precision, recall, fscore, threshold, plot_stretch]),
)
return [
precision,
recall,
fscore,
edges_source,
cum_source,
edges_target,
cum_target,
]
def get_f1_score_histo2(
threshold, filename_mvs, plot_stretch, distance1, distance2, verbose=True
):
print("[get_f1_score_histo2]")
dist_threshold = threshold
if len(distance1) and len(distance2):
recall = float(sum(d < threshold for d in distance2)) / float(
len(distance2)
)
precision = float(sum(d < threshold for d in distance1)) / float(
len(distance1)
)
fscore = 2 * recall * precision / (recall + precision)
num = len(distance1)
bins = np.arange(0, dist_threshold * plot_stretch, dist_threshold / 100)
hist, edges_source = np.histogram(distance1, bins)
cum_source = np.cumsum(hist).astype(float) / num
num = len(distance2)
bins = np.arange(0, dist_threshold * plot_stretch, dist_threshold / 100)
hist, edges_target = np.histogram(distance2, bins)
cum_target =
|
np.cumsum(hist)
|
numpy.cumsum
|
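# --- Added standalone sketch (not part of the original module) of the
# precision/recall/F-score computation performed in get_f1_score_histo2, using toy
# distance lists and a toy threshold. ---
import numpy as np
distance1 = np.array([0.002, 0.010, 0.004, 0.030])  # source -> target distances
distance2 = np.array([0.001, 0.020, 0.003])         # target -> source distances
threshold = 0.005
precision = float(np.sum(distance1 < threshold)) / len(distance1)
recall = float(np.sum(distance2 < threshold)) / len(distance2)
fscore = 2 * recall * precision / (recall + precision)
print(precision, recall, fscore)  # 0.5 0.666... ~0.571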
import os, sys, inspect
import numpy as np
from copy import deepcopy
from ..mfbase import MFDataException
def get_first_val(arr):
while isinstance(arr, list) or isinstance(arr, np.ndarray):
arr = arr[0]
return arr
def clean_name(name):
# remove bad characters
clean_string = name.replace(' ', '_')
clean_string = clean_string.replace('-', '_')
# remove anything after a parenthesis
index = clean_string.find('(')
if index != -1:
clean_string = clean_string[0:index]
return clean_string
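# Added illustration (not part of the original module):
#   clean_name('river stage (m)') -> 'river_stage_'
#   clean_name('top-elev')        -> 'top_elev'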
def find_keyword(arr_line, keyword_dict):
# convert to lower case
arr_line_lower = []
for word in arr_line:
# integers and floats are not keywords
if not DatumUtil.is_int(word) and not DatumUtil.is_float(word):
arr_line_lower.append(word.lower())
# look for constants in order of most words to least words
key = ''
for num_words in range(len(arr_line_lower), -1, -1):
key = tuple(arr_line_lower[0:num_words])
if len(key) > 0 and key in keyword_dict:
return key
return None
def max_tuple_abs_size(some_tuple):
max_size = 0
for item in some_tuple:
item_abs = abs(item)
if item_abs > max_size:
max_size = item_abs
return max_size
class TemplateGenerator(object):
"""
Abstract base class for building a data template for different data types.
This is a generic class that is initialized with a path that identifies
the data to be built.
Parameters
----------
path : string
tuple containing the path to the data, as described in the dfn files
(<model>,<package>,<block>,<data name>)
"""
def __init__(self, path):
self.path = path
def _get_data_dimensions(self, model):
from ..data import mfstructure
from ..coordinates import modeldimensions
# get structure info
sim_struct = mfstructure.MFStructure().sim_struct
package_struct = sim_struct.get_data_structure(self.path[0:-2])
# get dimension info
data_struct = sim_struct.get_data_structure(self.path)
package_dim = modeldimensions.PackageDimensions([model.dimensions],
package_struct,
self.path[0:-1])
return data_struct, modeldimensions.DataDimensions(package_dim,
data_struct)
def build_type_header(self, type, data=None):
from ..data.mfdata import DataStorageType
if type == DataStorageType.internal_array:
if isinstance(self, ArrayTemplateGenerator):
return {'factor':1.0, 'iprn':1, 'data':data}
else:
return None
elif type == DataStorageType.internal_constant:
return data
elif type == DataStorageType.external_file:
return {'filename':'', 'factor':1.0, 'iprn':1}
return None
class ArrayTemplateGenerator(TemplateGenerator):
"""
Class that builds a data template for MFArrays. This is a generic class
that is initialized with a path that identifies the data to be built.
Parameters
----------
path : string
tuple containing the path to the data, as described in the dfn files
(<model>,<package>,<block>,<data name>)
Methods
-------
empty: (model: MFModel, layered: boolean, data_storage_type_list: boolean,
default_value: int/float) : variable
Builds a template for the data you need to specify for a specific data
type (ie. "hk") in a specific model. The data type and dimensions
are determined by "path" during initialization of this class and the
model is passed in to this method as the "model" parameter. If the
data is transient, a dictionary containing a single stress period
will be returned. If "layered" is set to true, data will be returned
as a list of ndarrays, one for each layer. data_storage_type_list is a
list of DataStorageType, one type for each layer. If "default_value"
is specified the data template will be populated with that value,
otherwise each ndarray in the data template will be populated with
np.empty (0 or 0.0 if the DataStorageType is a constant).
"""
def __init__(self, path):
super(ArrayTemplateGenerator, self).__init__(path)
def empty(self, model=None, layered=False, data_storage_type_list=None,
default_value=None):
from ..data import mfdata, mfstructure
from ..data.mfdata import DataStorageType
# get the expected dimensions of the data
data_struct, data_dimensions = self._get_data_dimensions(model)
datum_type = data_struct.get_datum_type()
data_type = data_struct.get_datatype()
# build a temporary data storage object
data_storage = mfdata.DataStorage(
model.simulation_data, data_dimensions, None,
mfdata.DataStorageType.internal_array,
mfdata.DataStructureType.recarray)
dimension_list = data_storage.get_data_dimensions(None)
# if layered data
if layered and dimension_list[0] > 1:
if data_storage_type_list is not None and \
len(data_storage_type_list) != dimension_list[0]:
comment = 'data_storage_type_list specified with the ' \
'wrong size. Size {} but expected to be ' \
'the same as the number of layers, ' \
'{}.'.format(len(data_storage_type_list),
dimension_list[0])
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(data_struct.get_model(),
data_struct.get_package(),
data_struct.path,
'generating array template',
data_struct.name,
inspect.stack()[0][3],
type_, value_, traceback_, comment,
model.simulation_data.debug)
# build each layer
data_with_header = []
for layer in range(0, dimension_list[0]):
# determine storage type
if data_storage_type_list is None:
data_storage_type = DataStorageType.internal_array
else:
data_storage_type = data_storage_type_list[layer]
# build data type header
data_with_header.append(self._build_layer(datum_type,
data_storage_type,
default_value,
dimension_list))
else:
if data_storage_type_list is None or \
data_storage_type_list[0] == \
DataStorageType.internal_array:
data_storage_type = DataStorageType.internal_array
else:
data_storage_type = data_storage_type_list[0]
# build data type header
data_with_header = self._build_layer(datum_type,
data_storage_type,
default_value,
dimension_list, True)
# if transient/multiple list
if data_type == mfstructure.DataType.array_transient:
# Return as dictionary
return {0:data_with_header}
else:
return data_with_header
def _build_layer(self, data_type, data_storage_type, default_value,
dimension_list, all_layers=False):
from ..data.mfdata import DataStorageType
# build data
if data_storage_type == DataStorageType.internal_array:
if default_value is None:
if all_layers:
data = np.empty(dimension_list, data_type)
else:
data = np.empty(dimension_list[1:], data_type)
else:
if all_layers:
data = np.full(dimension_list, default_value, data_type)
else:
data = np.full(dimension_list[1:], default_value,
data_type)
elif data_storage_type == DataStorageType.internal_constant:
if default_value is None:
if data_type == np.int:
data = 0
else:
data = 0.0
else:
data = default_value
else:
data = None
# build data type header
return self.build_type_header(data_storage_type, data)
class ListTemplateGenerator(TemplateGenerator):
"""
Class that builds a data template for MFLists. This is a generic class
that is initialized with a path that identifies the data to be built.
Parameters
----------
path : string
tuple containing the path to the data, as described in the dfn files
(<model>,<package>,<block>,<data name>)
Methods
-------
empty: (maxbound: int, aux_vars: list, boundnames: boolean, nseg: int) :
dictionary
Builds a template for the data you need to specify for a specific data
type (ie. "stress_period_data") in a specific model. The data type is
determined by "path" during initialization of this class. If the data
is transient a dictionary containing a single stress period will be
returned. The number of entries in the recarray is determined by
the "maxbound" parameter. The "aux_vars" parameter is a list of aux
var names to be used in this data list. If boundnames is set to
true, a boundname field will be included in the recarray. nseg is
only used on list data that contains segments. If timeseries is true,
a template that is compatible with time series data is returned.
"""
def __init__(self, path):
super(ListTemplateGenerator, self).__init__(path)
def _build_template_data(self, type_list):
template_data = []
for type in type_list:
if type[1] == int:
template_data.append(0)
elif type[1] == float:
template_data.append(np.nan)
else:
template_data.append(None)
return tuple(template_data)
def empty(self, model, maxbound=None, aux_vars=None, boundnames=False,
nseg=None, timeseries=False, stress_periods=None):
from ..data import mfdata, mfstructure
data_struct, data_dimensions = self._get_data_dimensions(model)
data_type = data_struct.get_datatype()
# build a temporary data storage object
data_storage = mfdata.DataStorage(
model.simulation_data, data_dimensions, None,
mfdata.DataStorageType.internal_array,
mfdata.DataStructureType.recarray)
# build type list
type_list = data_storage.build_type_list(nseg=nseg)
if aux_vars is not None:
if len(aux_vars) > 0 and (isinstance(aux_vars[0], list) or
isinstance(aux_vars[0], tuple)):
aux_vars = aux_vars[0]
for aux_var in aux_vars:
type_list.append((aux_var, object))
if boundnames:
type_list.append(('boundnames', object))
if timeseries:
# fix type list to make all types objects
for index in range(0, len(type_list)):
type_list[index] = (type_list[index][0], object)
# build recarray
template_data = self._build_template_data(type_list)
rec_array_data = []
if maxbound is not None:
for index in range(0, maxbound):
rec_array_data.append(template_data)
else:
rec_array_data.append(template_data)
rec_array = np.rec.array(rec_array_data, type_list)
# if transient/multiple list
if data_type == mfstructure.DataType.list_transient or \
data_type == mfstructure.DataType.list_multiple:
# Return as dictionary
if stress_periods is None:
return {0:rec_array}
else:
template = {}
for stress_period in stress_periods:
template[stress_period] = deepcopy(rec_array)
return template
else:
return rec_array
class DatumUtil(object):
@ staticmethod
def is_int(str):
try:
int(str)
return True
except TypeError:
return False
except ValueError:
return False
@ staticmethod
def is_float(str):
try:
float(str)
return True
except TypeError:
return False
except ValueError:
return False
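# Added illustration (not part of the original module):
#   DatumUtil.is_int('12')     -> True     DatumUtil.is_int('12.5')  -> False
#   DatumUtil.is_float('12.5') -> True     DatumUtil.is_float('abc') -> False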
class ArrayUtil(object):
"""
Class contains miscellaneous methods to work with and compare arrays
Parameters
----------
path : string
file path to read/write to
max_error : float
maximum acceptable error when doing a compare of floating point numbers
Methods
-------
is_empty_list : (current_list : list) : boolean
determines if an n-dimensional list is empty
con_convert : (data : string, data_type : type that has conversion
operation) : boolean
returns true if data can be converted into data_type
max_multi_dim_list_size : (current_list : list) : boolean
determines the max number of items in a multi-dimensional list
'current_list'
first_item : (current_list : list) : variable
returns the first item in the list 'current_list'
next_item : (current_list : list) : variable
returns the next item in the list 'current_list'
array_comp : (first_array : list, second_array : list) : boolean
compares two lists, returns true if they are identical (with max_error)
split_data_line : (line : string) : list
splits a string apart (using split) and then cleans up the results,
dealing with various MODFLOW input file related delimiters; returns
the list of cleaned-up tokens.
clean_numeric : (text : string) : string
returns a cleaned up version of 'text' with only numeric characters
save_array_diff : (first_array : list, second_array : list,
first_array_name : string, second_array_name : string)
saves lists 'first_array' and 'second_array' to files first_array_name
and second_array_name and then saves the difference of the two
arrays to 'debug_array_diff.txt'
save_array(filename : string, multi_array : list)
saves 'multi_array' to the file 'filename'
"""
numeric_chars = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0,
'6': 0, '7': 0, '8': 0, '9': 0, '.': 0, '-': 0}
quote_list = {"'", '"'}
delimiter_list = {',': 0, '\t': 0, ' ': 0}
delimiter_used = None
line_num = 0
consistent_delim = False
def __init__(self, path=None, max_error=0.01):
self.max_error = max_error
if path:
self.path = path
else:
self.path = os.getcwd()
@staticmethod
def has_one_item(current_list):
if not isinstance(current_list, list) and not isinstance(current_list,
np.ndarray):
return True
if len(current_list) != 1:
return False
if (isinstance(current_list[0], list) or
isinstance(current_list, np.ndarray)) and \
len(current_list[0]) != 0:
return False
return True
@staticmethod
def is_empty_list(current_list):
if not isinstance(current_list, list):
return not current_list
for item in current_list:
if isinstance(item, list):
# still in a list of lists, recurse
if not ArrayUtil.is_empty_list(item):
return False
else:
return False
return True
@staticmethod
def max_multi_dim_list_size(current_list):
max_length = -1
for item in current_list:
if len(item) > max_length:
max_length = len(item)
return max_length
@staticmethod
def first_item(current_list):
if not isinstance(current_list, list):
return current_list
for item in current_list:
if isinstance(item, list):
# still in a list of lists, recurse
return ArrayUtil.first_item(item)
else:
return item
@staticmethod
def next_item(current_list, new_list=True, nesting_change=0,
end_of_list=True):
# returns the next item in a nested list along with other information:
# (<next item>, <end of list>, <entering new list>,
# <change in nesting level>
if not isinstance(current_list, list) and \
not isinstance(current_list, np.ndarray):
yield (current_list, end_of_list, new_list, nesting_change)
else:
list_size = 1
for item in current_list:
if isinstance(item, list) or isinstance(current_list,
np.ndarray):
# still in a list of lists, recurse
for item in ArrayUtil.next_item(item, list_size == 1,
nesting_change + 1,
list_size ==
len(current_list)):
yield item
nesting_change = -(nesting_change + 1)
else:
yield (item, list_size == len(current_list),
list_size == 1, nesting_change)
nesting_change = 0
list_size += 1
@staticmethod
def next_list(current_list):
if not isinstance(current_list[0], list):
yield current_list
else:
for lst in current_list:
if isinstance(lst[0], list):
for lst in ArrayUtil.next_list(lst):
yield lst
else:
yield lst
def array_comp(self, first_array, second_array):
diff = first_array - second_array
        max_diff = np.max(np.abs(diff))
        if max_diff > self.max_error:
return False
return True
@staticmethod
def reset_delimiter_used():
ArrayUtil.delimiter_used = None
ArrayUtil.line_num = 0
ArrayUtil.consistent_delim = True
@staticmethod
def split_data_line(line, external_file=False, delimiter_conf_length=15):
if ArrayUtil.line_num > delimiter_conf_length and \
ArrayUtil.consistent_delim:
# consistent delimiter has been found. continue using that
# delimiter without doing further checks
            if ArrayUtil.delimiter_used is None:
clean_line = line.strip().split()
else:
clean_line = line.strip().split(ArrayUtil.delimiter_used)
else:
clean_line = line.strip().split()
if external_file:
# try lots of different delimiters for external files and use the
            # one that breaks the data apart the most
max_split_size = len(clean_line)
max_split_type = None
for delimiter in ArrayUtil.delimiter_list:
alt_split = line.strip().split(delimiter)
if len(alt_split) > max_split_size:
max_split_size = len(alt_split)
max_split_type = delimiter
if max_split_type is not None:
clean_line = line.strip().split(max_split_type)
if ArrayUtil.line_num == 0:
ArrayUtil.delimiter_used = max_split_type
elif ArrayUtil.delimiter_used != max_split_type:
ArrayUtil.consistent_delim = False
ArrayUtil.line_num += 1
arr_fixed_line = []
index = 0
# loop through line to fix quotes and delimiters
while index < len(clean_line):
item = clean_line[index]
if item and item not in ArrayUtil.delimiter_list:
if item and item[0] in ArrayUtil.quote_list:
# starts with a quote, handle quoted text
if item[-1] in ArrayUtil.quote_list:
arr_fixed_line.append(item[1:-1])
else:
arr_fixed_line.append(item[1:])
# loop until trailing quote found
while index < len(clean_line):
index += 1
if index < len(clean_line):
item = clean_line[index]
if item[-1] in ArrayUtil.quote_list:
arr_fixed_line[-1] = \
'{} {}'.format(arr_fixed_line[-1],
item[:-1])
break
else:
arr_fixed_line[-1] = \
'{} {}'.format(arr_fixed_line[-1],
item)
else:
# no quote, just append
arr_fixed_line.append(item)
index += 1
return arr_fixed_line
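    # Illustrative usage (added comment, not in the original source), assuming
    # reset_delimiter_used() has been called first: quoted items are re-joined
    # into a single token, e.g.
    #   ArrayUtil.split_data_line("10 20.5 'layer 1'")
    #   -> ['10', '20.5', 'layer 1']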
@staticmethod
def clean_numeric(text):
if isinstance(text, str):
# remove all non-numeric text from leading and trailing positions
# of text
if text:
while text and (text[0] not in ArrayUtil.numeric_chars or text[-1]
not in ArrayUtil.numeric_chars):
if text[0] not in ArrayUtil.numeric_chars:
text = text[1:]
if text and text[-1] not in ArrayUtil.numeric_chars:
text = text[:-1]
return text
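    # Illustrative usage (added comment, not in the original source): only
    # leading and trailing non-numeric characters are stripped, interior
    # characters are preserved, e.g.
    #   ArrayUtil.clean_numeric("(1.5)")   -> '1.5'
    #   ArrayUtil.clean_numeric("'-1.5,'") -> '-1.5'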
def save_array_diff(self, first_array, second_array, first_array_name,
second_array_name):
try:
diff = first_array - second_array
self.save_array(first_array_name, first_array)
self.save_array(second_array_name, second_array)
self.save_array('debug_array_diff.txt', diff)
except:
print("An error occurred while outputting array differences.")
return False
return True
    # Saves an array with up to four dimensions
def save_array(self, filename, multi_array):
file_path = os.path.join(self.path, filename)
with open(file_path, 'w') as outfile:
outfile.write('{}\n'.format(str(multi_array.shape)))
if len(multi_array.shape) == 4:
for slice in multi_array:
for second_slice in slice:
for third_slice in second_slice:
for item in third_slice:
outfile.write(' {:10.3e}'.format(item))
outfile.write('\n')
outfile.write('\n')
outfile.write('\n')
elif len(multi_array.shape) == 3:
for slice in multi_array:
np.savetxt(outfile, slice, fmt='%10.3e')
outfile.write('\n')
else:
|
np.savetxt(outfile, multi_array, fmt='%10.3e')
|
numpy.savetxt
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.test_util import GPflowTestCase
from gpflow.test_util import session_context
from gpflow import kernels
from gpflow import ekernels
from numpy.testing import assert_allclose
import pytest
def _assert_pdeq(obj, a, b, k=None, i=-1, l=-1):
obj.assertTrue(np.all(a.shape == b.shape))
pdmax = np.max(np.abs(a / b - 1) * 100)
# print("%s, %f" % (str(type(k)), pdmax))
msg = "Percentage difference above threshold: {0}\nOn kernel: {1} ({2} / {3})"
obj.assertTrue(pdmax < obj._threshold, msg=msg.format(pdmax, str(type(k)), i + 1, l))
def index_block(y, x, D):
return np.s_[y * D:(y + 1) * D, x * D:(x + 1) * D]
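# Illustrative usage (added comment, not in the original test file): for block
# size D, index_block(y, x, D) selects the (y, x) DxD block of a block matrix,
# e.g. index_block(1, 0, 3) == np.s_[3:6, 0:3].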
class TriDiagonalBlockRep(object):
"""
    Transforms an unconstrained representation of a PSD block tri-diagonal matrix to its PSD block representation.
"""
def __init__(self):
gpflow.transforms.Transform.__init__(self)
def forward(self, x):
"""
Transforms from the free state to the matrix of blocks.
:param x: Unconstrained state (Nx2DxD), where D is the block size.
:return: Return PSD blocks (2xNxDxD)
"""
N, D = x.shape[0], x.shape[2]
diagblocks = np.einsum('nij,nik->njk', x, x)
ob = np.einsum('nij,nik->njk', x[:-1, :, :], x[1:, :, :])
# ob = np.einsum('nij,njk->nik', x[:-1, :, :].transpose([0, 2, 1]), x[1:, :, :])
offblocks = np.vstack((ob, np.zeros((1, D, D))))
return np.array([diagblocks, offblocks])
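    # Note on forward() above (added comment, not in the original test file):
    # for each n the diagonal block is x[n].T @ x[n], which is PSD by
    # construction, and the off-diagonal block couples consecutive slices
    # x[n] and x[n+1]; the last off-diagonal block is zero-padded so both
    # stacks have length N.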
def forward_tensor(self, x):
N, D = tf.shape(x)[0], tf.shape(x)[2]
xm = tf.slice(x, [0, 0, 0], tf.stack([N - 1, -1, -1]))
xp = x[1:, :, :]
diagblocks = tf.matmul(x, x, transpose_a=True)
        offblocks = tf.concat_v2([tf.matmul(xm, xp, transpose_a=True),
                                  tf.zeros((1, D, D), dtype=tf.float64)], 0)
return tf.stack([diagblocks, offblocks])
def __str__(self):
return "BlockTriDiagonal"
class TestKernExpDelta(GPflowTestCase):
"""
    Check whether the normal kernel matrix is recovered if a delta distribution is used. An initial test which should
    indicate whether things work or not.
"""
@gpflow.defer_build()
def setUp(self):
self.test_graph = tf.Graph()
with self.test_context():
self.D = 2
self.rng = np.random.RandomState(0)
self.Xmu = self.rng.rand(10, self.D)
self.Z = self.rng.rand(4, self.D)
self.Xcov = np.zeros((self.Xmu.shape[0], self.D, self.D))
self.Xcovc = np.zeros((self.Xmu.shape[0], self.D, self.D))
k1 = ekernels.RBF(self.D, ARD=True)
k1.lengthscales = self.rng.rand(2) + [0.5, 1.5]
k1.variance = 0.3 + self.rng.rand()
k2 = ekernels.RBF(self.D)
k2.lengthscales = self.rng.rand(1) + [0.5]
k2.variance = 0.3 + self.rng.rand()
klin = ekernels.Linear(self.D, variance=0.3+self.rng.rand())
self.kernels = [k1, klin, k2]
def tearDown(self):
GPflowTestCase.tearDown(self)
for kernel in self.kernels:
kernel.clear()
def test_eKzxKxz(self):
for k in self.kernels:
with self.test_context():
k.compile()
psi2 = k.compute_eKzxKxz(self.Z, self.Xmu, self.Xcov)
kernmat = k.compute_K(self.Z, self.Xmu) # MxN
kernouter = np.einsum('in,jn->nij', kernmat, kernmat)
self.assertTrue(np.allclose(kernouter, psi2))
def test_eKdiag(self):
for k in self.kernels:
with self.test_context():
k.compile()
kdiag = k.compute_eKdiag(self.Xmu, self.Xcov)
orig = k.compute_Kdiag(self.Xmu)
self.assertTrue(np.allclose(orig, kdiag))
def test_exKxz_pairwise(self):
covall = np.array([self.Xcov, self.Xcovc])
for k in self.kernels:
with self.test_context():
if isinstance(k, ekernels.Linear):
continue
k.compile()
exKxz = k.compute_exKxz_pairwise(self.Z, self.Xmu, covall)
Kxz = k.compute_K(self.Xmu[:-1, :], self.Z) # NxM
xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu[1:, :])
self.assertTrue(np.allclose(xKxz, exKxz))
# def test_exKxz(self):
# for k in self.kernels:
# with self.test_session():
# if isinstance(k, ekernels.Linear):
# continue
# k.compile()
# exKxz = k.compute_exKxz(self.Z, self.Xmu, self.Xcov)
# Kxz = k.compute_K(self.Xmu, self.Z) # NxM
# xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu)
# self.assertTrue(np.allclose(xKxz, exKxz))
def test_Kxz(self):
for k in self.kernels:
with self.test_context():
k.compile()
psi1 = k.compute_eKxz(self.Z, self.Xmu, self.Xcov)
kernmat = k.compute_K(self.Z, self.Xmu) # MxN
self.assertTrue(np.allclose(kernmat, psi1.T))
class TestKernExpActiveDims(GPflowTestCase):
_threshold = 0.5
@gpflow.defer_build()
def setUp(self):
self.test_graph = tf.Graph()
with self.test_context():
self.N = 4
self.D = 2
self.rng = np.random.RandomState(0)
self.Xmu = self.rng.rand(self.N, self.D)
self.Z = self.rng.rand(3, self.D)
unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
t = TriDiagonalBlockRep()
self.Xcov = t.forward(unconstrained)
variance = 0.3 + self.rng.rand()
k1 = ekernels.RBF(1, variance, active_dims=[0])
k2 = ekernels.RBF(1, variance, active_dims=[1])
klin = ekernels.Linear(1, variance, active_dims=[1])
self.ekernels = [k1, k2, klin] # Kernels doing the expectation in closed form, doing the slicing
k1 = ekernels.RBF(1, variance)
k2 = ekernels.RBF(1, variance)
klin = ekernels.Linear(1, variance)
self.pekernels = [k1, k2, klin] # kernels doing the expectations in closed form, without slicing
k1 = kernels.RBF(1, variance, active_dims=[0])
klin = kernels.Linear(1, variance, active_dims=[1])
self.kernels = [k1, klin]
k1 = kernels.RBF(1, variance)
klin = kernels.Linear(1, variance)
self.pkernels = [k1, klin]
def tearDown(self):
GPflowTestCase.tearDown(self)
for kernel in self.kernels + self.ekernels + self.pekernels + self.pkernels:
kernel.clear()
def test_quad_active_dims(self):
with self.test_context():
for k, pk in zip(self.kernels + self.ekernels, self.pkernels + self.pekernels):
k.compile()
pk.compile()
a = k.compute_eKdiag(self.Xmu, self.Xcov[0, :, :, :])
sliced = np.take(
np.take(self.Xcov, k.active_dims, axis=-1),
k.active_dims,
axis=-2)
b = pk.compute_eKdiag(self.Xmu[:, k.active_dims], sliced[0, :, :, :])
_assert_pdeq(self, a, b, k)
a = k.compute_eKxz(self.Z, self.Xmu, self.Xcov[0, :, :, :])
sliced = np.take(
np.take(self.Xcov, k.active_dims, axis=-1),
k.active_dims,
axis=-2)
b = pk.compute_eKxz(
self.Z[:, k.active_dims],
self.Xmu[:, k.active_dims],
sliced[0, :, :, :])
_assert_pdeq(self, a, b, k)
a = k.compute_eKzxKxz(self.Z, self.Xmu, self.Xcov[0, :, :, :])
sliced = np.take(
|
np.take(self.Xcov, k.active_dims, axis=-1)
|
numpy.take
|
#!/usr/bin/env python
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Attributions
#
# This Python script was adapted from the original by <NAME>,
# Data Science and Machine Learning Enthusiast and author at
# towardsdatascience.com. See original Medium post:
# https://towardsdatascience.com/build-deploy-a-spam-classifier-app-on-heroku-cloud-in-10-minutes-f9347b27ff72
import os
import joblib
import numpy as np
import custom.deploy_models as deploy
from flask import Flask, render_template, url_for, request
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict',methods=['POST'])
def predict_spam():
XGboost_mod1_PATH = os.path.join("data",
"5_deployment",
"XGboost_mod1.joblib")
with open(XGboost_mod1_PATH, 'rb') as f:
XGboost_model = joblib.load(f)
if request.method == 'POST':
msg = request.form['message']
text =
|
np.array([msg])
|
numpy.array
|
'''
- tangent is not following the finite difference of points very well (but the same happens with the original data...)
'''
import numpy as np
tx = np.loadtxt('tx-data')
ty = np.loadtxt('ty-data')
tz =
|
np.loadtxt('tz-data')
|
numpy.loadtxt
|
import os
import argparse
from PIL import Image
import numpy as np
import json
from utils.metrics import iou_stats
def parse_argument():
parser = argparse.ArgumentParser(
description='Benchmark over 2D-3D-Semantics on segmentation, '\
+'depth and surface normals estimation')
parser.add_argument('--pred_dir', type=str, default='',
help='/path/to/prediction.')
parser.add_argument('--gt_dir', type=str, default='',
help='/path/to/ground-truths.')
parser.add_argument('--inst_class_json', type=str, default='',
help='/path/to/json/of/inst/classes.')
parser.add_argument('--depth_unit', type=float, default=512.0,
help='Each pixel value difference means 1/depth_unit '
'meters.')
parser.add_argument('--num_classes', type=int, default=21,
help='number of segmentation classes.')
parser.add_argument('--string_replace', type=str, default=',',
help='replace the first string with the second one.')
parser.add_argument('--train_depth', action='store_true',
help='enable/disable to benchmark depth.')
parser.add_argument('--train_normal', action='store_true',
help='enable/disable to benchmark surface normal.')
args = parser.parse_args()
return args
def benchmark_depth(pred_dir, gt_dir, inst_dir,
instance_labels, string_replace):
"""Benchmark depth estimation.
"""
print('Benchmarking instance-level depth estimations.')
assert(os.path.isdir(pred_dir))
assert(os.path.isdir(gt_dir))
assert(os.path.isdir(inst_dir))
num_instance = len(instance_labels)
num_pixels = np.zeros(num_instance, dtype=np.float64)
rmse_linear = np.zeros(num_instance, dtype=np.float64)
rmse_log = np.zeros(num_instance, dtype=np.float64)
absrel = np.zeros(num_instance, dtype=np.float64)
sqrrel = np.zeros(num_instance, dtype=np.float64)
thresholds = [np.zeros(num_instance, dtype=np.float64) for _ in range(5)]
for dirpath, dirnames, filenames in os.walk(pred_dir):
for filename in filenames:
predname = os.path.join(dirpath, filename)
gtname = predname.replace(pred_dir, gt_dir)
instname = gtname.replace('depth', 'semantic')
if string_replace != '':
stra, strb = string_replace.split(',')
gtname = gtname.replace(stra, strb)
pred = np.asarray(
Image.open(predname).convert(mode='I'),
dtype=np.int32)
gt = np.asarray(
Image.open(gtname).convert(mode='I'),
dtype=np.int32)
inst = np.asarray(
Image.open(instname).convert(mode='RGB'),
dtype=np.uint8)
inst = inst[:,:,0]*256**2+inst[:,:,1]*256+inst[:,:,2]
pred = np.reshape(pred, (-1,))
gt = np.reshape(gt, (-1,))
inst = np.reshape(inst, (-1,))
mask = gt < 2**16-1
pred = np.clip(pred, 51, 26560)
pred = pred[mask].astype(np.float32)/args.depth_unit
gt = gt[mask].astype(np.float32)/args.depth_unit
inst = inst[mask]
for inst_ind in np.unique(inst):
if inst_ind == 855309:
continue
m = inst == inst_ind
if not m.any():
continue
pred_m = pred[m]
gt_m = gt[m]
rmse_linear[inst_ind] += np.sum((pred_m-gt_m)**2)
rmse_log[inst_ind] += np.sum(
(np.log(pred_m)-np.log(gt_m))**2)
absrel[inst_ind] += np.sum(np.abs(pred_m-gt_m)/gt_m)
sqrrel[inst_ind] += np.sum((pred_m-gt_m)**2/gt_m)
th = np.maximum(pred_m/gt_m, gt_m/pred_m)
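        # Added clarifying comment (not in the original script): th is the usual
        # depth-accuracy ratio max(pred/gt, gt/pred); thresholds[i] counts pixels
        # with th < 1.25**(2**(i-2)), i.e. exponents 0.25, 0.5, 1, 2 and 4 for
        # i = 0..4.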
for i in range(len(thresholds)):
thresholds[i][inst_ind] += np.sum(th < 1.25**(np.power(2.0, i-2)))
num_pixels[inst_ind] += m.sum()
# instance level metrics.
num_pixels = np.maximum(num_pixels, 1e-12)
rmse_linear = np.sqrt(rmse_linear/num_pixels)
rmse_log = np.sqrt(rmse_log/num_pixels)
absrel = absrel / num_pixels
sqrrel = sqrrel / num_pixels
for i in range(len(thresholds)):
thresholds[i] = thresholds[i] / num_pixels
# semantic level metrics.
cls_names = {}
cls_num_insts = []
cls_rmse_linear = []
cls_rmse_log = []
cls_absrel = []
cls_sqrrel = []
cls_thresholds = [[] for t in thresholds]
for inst_ind, inst_name in enumerate(instance_labels):
cls_name = inst_name.split('_')[0]
if cls_names.get(cls_name, None) is None:
cls_names[cls_name] = len(cls_names)
cls_rmse_linear.append(0.0)
cls_rmse_log.append(0.0)
cls_absrel.append(0.0)
cls_sqrrel.append(0.0)
for t in cls_thresholds:
t.append(0.0)
cls_num_insts.append(0)
if num_pixels[inst_ind] >= 1:
cls_ind = cls_names[cls_name]
cls_num_insts[cls_ind] += 1
cls_rmse_linear[cls_ind] += rmse_linear[inst_ind]
cls_rmse_log[cls_ind] += rmse_log[inst_ind]
cls_absrel[cls_ind] += absrel[inst_ind]
cls_sqrrel[cls_ind] += sqrrel[inst_ind]
for ct, it in zip(cls_thresholds, thresholds):
ct[cls_ind] += it[inst_ind]
cls_num_insts = np.maximum(np.array(cls_num_insts), 1e-12)
cls_rmse_linear = np.array(cls_rmse_linear) / cls_num_insts
cls_rmse_log = np.array(cls_rmse_log) / cls_num_insts
cls_absrel = np.array(cls_absrel) / cls_num_insts
cls_sqrrel = np.array(cls_sqrrel) / cls_num_insts
for i in range(len(cls_thresholds)):
cls_thresholds[i] = np.array(cls_thresholds[i]) / cls_num_insts
for cls_name, cls_ind in cls_names.items():
print('class {:s}, RMSE(lin): {:.4f}'.format(
cls_name, cls_rmse_linear[cls_ind]))
print('class {:s}, RMSE(log): {:.4f}'.format(
cls_name, cls_rmse_log[cls_ind]))
print('class {:s}, abs rel: {:.4f}'.format(
cls_name, cls_absrel[cls_ind]))
print('class {:s}, sqr rel: {:.4f}'.format(
cls_name, cls_sqrrel[cls_ind]))
for i in range(len(cls_thresholds)):
print('class {:s}, \sigma < 1.25^{:.4f}: {:.4f}'.format(
cls_name, np.power(2.0, i-2), cls_thresholds[i][cls_ind]))
print('class {:s}, \sigma < 1.25: {:.4f}'.format(
cls_name, cls_thresholds[0][cls_ind]))
print('class {:s}, \sigma < 1.25^2: {:.4f}'.format(
cls_name, cls_thresholds[1][cls_ind]))
print('class {:s}, \sigma < 1.25^3: {:.4f}'.format(
cls_name, cls_thresholds[2][cls_ind]))
def benchmark_normal(pred_dir, gt_dir, inst_dir,
instance_labels, string_replace):
"""Benchmark surface normal estimations.
"""
print('Benchmarking instance-level surface normal estimations.')
assert(os.path.isdir(pred_dir))
assert(os.path.isdir(gt_dir))
assert(os.path.isdir(inst_dir))
num_instance = len(instance_labels)
angles = [[] for _ in range(num_instance)]
for dirpath, dirnames, filenames in os.walk(pred_dir):
for filename in filenames:
predname = os.path.join(dirpath, filename)
gtname = predname.replace(pred_dir, gt_dir)
instname = (gtname.replace('normal', 'semantic')
.replace('semantics', 'semantic'))
if string_replace != '':
stra, strb = string_replace.split(',')
gtname = gtname.replace(stra, strb)
pred = np.asarray(
Image.open(predname).convert(mode='RGB'),
dtype=np.uint8)
gt = np.asarray(
Image.open(gtname).convert(mode='RGB'),
dtype=np.uint8)
inst = np.asarray(
Image.open(instname).convert(mode='RGB'),
dtype=np.uint8)
inst = inst[:,:,0]*256**2+inst[:,:,1]*256+inst[:,:,2]
pred = np.reshape(pred, (-1,3))
gt = np.reshape(gt, (-1,3))
inst = np.reshape(inst, (-1,))
mask = np.any(gt != 128, axis=-1)
pred = pred[mask, :].astype(np.float32)-127.5
gt = gt[mask, :].astype(np.float32)-127.5
inst = inst[mask]
pred = pred / (np.linalg.norm(pred, axis=-1, keepdims=True)+1e-12)
gt = gt / (np.linalg.norm(gt, axis=-1, keepdims=True)+1e-12)
cos = np.sum(pred*gt, axis=-1)
abs_cos = np.abs(cos)
assert(not (abs_cos-1 > 1e-5).any())
cos = np.clip(cos, -1, 1)
for inst_ind in np.unique(inst):
if inst_ind == 855309:
continue
m = inst == inst_ind
if m.any():
angles[inst_ind].append(cos[m])
# semantic level metrics.
cls_names = {}
cls_mean_angles = []
cls_med_angles = []
cls_angles_3 = []
cls_angles_6 = []
cls_angles_11 = []
cls_angles_22 = []
cls_angles_30 = []
cls_num_insts = []
for inst_ind, inst_name in enumerate(instance_labels):
cls_name = inst_name.split('_')[0]
if cls_names.get(cls_name, None) is None:
cls_names[cls_name] = len(cls_names)
cls_mean_angles.append(0.0)
cls_med_angles.append(0.0)
cls_angles_3.append(0.0)
cls_angles_6.append(0.0)
cls_angles_11.append(0.0)
cls_angles_22.append(0.0)
cls_angles_30.append(0.0)
cls_num_insts.append(0)
inst_angs = angles[inst_ind]
if len(inst_angs) > 0:
inst_angs = np.hstack(inst_angs)
inst_angs = np.arccos(inst_angs)*(180.0/np.pi)
cls_ind = cls_names[cls_name]
cls_mean_angles[cls_ind] += np.mean(inst_angs)
cls_med_angles[cls_ind] += np.median(inst_angs)
cls_angles_3[cls_ind] += np.mean(inst_angs <= 2.8125)
cls_angles_6[cls_ind] += np.mean(inst_angs <= 5.625)
cls_angles_11[cls_ind] += np.mean(inst_angs <= 11.25)
cls_angles_22[cls_ind] += np.mean(inst_angs <= 22.5)
cls_angles_30[cls_ind] += np.mean(inst_angs <= 30)
cls_num_insts[cls_ind] += 1
cls_num_insts = np.maximum(np.array(cls_num_insts), 1e-12)
cls_mean_angles = np.array(cls_mean_angles) / cls_num_insts
cls_med_angles = np.array(cls_med_angles) / cls_num_insts
cls_angles_3 = np.array(cls_angles_3) / cls_num_insts
cls_angles_6 = np.array(cls_angles_6) / cls_num_insts
cls_angles_11 = np.array(cls_angles_11) / cls_num_insts
cls_angles_22 =
|
np.array(cls_angles_22)
|
numpy.array
|
#!/usr/bin/env python
"""
Utility functions for ncvue.
The utility functions do not depend on the ncvue class.
Functions depending on the class are in ncvmethods.
This module was written by <NAME> while at Institut National de
Recherche pour l'Agriculture, l'Alimentation et l'Environnement (INRAE), Nancy,
France.
Copyright (c) 2020-2021 <NAME> - mc (at) macu (dot) de
Released under the MIT License; see LICENSE file for details.
History:
* Written Nov-Dec 2020 by <NAME> (mc (at) macu (dot) de)
* General get_slice function from individual methods for x, y, y2, z,
Dec 2020, <NAME>
* Added arithmetics to apply on axis/dimensions such as mean, std, etc.,
Dec 2020, <NAME>
* Added clone_ncvmain, removing its own module, Dec 2020, <NAME>
* added SEPCHAR and DIMMETHODS, Jan 2021, <NAME>
* pass only ncvMain widget to clone_ncvmain, Jan 2021, <NAME>
* pass only root widget to clone_ncvmain, Jan 2021, <NAME>
* set correct missing value for date variable in numpy's datetime64[ms] format
May 2021, <NAME>
* added format_coord functions for scatter, contour, and map,
May 2021, <NAME>
* replaced add_cyclic_point with add_cyclic as submitted to cartopy,
Jun 2021, <NAME>
* removed SEPCHAR, Jun 2021, <NAME>
.. moduleauthor:: <NAME>
The following functions are provided:
.. autosummary::
DIMMETHODS
add_cyclic
clone_ncvmain
format_coord_contour
format_coord_map
format_coord_scatter
get_slice
list_intersection
set_axis_label
set_miss
spinbox_values
vardim2var
zip_dim_name_length
"""
from __future__ import absolute_import, division, print_function
import tkinter as tk
import numpy as np
import matplotlib.dates as mpld
import cartopy.crs as ccrs
__all__ = ['DIMMETHODS',
'add_cyclic', 'clone_ncvmain',
'format_coord_contour', 'format_coord_map', 'format_coord_scatter',
'get_slice',
'list_intersection', 'set_axis_label', 'set_miss',
'spinbox_values', 'vardim2var', 'zip_dim_name_length']
DIMMETHODS = ('mean', 'std', 'min', 'max', 'ptp', 'sum', 'median', 'var')
"""
Arithmetic methods implemented on dimensions.
mean - average
std - standard deviation
min - minimum
max - maximum
ptp - point-to-point amplitude = max - min
sum - sum
median - 50-percentile
var - variance
"""
def _add_cyclic_data(data, axis=-1):
"""
Add a cyclic point to a data array.
Parameters
----------
data : ndarray
An n-dimensional array of data to add a cyclic point to.
axis: int, optional
Specifies the axis of the data array to add the cyclic point to.
Defaults to the right-most axis.
Returns
-------
The data array with a cyclic point added.
"""
slicer = [slice(None)] * data.ndim
try:
slicer[axis] = slice(0, 1)
except IndexError:
estr = 'The specified axis does not correspond to an array dimension.'
raise ValueError(estr)
npc = np.ma if np.ma.is_masked(data) else np
return npc.concatenate((data, data[tuple(slicer)]), axis=axis)
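# Illustrative usage (added comment, not in the original module):
#   _add_cyclic_data(np.arange(12).reshape(3, 4)).shape -> (3, 5)
# with the first column repeated as the new last column along the chosen axis.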
def _add_cyclic_lon(lon, axis=-1, cyclic=360):
"""
Add a cyclic point to a longitude array.
Parameters
----------
lon: ndarray, optional
An array which specifies the coordinate values for
the dimension the cyclic point is to be added to.
axis: int, optional
Specifies the axis of the longitude array to add the cyclic point to.
Defaults to the right-most axis.
cyclic: float, optional
Width of periodic domain (default: 360)
Returns
-------
The coordinate `lon` with a cyclic point added.
"""
npc = np.ma if np.ma.is_masked(lon) else np
# get cyclic longitudes
# clon is the code from basemap (addcyclic)
# https://github.com/matplotlib/basemap/blob/master/lib/mpl_toolkits/basemap/__init__.py
clon = (
|
np.take(lon, [0], axis=axis)
|
numpy.take
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import pytest
import popart
import torch
import test_util as tu
def test_conversion_basic():
data = np.array([[1, 2]])
builder = popart.Builder()
inInfo = popart.TensorInfo("FLOAT", data.shape)
i1 = builder.addInputTensor(inInfo)
o = builder.aiOnnx.identity([i1])
builder.addOutputTensor(o)
float_proto = builder.getModelProto()
half_proto = _convert_floats_to_halfs(float_proto)
inputs = {i1: data}
float_anchors = _run_model(float_proto, _as_type(inputs, np.float32), o)
half_anchors = _run_model(half_proto, _as_type(inputs, np.float16), o)
_check_anchors(float_anchors, half_anchors)
def test_conversion_with_mul():
d1 = np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
d2 = np.array([[0.1, 0.4, 0.2, 0.5, 0.3, 0.6, 0.4, 0.7]])
builder = popart.Builder()
i1 = builder.addInputTensor(popart.TensorInfo("FLOAT", d1.shape))
i2 = builder.addInputTensor(popart.TensorInfo("FLOAT", d2.shape))
o = builder.aiOnnx.mul([i1, i2])
builder.addOutputTensor(o)
float_proto = builder.getModelProto()
half_proto = _convert_floats_to_halfs(float_proto)
inputs = {i1: d1, i2: d2}
float_anchors = _run_model(float_proto, _as_type(inputs, np.float32), o)
half_anchors = _run_model(half_proto, _as_type(inputs, np.float16), o)
_check_anchors(float_anchors, half_anchors)
np_half_o = d1.astype(np.float16) * d2.astype(np.float16)
print(half_anchors[o])
print(np_half_o)
def test_conversion_with_initializers():
d1 = np.array([[0.1, 0.4, 0.2, 0.5, 0.3, 0.6, 0.4, 0.7]])
# need to provide some input otherwise const folding errors
d2 =
|
np.zeros(d1.shape)
|
numpy.zeros
|
# Generic libraries
import gpflow
import numpy as np
import tensorflow as tf
import unittest
# Branching files
from BranchedGP import VBHelperFunctions
from BranchedGP import BranchingTree as bt
from BranchedGP import branch_kernParamGPflow as bk
from BranchedGP import assigngp_dense
from BranchedGP import FitBranchingModel
class TestKL(unittest.TestCase):
def test(self):
fDebug = True # Enable debugging output - tensorflow print ops
np.set_printoptions(suppress=True, precision=5)
seed = 43
np.random.seed(seed=seed) # easy peasy reproducibeasy
tf.set_random_seed(seed)
# Data generation
N = 20
t = np.linspace(0, 1, N)
print(t)
trueB = np.ones((1, 1))*0.5
Y = np.zeros((N, 1))
idx = np.nonzero(t>0.5)[0]
idxA = idx[::2]
idxB = idx[1::2]
print(idx)
print(idxA)
print(idxB)
Y[idxA, 0] = 2 * t[idxA]
Y[idxB, 0] = -2 * t[idxB]
globalBranchingLabels = np.ones(N)
globalBranchingLabels[4::2] = 2
globalBranchingLabels[5::2] = 3
XExpanded, indices, _ = VBHelperFunctions.GetFunctionIndexListGeneral(t)
phiInitial, phiPrior = FitBranchingModel.GetInitialConditionsAndPrior(globalBranchingLabels, 0.51, False)
ptb = np.min([np.min(t[globalBranchingLabels == 2]), np.min(t[globalBranchingLabels == 3])])
tree = bt.BinaryBranchingTree(0, 1, fDebug=False)
tree.add(None, 1, np.ones((1, 1)) * ptb) # B can be anything here
(fm1, _) = tree.GetFunctionBranchTensor()
# Look at kernels
fDebug=True
Kbranch1 = bk.BranchKernelParam(gpflow.kernels.Matern32(1), fm1, b=np.ones((1, 1)) * ptb, fDebug=fDebug)
K1 = Kbranch1.compute_K(XExpanded, XExpanded)
Kbranch2 = bk.BranchKernelParam(gpflow.kernels.Matern32(1), fm1, b=np.ones((1, 1)) * 0.20, fDebug=fDebug)
K2 = Kbranch2.compute_K(XExpanded, XExpanded)
Kbranch3 = bk.BranchKernelParam(gpflow.kernels.Matern32(1), fm1, b=np.ones((1, 1)) * 0.22, fDebug=fDebug)
K3 = Kbranch3.compute_K(XExpanded, XExpanded)
# Look at model
kb = bk.BranchKernelParam(gpflow.kernels.Matern32(1), fm1, b=np.zeros((1, 1))) + gpflow.kernels.White(1)
kb.kernels[1].variance = 1e-6 # controls the discontinuity magnitude, the gap at the branching point
kb.kernels[1].variance.set_trainable(False) # jitter for numerics
# m = assigngp_dense.AssignGP(t, XExpanded, Y, kb, indices, np.ones((1, 1)), phiInitial=phiInitial, phiPrior=phiPrior)
m = assigngp_dense.AssignGP(t, XExpanded, Y, kb, indices, np.ones((1, 1)), phiInitial=phiInitial, phiPrior=phiPrior, KConst=K1, fDebug=True)
m.UpdateBranchingPoint(np.ones((1, 1)) * ptb, phiInitial.copy())
ptbLL = m.compute_log_likelihood()
m.UpdateBranchingPoint(np.ones((1, 1)) * 0.20, phiInitial.copy())
eLL = m.compute_log_likelihood()
m.UpdateBranchingPoint(
|
np.ones((1, 1))
|
numpy.ones
|
"""
Contains utilities, functions, and variables that are commonly used or shared amongst
the figure creation files.
"""
from string import ascii_lowercase
from cycler import cycler
import math
import numpy as np
import pandas as pd
from matplotlib import gridspec, pyplot as plt
import seaborn as sns
import svgutils.transform as st
from ..Analyze import run_Results_over, run_Analyze_over
from ..BaumWelch import calculate_stationary
from ..states.StateDistributionGamma import StateDistribution
from ..states.StateDistributionGaPhs import StateDistribution as phaseStateDist
# T: transition probability matrix
T = np.array([[0.9, 0.1], [0.05, 0.95]], dtype="float")
# pi: the initial probability vector
pi = calculate_stationary(T)
# bern, gamma_a, gamma_scale
state0 = StateDistribution(0.99, 8, 6)
state1 = StateDistribution(0.75, 8, 1)
E = [state0, state1]
state20 = phaseStateDist(0.99, 0.95, 8, 7, 4, 2)
state21 = phaseStateDist(0.95, 0.9, 6, 4, 3, 5)
E2 = [state20, state21]
min_desired_num_cells = (2 ** 4) - 1
max_desired_num_cells = (2 ** 7) - 1
min_experiment_time = 72
max_experiment_time = 144
min_num_lineages = 3
max_num_lineages = 40
num_data_points = 100
scatter_state_1_kws = {
"alpha": 0.5,
"marker": "+",
"s": 20,
}
scatter_state_2_kws = {
"alpha": 0.5,
"marker": "x",
"s": 20,
"color": "green"
}
scatter_kws_list = [scatter_state_1_kws, scatter_state_2_kws]
def getSetup(figsize, gridd):
"""
Establish figure set-up with subplots.
"""
with sns.plotting_context("paper"):
sns.set(
palette="deep",
rc={"axes.facecolor": "#ffffff", # axes background color
"axes.edgecolor": "#000000", # axes edge color
"axes.xmargin": 0, # x margin. See `axes.Axes.margins`
"axes.ymargin": 0, # y margin See `axes.Axes.margins`
"axes.linewidth": 1. / 4,
"grid.linestyle": "-",
"grid.alpha": 1. / 4,
"grid.color": "#000000",
"xtick.bottom": True,
"xtick.direction": "inout",
"xtick.major.width": 1. / 4, # major tick width in points
"xtick.minor.width": 0.5 / 4, # minor tick width in points
"ytick.left": True,
"ytick.direction": "inout",
"ytick.major.width": 1. / 4, # major tick width in points
"ytick.minor.width": 0.5 / 4, # minor tick width in points
"svg.fonttype": "none" # Keep as text
},
)
# Setup plotting space and grid
f = plt.figure(figsize=figsize, dpi=400, constrained_layout=True)
gs1 = gridspec.GridSpec(*gridd, figure=f)
# Get list of axis objects
ax = list()
for x in range(gridd[0] * gridd[1]):
ax.append(f.add_subplot(gs1[x]))
return ax, f
def subplotLabel(axs):
"""
    Subplot labels
"""
i = 0
for _, ax in enumerate(axs):
if ax.has_data() or i == 0: # only label plots with graphs on them
ax.text(-0.2, 1.25, ascii_lowercase[i], transform=ax.transAxes, fontsize=16, fontweight="bold", va="top")
i += 1
def overlayCartoon(figFile, cartoonFile, x, y, scalee=1, scale_x=1, scale_y=1, rotate=None):
""" Add cartoon to a figure file. """
# Overlay Figure cartoons
template = st.fromfile(figFile)
cartoon = st.fromfile(cartoonFile).getroot()
cartoon.moveto(x, y, scale_x=scalee * scale_x, scale_y=scalee * scale_y)
if rotate:
cartoon.rotate(rotate, x, y)
template.append(cartoon)
template.save(figFile)
def commonAnalyze(list_of_populations, num_states, xtype="length", **kwargs):
"""
The standard way of analyzing a list of populations (a list of list of lineages)
for analysis and plotting.
"""
list_of_fpi = kwargs.get("list_of_fpi", [None] * len(list_of_populations))
list_of_fT = kwargs.get("list_of_fT", [None] * len(list_of_populations))
list_of_fE = kwargs.get("list_of_fE", [None] * len(list_of_populations))
if num_states == 2:
predicted_num_states = kwargs.get("predicted_num_states", 2)
elif num_states == 3:
predicted_num_states = kwargs.get("predicted_num_states", 3)
elif num_states == 4:
predicted_num_states = kwargs.get("predicted_num_states", 4)
parallel = kwargs.get("parallel", True)
# Analyzing the lineages in the list of populations (parallelized function)
output = run_Analyze_over(list_of_populations, predicted_num_states, parallel=parallel,
list_of_fpi=list_of_fpi, list_of_fT=list_of_fT, list_of_fE=list_of_fE)
# Collecting the results of analyzing the lineages
results_holder = run_Results_over(output)
dictOut = {}
for key in results_holder[0].keys():
dictOut[key] = []
for results_dict in results_holder:
for key, val in results_dict.items():
dictOut[key].append(val)
paramEst = np.array(dictOut["param_estimates"])
paramTrues = np.array(dictOut["param_trues"])
x = None
if xtype == "length":
x = dictOut["total_number_of_cells"]
elif xtype == "prop":
x = dictOut["state_proportions_0"]
elif xtype == "wass":
x = dictOut["wasserstein"]
elif xtype == "bern":
x = paramTrues[:, 0, 0]
return x, paramEst, dictOut, paramTrues
def figureMaker(ax, x, paramEst, dictOut, paramTrues, xlabel="Number of Cells", num_lineages=None, dist_dist=False):
"""
Makes the common 6 panel figures displaying parameter estimation across lineages
of various types and sizes.
"""
# Checks whether we are plotting exponential results, or gamma results
number_of_params = paramEst.shape[-1]
accuracies = dictOut["balanced_accuracy_score"]
tr = dictOut["transition_matrix_norm"]
pii = dictOut["pi_vector_norm"]
accuracy_df = pd.DataFrame(columns=["x", 'accuracy'])
accuracy_df['x'] = x
accuracy_df['accuracy'] = accuracies
accuracy_df['tr'] = tr
accuracy_df['pii'] = pii
accuracy_df['bern 0 0'] = paramEst[:, 0, 0] # bern G1 or Bern
accuracy_df['bern 1 0'] = paramEst[:, 1, 0]
if number_of_params == 6:
accuracy_df['bern 0 1'] = paramEst[:, 0, 1] # bern G2
accuracy_df['bern 1 1'] = paramEst[:, 1, 1]
accuracy_df['gamma 0 2'] = paramEst[:, 0, 2] # gamma G1 shape
accuracy_df['gamma 1 2'] = paramEst[:, 1, 2]
accuracy_df['gamma 0 3'] = paramEst[:, 0, 3] # gamma G1 scale
accuracy_df['gamma 1 3'] = paramEst[:, 1, 3]
accuracy_df['gamma 0 4'] = paramEst[:, 0, 4] # gamma G2 shape
accuracy_df['gamma 1 4'] = paramEst[:, 1, 4]
accuracy_df['gamma 0 5'] = paramEst[:, 0, 5] # gamma G2 scale
accuracy_df['gamma 1 5'] = paramEst[:, 1, 5]
accuracy_df['wasserstein distance 0'] = dictOut["distribution distance 0"]
accuracy_df['wasserstein distance 1'] = dictOut["distribution distance 1"]
else:
accuracy_df['0 1'] = paramEst[:, 0, 1] # gamma shape
accuracy_df['1 1'] = paramEst[:, 1, 1]
accuracy_df['gamma 0 2'] = paramEst[:, 0, 2] # gamma scale
accuracy_df['gamma 1 2'] = paramEst[:, 1, 2]
if num_lineages is not None:
accuracy_df['num lineages'] = num_lineages
i = 0
ax[i].axis('off')
i += 1 # i = 1
ax[i].axis('off')
i += 1 # i = 2
ax[i].axis('off')
i += 1 # i = 3: plot estimation of bernoulli parameter
sns.regplot(x="x", y="bern 0 0", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="bern 1 0", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 0], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 0], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_xlabel(xlabel)
ax[i].set_ylim(bottom=0.0, top=1.02)
if number_of_params == 6:
ax[i].set_ylabel("G1 Bernoulli $p$")
ax[i].set_title(r"G1 Bernoulli $p$")
else:
ax[i].set_ylabel("Bernoulli $p$")
ax[i].set_title(r"Bernoulli $p$")
i += 1 # i = 4
if number_of_params == 6: # phase specific gamma
if dist_dist: # bernoulli G2
sns.regplot(x="x", y="bern 0 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="bern 1 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 1], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 1], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_ylim(bottom=0.0, top=1.02)
ax[i].set_ylabel("S/G2 Bernoulli $p$")
ax[i].set_title(r"S/G2 Bernoulli $p$")
else:
sns.regplot(x="x", y="gamma 0 2", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="gamma 1 2", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 2], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 2], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_ylabel(r"G1 Gamma $k$")
ax[i].set_title(r"G1 Gamma $k$")
ax[i].set_ylim([0.0, 20.0])
else: # simple lifetime gamma
sns.regplot(x="x", y="0 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="1 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 1], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 1], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_ylabel(r"Gamma $k$")
ax[i].set_title(r"Gamma $k$")
ax[i].set_ylim([0.0, 20.0])
ax[i].set_xlabel(xlabel)
i += 1 # i = 5
if number_of_params == 6:
if dist_dist: # plot gamma distance
sns.regplot(x="x", y='wasserstein distance 0', data=accuracy_df, ax=ax[i], lowess=True, label="state 1", marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y='wasserstein distance 1', data=accuracy_df, ax=ax[i], lowess=True, label="state 2", marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].set_title(r"Distance bw true and estm. gamma dists")
ax[i].set_ylabel(r"Wasserstein distance")
ax[i].set_xlabel(xlabel)
ax[i].set_ylim(0.0, 10.0)
ax[i].legend()
else: # plot gamma params scale
sns.regplot(x="x", y="gamma 0 3", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="gamma 1 3", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 3], marker="_", s=20,
c="#00ffff", alpha=0.5, label="State 1")
ax[i].scatter(x, paramTrues[:, 1, 3], marker="_", s=20,
c="#00cc00", alpha=0.5, label="State 2")
ax[i].set_xlabel(xlabel)
ax[i].set_ylabel(r"G1 Gamma $\theta$")
ax[i].set_title(r"G1 Gamma $\theta$")
ax[i].set_ylim([0.0, 20.0])
else: # just simple gamma params
sns.regplot(x="x", y="gamma 0 2", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="gamma 1 2", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 2], marker="_", s=20,
c="#00ffff", alpha=0.5, label="State 1")
ax[i].scatter(x, paramTrues[:, 1, 2], marker="_", s=20,
c="#00cc00", alpha=0.5, label="State 2")
ax[i].set_xlabel(xlabel)
ax[i].set_ylabel(r"Gamma $\theta$")
ax[i].set_title(r"Gamma $\theta$")
ax[i].set_ylim([0.0, 20.0])
ax[i].legend()
i += 1 # i = 6
if number_of_params == 6 and (not dist_dist):
sns.regplot(x="x", y="bern 0 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="bern 1 1", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 1], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 1], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_ylim(bottom=0.0, top=1.02)
ax[i].set_ylabel("S/G2 Bernoulli $p$")
ax[i].set_title(r"S/G2 Bernoulli $p$")
else:
ax[i].set_ylim(bottom=0, top=101)
sns.regplot(x="x", y="accuracy", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
ax[i].set_ylabel(r"Accuracy [%]")
ax[i].set_title("State Assignment Accuracy")
ax[i].set_xlabel(xlabel)
i += 1 # i = 7
if number_of_params == 6 and (not dist_dist):
sns.regplot(x="x", y="gamma 0 4", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[0])
sns.regplot(x="x", y="gamma 1 4", data=accuracy_df, ax=ax[i], lowess=True, marker='+', scatter_kws=scatter_kws_list[1], line_kws={"color": "green"})
ax[i].scatter(x, paramTrues[:, 0, 4], marker="_", s=20, c="#00ffff", alpha=0.5)
ax[i].scatter(x, paramTrues[:, 1, 4], marker="_", s=20, c="#00cc00", alpha=0.5)
ax[i].set_ylabel(r"S/G2 Gamma $k$")
ax[i].set_title(r"S/G2 Gamma $k$")
ax[i].set_ylim([0.0, 20.0])
else:
ax[i].set_ylim(bottom=0, top=
|
np.mean(tr)
|
numpy.mean
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
samples = np.random.normal(10, 1, size=1000)
gaussian = UnivariateGaussian()
gaussian.fit(samples)
print("\n({}, {})\n".format(gaussian.mu_, gaussian.var_))
# Question 2 - Empirically showing sample mean is consistent
ms = np.arange(10, 1001, 10)
estimated_mean = []
for m in ms:
X = samples[:m + 1]
gaussian.fit(X)
estimated_mean.append(gaussian.mu_)
fig = go.Figure([go.Scatter(x=ms, y=estimated_mean, mode='markers+lines', name=r'$\widehat\mu$'),
go.Scatter(x=ms, y=[10] * len(ms), mode='lines', name=r'$\mu$')],
layout=go.Layout(title=r"$\text{Estimation of Expectation As Function Of Number Of Samples}$",
xaxis_title="$m\\text{ - number of samples}$",
yaxis_title="r$\hat\mu$",
height=300))
fig.show()
# Question 3 - Plotting Empirical PDF of fitted model
gaussian.fit(samples)
pdfs = gaussian.pdf(np.linspace(5, 14, 1000))
fig = go.Figure([go.Scatter(x=np.linspace(5, 14, 1000), y=pdfs, mode='markers', marker=dict(size=3), name='PDF'),
go.Scatter(x=samples, y=[0] * 1000, mode='markers', marker=dict(size=3), name='samples')],
layout=go.Layout(title=r"$\text{Empirical PDF of fitted model}$",
xaxis_title='samples',
yaxis_title='PDF',
height=300))
fig.show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mu = np.array([0, 0, 4, 0])
sigma = np.array([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]])
samples =
|
np.random.multivariate_normal(mu, sigma, 1000)
|
numpy.random.multivariate_normal
|
#!/usr/bin/env python
'''
Compare candidate periods across N files and output one N-digit binary code per candidate.
In the binary code, "1" denotes detection and "0" denotes non-detection.
ORDER MATTERS: Candidate detection in file i is denoted by "1" in the i^{th} position of the code (read from left to right).
Run using the following syntax.
python compare_cands.py -i <Configuration script of inputs> | tee <Log file>
'''
from __future__ import print_function
from __future__ import absolute_import
# Custom imports
from modules.general_utils import setup_logger_stdout, create_dir
from modules.read_config import read_config
# Standard packages
from argparse import ArgumentParser
from riptide.clustering import cluster1d
from tqdm import tqdm
import os, logging, time, sys, csv
import numpy as np
import pandas as pd
##############################################################
def myexecute(inputs_cfg):
"""
Primary function that handles script execution.
Parameters
----------
inputs_cfg : str
configuration script of inputs
"""
# Profile code execution.
prog_start_time = time.time()
# Read inputs from config file and set default parameter values, if applicable.
hotpotato = read_config(inputs_cfg)
hotpotato = set_defaults(hotpotato)
logger = setup_logger_stdout() # Set logger output to stdout().
# Read .csv files.
N_files = len(hotpotato['csv_list']) # No. of .csv files
logger.info('Total no. of input .csv files = %d'% (N_files))
# Empty arrays to populate in subsequent lines
file_index = np.array([], dtype=int) # File index of candidates
chans = np.array([], dtype=int) # Channel indices of candidates
radiofreqs = np.array([], dtype=np.float64) # Radio frequencies (MHz) of candidate detections
bins = np.array([], dtype=int) # No. of bins across folded profile
widths = np.array([], dtype=int) # No. of samples spanning width of optimal Boxcar filter
periods = np.array([], dtype=np.float64) # Candidate periods (s)
snrs = np.array([], dtype=np.float64) # Candidate S/N
flags = np.array([]) # Harmonic flags assigned to candidates
# Set up separate S/N thresholds for ON and OFF pointings.
snr_cutoff = {'ON': hotpotato['on_cutoff'], 'OFF': hotpotato['off_cutoff']}
for i in range(N_files):
df = pd.read_csv(hotpotato['CSV_DIR']+'/'+hotpotato['csv_list'][i], sep=',')
logger.info('Reading file: %s'% (hotpotato['csv_list'][i]))
cand_chans = np.array(df['Channel'], dtype=int)
cand_radiofreqs = np.array(df['Radio frequency (MHz)'], dtype=np.float64)
cand_bins = np.array(df['Bins'], dtype=int)
cand_widths = np.array(df['Best width'], dtype=int)
cand_periods = np.array(df['Period (s)'], dtype=np.float64)
cand_snrs = np.array(df['S/N'], dtype=np.float64)
cand_flags = np.array(df['Harmonic flag'])
# Only retain fundamental periods with associated S/N values exceeding user-specified threshold.
print('Label = %s'% (hotpotato['labels'][i]))
threshold = snr_cutoff[hotpotato['labels'][i].upper()]
print('S/N threshold applied = %.2f'% (threshold))
filtered_indices = np.where(np.logical_and(cand_snrs>=threshold, cand_flags=='F'))[0]
cand_chans = cand_chans[filtered_indices]
cand_radiofreqs = cand_radiofreqs[filtered_indices]
cand_bins = cand_bins[filtered_indices]
cand_widths = cand_widths[filtered_indices]
cand_periods = cand_periods[filtered_indices]
cand_snrs = cand_snrs[filtered_indices]
cand_flags = cand_flags[filtered_indices]
N_cands = len(cand_chans)
print('No. of candidates = %d \n'% (N_cands))
# Append to grand arrays.
file_index = np.append(file_index, np.ones(N_cands)*i)
chans = np.append(chans, cand_chans)
radiofreqs = np.append(radiofreqs, cand_radiofreqs)
bins = np.append(bins, cand_bins)
widths = np.append(widths, cand_widths)
periods = np.append(periods, cand_periods)
snrs = np.append(snrs, cand_snrs)
flags = np.append(flags, cand_flags)
logger.info('Final no. of candidates across all input files = %d \n'% (len(periods)))
# Open output csvfile.
create_dir(hotpotato['OUTPUT_DIR'])
f = open(hotpotato['OUTPUT_DIR']+'/'+hotpotato['basename']+'_comparecands.csv', 'w')
writer = csv.writer(f, delimiter=',')
header = ['Channel', 'Radio frequency (MHz)', 'Bins', 'Best width', 'Period (s)', 'S/N', 'Code']
writer.writerow(header) # Write header row.
# Loop over channels.
unique_chans =
|
np.unique(chans)
|
numpy.unique
|
import math
import numpy as np
import tensorflow as tf
# constants from game rules
MAX_MAP_SIDE = 32
WOOD_FUEL_VALUE = 1
COAL_FUEL_VALUE = 5
URAN_FUEL_VALUE = 20
COAL_RESEARCH_POINTS = 50
URAN_RESEARCH_POINTS = 200
MAX_ROAD = 6
WORKERS_CARGO = 100
CART_CARGO = 2000
DAY_LENGTH = 30
NIGHT_LENGTH = 10
MAX_DAYS = 360
CYCLE_LENGTH = DAY_LENGTH + NIGHT_LENGTH
TOTAL_CYCLES = MAX_DAYS / (DAY_LENGTH + NIGHT_LENGTH)
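# Added clarifying comment: with the rule constants above, CYCLE_LENGTH = 40 and
# TOTAL_CYCLES = 360 / 40 = 9 full day/night cycles per game.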
# constants for quasi normalization, some variables can be larger than 1
# resources and fuel
WOOD_BOUND = 500
COAL_BOUND = 500
URAN_BOUND = 500
FUEL_BOUND = 10000
# units and cities
UNITS_BOUND = 50
WORKERS_BOUND = UNITS_BOUND
CARTS_BOUND = UNITS_BOUND
CITY_TILES_BOUND = UNITS_BOUND # since the units number is limited by the city tiles number
CITY_TILES_IN_CITY_BOUND = 25
# from https://www.kaggle.com/c/lux-ai-2021/discussion/265886
UPKEEP_BOUND = 10 * CITY_TILES_IN_CITY_BOUND + 20 * math.sqrt(CITY_TILES_IN_CITY_BOUND)
UPKEEP_BOUND_PER_TILE = UPKEEP_BOUND / CITY_TILES_IN_CITY_BOUND
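# Added clarifying comment: with CITY_TILES_IN_CITY_BOUND = 25 this gives
# UPKEEP_BOUND = 10 * 25 + 20 * 5 = 350 and UPKEEP_BOUND_PER_TILE = 14.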
CITIES_BOUND = 5
units_actions_dict = {}
def to_binary(d, m=8):
"""
Args:
d: is an array of decimal numbers to convert to binary
m: is a number of positions in a binary number, 8 is enough for up to 256 decimal, 256 is 2^8
Returns:
np.ndarray of binary representation of d
"""
reversed_order = ((d[:, None] & (1 << np.arange(m))) > 0).astype(np.half)
return np.fliplr(reversed_order)
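# Illustrative usage (added comment, not in the original module):
#   to_binary(np.array([5]), m=4) -> [[0., 1., 0., 1.]]
# i.e. the most significant bit ends up in the left-most column.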
def get_timing(turn):
current_cycle = turn // CYCLE_LENGTH
turns_before_current_cycle = current_cycle * CYCLE_LENGTH
turns_in_cycle = turn - turns_before_current_cycle
to_next_day = CYCLE_LENGTH - turns_in_cycle
if turns_in_cycle < DAY_LENGTH:
is_night = 0
to_next_night = DAY_LENGTH - turns_in_cycle
else:
is_night = 1
to_next_night = to_next_day + DAY_LENGTH
return current_cycle + 1, to_next_day, to_next_night, is_night
def test_get_timing():
for turn in range(360):
current_cycle, to_next_day, to_next_night, is_night = get_timing(turn)
print(f"Current turn: {turn}; current cycle: {current_cycle}; "
f"to next day: {to_next_day}; to next night: {to_next_night}; is night: {is_night}")
def process(observation, current_game_state):
"""
Args:
observation: An observation, which agents get as an input from kaggle environment.
current_game_state: An object provided by kaggle to simplify game info extraction.
Returns:
processed_observations: A prepared observation to save to the buffer.
"""
global units_actions_dict
player = current_game_state.players[observation.player]
opponent = current_game_state.players[(observation.player + 1) % 2]
width, height = current_game_state.map.width, current_game_state.map.height
shift = int((MAX_MAP_SIDE - width) / 2) # to make all feature maps 32x32
turn = current_game_state.turn
player_units_coords = {}
player_city_tiles_coords = {}
player_research_points = player.research_points
player_city_tiles_count = player.city_tile_count
player_cities_count = len(player.cities)
player_units_count = len(player.units)
player_workers_count = 0
player_carts_count = 0
for unit in player.units:
if unit.is_worker():
player_workers_count += 1
elif unit.is_cart():
player_carts_count += 1
else:
raise ValueError
opponent_research_points = opponent.research_points
opponent_city_tiles_count = opponent.city_tile_count
opponent_cities_count = len(opponent.cities)
opponent_units_count = len(opponent.units)
opponent_workers_count = 0
opponent_carts_count = 0
for unit in opponent.units:
if unit.is_worker():
opponent_workers_count += 1
elif unit.is_cart():
opponent_carts_count += 1
else:
raise ValueError
current_cycle, to_next_day, to_next_night, is_night = get_timing(turn)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is wood
# 2 - wood amount
# 3 - is coal
# 4 - coal amount
# 5 - is uranium
# 6 - uranium amount
# 7 - fuel equivalent
# 8 - if a resource is available for the player, 1 when ready
# 9 - a road lvl
# 10 - 19 for coordinates
# number_of_resources_layers = 20
# A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# for yy in range(height):
# for xx in range(width):
# cell = current_game_state.map.get_cell(xx, yy)
# x, y = yy + shift, xx + shift
# if cell.has_resource():
# A1[0, x, y] = 1 # a resource at the point
# resource = cell.resource
# if resource.type == "wood":
# A1[1, x, y] = 1
# wood_amount = resource.amount
# A1[2, x, y] = wood_amount / WOOD_BOUND
# fuel = wood_amount * WOOD_FUEL_VALUE
# A1[8, x, y] = 1 # wood is always available
# elif resource.type == "coal":
# A1[3, x, y] = 1
# coal_amount = resource.amount
# A1[4, x, y] = coal_amount / COAL_BOUND
# fuel = coal_amount * COAL_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / COAL_RESEARCH_POINTS, 1)
# elif resource.type == "uranium":
# A1[5, x, y] = 1
# uran_amount = resource.amount
# A1[6, x, y] = uran_amount / URAN_BOUND
# fuel = uran_amount * URAN_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / URAN_RESEARCH_POINTS, 1)
# else:
# raise ValueError
# A1[7, x, y] = fuel / FUEL_BOUND
# A1[9, x, y] = cell.road / MAX_ROAD
# A1[10:15, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
# A1[15:20, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is available
# 2 - amount
# 3 - fuel equivalent
# 4 - a road lvl
# 5 - 14 for coordinates
# 15 - next available resource
number_of_resources_layers = 16
A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
for yy in range(height):
for xx in range(width):
cell = current_game_state.map.get_cell(xx, yy)
x, y = yy + shift, xx + shift
if cell.has_resource():
A1[0, x, y] = 1 # a resource at the point
resource = cell.resource
fuel = 0
if resource.type == "wood":
A1[1, x, y] = 1
wood_amount = resource.amount
A1[2, x, y] = wood_amount / WOOD_BOUND
fuel = wood_amount * WOOD_FUEL_VALUE
elif resource.type == "coal":
if player_research_points >= COAL_RESEARCH_POINTS:
A1[1, x, y] = 1
coal_amount = resource.amount
A1[2, x, y] = coal_amount / COAL_BOUND
fuel = coal_amount * COAL_FUEL_VALUE
else:
A1[15, x, y] = 1
elif resource.type == "uranium":
if player_research_points >= URAN_RESEARCH_POINTS:
A1[1, x, y] = 1
uran_amount = resource.amount
A1[2, x, y] = uran_amount / URAN_BOUND
fuel = uran_amount * URAN_FUEL_VALUE
elif player_research_points >= URAN_RESEARCH_POINTS - 50:
A1[15, x, y] = 1
else:
raise ValueError
A1[3, x, y] = fuel / FUEL_BOUND
A1[4, x, y] = cell.road / MAX_ROAD
A1[5:10, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
A1[10:15, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# define city tiles, 0 or 1 for bool, 0 to around 1 for float;
# layers:
number_of_main_layers = 39
A2 =
|
np.zeros((number_of_main_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
|
numpy.zeros
|
# --*-- coding: utf-8 --*--
# Copyright (c) 2020 Guangzhou fermion Technology Co.,Ltd. All rights reserved.
# created by Ben 2020/3/13 at 9:40 AM
from sklearn.metrics import accuracy_score, roc_auc_score, recall_score, precision_score, f1_score, \
mean_absolute_error, precision_recall_curve, auc, mean_squared_error, r2_score
import numpy as np
try:
import tensorflow as tf
except:
pass
def maskNan(yPred, yMask, logger=None):
if logger and np.sum(np.isnan(yPred)) > 0:
logger.warn('yPred existed nan,mask')
yMask = np.clip(yMask - np.isnan(yPred), 0, 1)
return yMask
def prc_auc_score(yTrue, yPred):
precision, recall, _ = precision_recall_curve(yTrue, yPred)
prc_auc = auc(recall, precision)
return prc_auc
def root_mean_squared_error(yTrue, yPred):
rmse = mean_squared_error(yTrue, yPred) ** 0.5
return rmse
def fn_roc_auc(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
roc_auc_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_i = yPred[:, i][yMask_i != 0]
# ----------------------------------------
yTrue_mean = np.mean(yTrue_i)
if yTrue_mean > 0.0 and yTrue_mean < 1.0:
roc_auc_list.append(roc_auc_score(yTrue_i, yPred_i))
if len(roc_auc_list) > 0:
roc_auc = np.mean(roc_auc_list)
else:
roc_auc = 0.0
return roc_auc
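# Illustrative usage (added comment, not in the original module): the metric is
# averaged over the task columns that have at least one unmasked row, e.g.
#   yTrue = [[1, 0], [0, 1]]
#   yPred = [[0.9, 0.4], [0.2, 0.6]]
#   yMask = [[1, 0], [1, 0]]
#   fn_roc_auc(yTrue, yPred, yMask) -> 1.0   # only column 0 contributes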
def fn_accuracy(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yPred_int = np.int64(yPred > 0.5)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
accuracy_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_int_i = yPred_int[:, i][yMask_i != 0]
accuracy_list.append(accuracy_score(yTrue_i, yPred_int_i))
if len(accuracy_list) > 0:
accuracy = np.mean(accuracy_list)
else:
accuracy = 0.0
return accuracy
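# Small self-contained sanity check for the masked metric helpers above (the toy arrays
# are illustrative only and are not part of the original module): column 1 is fully
# masked out, so only column 0 contributes to the reported scores.
def _demo_masked_metrics():
    yTrue = np.array([[1, 0], [0, 1], [1, 0], [0, 0]])
    yPred = np.array([[0.9, 0.2], [0.1, 0.7], [0.8, 0.4], [0.3, 0.1]])
    yMask = np.array([[1, 0], [1, 0], [1, 0], [1, 0]])
    return fn_roc_auc(yTrue, yPred, yMask), fn_accuracy(yTrue, yPred, yMask)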
def fn_prc_auc(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
prc_auc_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_i = yPred[:, i][yMask_i != 0]
yTrue_mean = np.mean(yTrue_i)
if yTrue_mean > 0.0 and yTrue_mean < 1.0:
prc_auc_list.append(prc_auc_score(yTrue_i, yPred_i))
# =====================================
if len(prc_auc_list) > 0:
prc_auc = np.mean(prc_auc_list)
else:
prc_auc = 0.0
return prc_auc
def fn_recall(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yPred_int = np.int64(yPred > 0.5)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
recall_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_int_i = yPred_int[:, i][yMask_i != 0]
# ----------------------------------------
recall_list.append(recall_score(yTrue_i, yPred_int_i))
if len(recall_list) > 0:
recall = np.mean(recall_list)
else:
recall = 0.0
return recall
def fn_precision(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yPred_int = np.int64(yPred > 0.5)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
precision_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_int_i = yPred_int[:, i][yMask_i != 0]
precision_list.append(precision_score(yTrue_i, yPred_int_i))
if len(precision_list) > 0:
precision = np.mean(precision_list)
else:
precision = 0.0
return precision
def fn_f1(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.int64)
yPred = np.array(yPred, dtype=np.float64)
yPred_int = np.int64(yPred > 0.5)
yMask = np.array(yMask, dtype=np.int64)
# =====================================
f1_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
if np.mean(yMask_i) > 0.0:
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_int_i = yPred_int[:, i][yMask_i != 0]
f1_list.append(f1_score(yTrue_i, yPred_int_i))
if len(f1_list) > 0:
f1 = np.mean(f1_list)
else:
f1 = 0.0
return f1
def fn_rmse(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.float64)
yPred = np.array(yPred, dtype=np.float64)
yMask = np.array(yMask, dtype=np.int64)
rmse_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_i = yPred[:, i][yMask_i != 0]
if np.mean(yMask_i) > 0.0:
rmse_list.append(root_mean_squared_error(yTrue_i, yPred_i))
if len(rmse_list) > 0:
rmse = np.mean(rmse_list)
else:
rmse = 1e3
return rmse
def fn_mae(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.float64)
yPred = np.array(yPred, dtype=np.float64)
yMask = np.array(yMask, dtype=np.int64)
mae_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_i = yPred[:, i][yMask_i != 0]
if np.mean(yMask_i) > 0.0:
mae_list.append(mean_absolute_error(yTrue_i, yPred_i))
if len(mae_list) > 0:
mae = np.mean(mae_list)
else:
mae = 1e3
return mae
def fn_r2(yTrue, yPred, yMask):
yTrue = np.array(yTrue, dtype=np.float64)
yPred = np.array(yPred, dtype=np.float64)
yMask = np.array(yMask, dtype=np.int64)
r2_list = []
for i in range(yTrue.shape[1]):
yMask_i = yMask[:, i]
yTrue_i = yTrue[:, i][yMask_i != 0]
yPred_i = yPred[:, i][yMask_i != 0]
if np.mean(yMask_i) > 0.0:
r2_list.append(r2_score(yTrue_i, yPred_i))
if len(r2_list) > 0:
r2 = np.mean(r2_list)
else:
r2 = -1.0
return r2
metric_fns = \
{'roc_auc': fn_roc_auc,
'accuracy': fn_accuracy,
'prc_auc': fn_prc_auc,
'recall': fn_recall,
'precision': fn_precision,
'f1': fn_f1,
'rmse': fn_rmse,
'mae': fn_mae,
'r2': fn_r2}
metric_betters = \
{'roc_auc': 'max',
'accuracy': 'max',
'prc_auc': 'max',
'recall': 'max',
'precision': 'max',
'f1': 'max',
'rmse': 'min',
'mae': 'min',
'r2': 'max'}
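# Illustrative convenience helper (not part of the original file): use metric_betters to
# decide whether a newly computed metric value improves on the best value seen so far.
def _is_improvement(metric_name, best_value, new_value):
    if metric_betters[metric_name] == 'max':
        return new_value > best_value
    return new_value < best_value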
class Metric():
def __init__(self, metricTypes):
self.metricTypes = metricTypes
self.meanFns = {'loss': tf.metrics.Mean()}
for k in metricTypes:
self.meanFns[k] = tf.metrics.Mean()
self.yTrues = []
self.yPreds = []
self.yMasks = []
def stepUpdate(self, yTrue, yPred, yMask, loss=None, writer=None, step=None, prefix='step', calc=True):
yMask = maskNan(yPred, yMask)
self.yTrues.append(yTrue)
self.yPreds.append(yPred)
self.yMasks.append(yMask)
results = {}
if loss is not None:
results['loss'] = loss
if calc:
for k in self.metricTypes:
results[k] = metric_fns[k](yTrue, yPred, yMask)
for k in results:
self.meanFns[k](results[k])
if writer is not None:
with writer.as_default():
for k in results:
tf.summary.scalar(f'{prefix}/{k}', self.meanFns[k].result(), step=tf.cast(step, dtype=tf.int64))
return self.getMeanResult()
def epochCalc(self):
yTrue = np.concatenate(self.yTrues, axis=0)
yPred = np.concatenate(self.yPreds, axis=0)
        yMask = np.concatenate(self.yMasks, axis=0)
import os
import sys
import csv
import time
import random
import threading
import pickle
import warnings
from os import path
from subprocess import call

import cv2
import cv2 as cv
import h5py
import numpy as np
import scipy.io as sio
import torch

warnings.filterwarnings("ignore")
sys.path.append("../src")
from losses import GazeAngularLoss
def estimateHeadPose(landmarks, face_model, camera, distortion, iterate=True):
ret, rvec, tvec = cv2.solvePnP(face_model[:4], landmarks, camera, distortion, flags=cv2.SOLVEPNP_EPNP)
## further optimize
if iterate:
ret, rvec, tvec = cv2.solvePnP(face_model[:4], landmarks, camera, distortion, rvec, tvec, True)
return rvec, tvec
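# Quick self-contained check of estimateHeadPose on synthetic data. All numbers below are
# invented for illustration (they are not MPIIGaze calibration values): four non-coplanar
# model points are projected with a known pose, which the function should then recover.
def _demo_head_pose():
    model_pts = np.array([[-45.0, -30.0,   0.0],
                          [ 45.0, -30.0,  -5.0],
                          [-30.0,  40.0,  10.0],
                          [ 30.0,  40.0,  25.0]], dtype=np.float64).reshape(4, 1, 3)
    cam = np.array([[960.0, 0.0, 320.0],
                    [0.0, 960.0, 240.0],
                    [0.0, 0.0, 1.0]])
    dist = np.zeros((1, 5))
    rvec_true = np.array([[0.1], [0.2], [0.0]])
    tvec_true = np.array([[10.0], [-5.0], [600.0]])
    img_pts, _ = cv2.projectPoints(model_pts, rvec_true, tvec_true, cam, dist)
    rvec, tvec = estimateHeadPose(img_pts, model_pts, cam, dist)
    return rvec, tvec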
def normalizeData(img, face, hr, ht, cam):
## normalized camera parameters
focal_norm = 960 # focal length of normalized camera
distance_norm = 600 # normalized distance between eye and camera
roiSize = (60, 36) # size of cropped eye image
img_u = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
## compute estimated 3D positions of the landmarks
ht = ht.reshape((3,1))
hR = cv2.Rodrigues(hr)[0] # rotation matrix
# print("HR SHAPE", hR.shape)
# print("FACE SHAPE", face.shape)
Fc = np.dot(hR, face) + ht # 3D positions of facial landmarks
# print("FC SHAPE", Fc.shape)
re = 0.5*(Fc[:,0] + Fc[:,1]).reshape((3,1)) # center of left eye
le = 0.5*(Fc[:,2] + Fc[:,3]).reshape((3,1)) # center of right eye
et = re
distance = np.linalg.norm(et) # actual distance between eye and original camera
z_scale = distance_norm/distance
cam_norm = np.array([
[focal_norm, 0, roiSize[0]/2],
[0, focal_norm, roiSize[1]/2],
[0, 0, 1.0],
])
S = np.array([ # scaling matrix
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, z_scale],
])
hRx = hR[:,0]
forward = (et/distance).reshape(3)
down = np.cross(forward, hRx)
down /= np.linalg.norm(down)
right = np.cross(down, forward)
right /= np.linalg.norm(right)
R = np.c_[right, down, forward].T # rotation matrix R
## normalize each eye
return [re, R]
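# Quick property check (illustrative, not part of the original script): the rotation
# matrix R returned by normalizeData is assembled from an orthonormal basis
# (right, down, forward), so R @ R.T should be close to the 3x3 identity.
def _check_normalization_rotation(R):
    return np.allclose(R @ R.T, np.eye(3), atol=1e-6)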
def process(img, camera_matrix, distortion, annots, curr_img, store_path, por_available=False, show=False):
# process(data, camera_matrix, distortion, annots[pic], curr_img, por_available=True, show=True)
face = sio.loadmat("/content/MPIIGaze/6 points-based face model.mat")["model"]
num_pts = face.shape[1]
facePts = face.T.reshape(num_pts, 1, 3)
img_u = cv2.undistort(img, camera_matrix, distortion)
fx, _, cx, _, fy, cy, _, _, _ = camera_matrix.flatten()
camera_parameters = np.asarray([fx, fy, cx, cy])
# rvec, tvec = self.head_pose_estimator.fit_func(pts, camera_parameters)
s = [int(x) for x in annots[:24]]
landmarks = np.array([[s[0], s[1]], [s[6], s[7]], [s[12], s[13]], [s[18], s[19]]])
landmarks = landmarks.astype(np.float32)
landmarks = landmarks.reshape(4, 1, 2)
hr, ht = estimateHeadPose(landmarks, facePts, camera_matrix, distortion)
g_t = gt = np.array(annots[27:30]).reshape(3, 1)
data = normalizeData(img, face, hr, ht, camera_matrix)
returnv = [data[0], data[1], curr_img]
return returnv
# directions = ['l', 'r', 'u', 'd']
# keys = {'u': 82,
# 'd': 84,
# 'l': 81,
# 'r': 83}
store_path = "/content/Processed/"
os.system("mkdir " + store_path)
path_original = "/content/MPIIGaze/Data/Original"
to_write = {}
output_path = "/content/Drive/MyDrive/MPIIGaze.h5"
def add(key, value): # noqa
if key not in to_write:
to_write[key] = [value]
else:
to_write[key].append(value)
x1 = []
x2 = []
y = []
num_k = 0
for person in os.listdir(path_original):
os.system("mkdir "+ store_path +person+"/" )
num = int(person[1:])
curr_person_path = os.path.join(path_original, person)
intense_arr = []
print("Processing person ", person)
curr_path = os.path.join(curr_person_path, "Calibration")
cameraCalib = sio.loadmat(os.path.join(curr_path, "Camera.mat"))
camera_matrix = cameraCalib['cameraMatrix']
distortion = cameraCalib['distCoeffs']
final_input_dict = []
for day in os.listdir(curr_person_path):
if(day=="Calibration"):
continue
else:
print("Processing Person and day", person + "/" + day)
os.system("mkdir "+ store_path + person + "/"+day+"/")
curr_path = os.path.join(curr_person_path, day)
            annotation_file_path = os.path.join(curr_path, "annotation.txt")
            filea = open(annotation_file_path, 'r')
Lines = filea.readlines()
annots = []
for line in Lines:
annots.append(np.array([float(x) for x in line.split(' ')]))
for img in os.listdir(curr_path):
if(img=="annotation.txt"):
# print(len(annots))
continue
else:
                    curr_img = path_original + "/" + person + "/" + day + "/" + img
stpath = store_path + person + "/"+day+"/" + img
pic = int(img.split('.')[0])-1
# print(curr_img, pic)
                    data = cv2.imread(os.path.join(curr_path, img))  # imread returns BGR; cv2.COLOR_BGR2RGB is not a valid imread flag
# print(data.shape)
ret_v = process(data, camera_matrix, distortion, annots[pic], curr_img, stpath, por_available=True, show=True)
final_input_dict.append(np.array([ret_v[0], ret_v[1], ret_v[2]]))
final_ip = np.array(final_input_dict)
intense_arr = np.array(intense_arr)
np.save("/content/Drive/MyDrive/"+ person + "g", final_ip)
print("Saved output for person ", person)
print("Mean in intensity ", np.mean(intense_arr))
print("Variance in intensity", np.var(intense_arr))
import os
import sys
import time
import random
import threading
import pickle
import warnings
from os import path
from subprocess import call

import cv2
import cv2 as cv
import h5py
import numpy as np
import scipy.io
import torch

warnings.filterwarnings("ignore")

from camera import cam_calibrate
from person_calibration import collect_data, fine_tune
from frame_processor import frame_processer

sys.path.append("../src")
from losses import GazeAngularLoss
def process(gaze_network, img, camera_matrix, distortion, annots, curr_img, store_path, por_available=False, show=False):
# process(data, camera_matrix, distortion, annots[pic], curr_img, por_available=True, show=True)
g_t = None
# data = {'image_a': [], 'gaze_a': [], 'head_a': [], 'R_gaze_a': [], 'R_head_a': []}
face = scipy.io.loadmat("/content/MPIIGaze/6 points-based face model.mat")["model"]
num_pts = face.shape[1]
facePts = face.T.reshape(num_pts, 1, 3)
# img = self.undistorter.apply(img)
img = cv2.undistort(img, camera_matrix, distortion)
img_u = img
# if por_available:
# g_t = targets[frames_read]
# frames_read += 1
# detect face
#face_location = face.detect(img, scale=0.25, use_max='SIZE')
# if (len(face_location) == 0):
# return []
# use kalman filter to smooth bounding box position
# assume work with complex numbers:
# print("number of face location", len(face_location))
# output_tracked = self.kalman_filters[0].update(face_location[0] + 1j * face_location[1])
# face_location[0], face_location[1] = np.real(output_tracked), np.imag(output_tracked)
# output_tracked = self.kalman_filters[1].update(face_location[2] + 1j * face_location[3])
# face_location[2], face_location[3] = np.real(output_tracked), np.imag(output_tracked)
# # detect facial points
# pts = self.landmarks_detector.detect(face_location, img)
# # run Kalman filter on landmarks to smooth them
# for i in range(68):
# kalman_filters_landm_complex = self.kalman_filters_landm[i].update(pts[i, 0] + 1j * pts[i, 1])
# pts[i, 0], pts[i, 1] = np.real(kalman_filters_landm_complex), np.imag(kalman_filters_landm_complex)
# compute head pose
def vector_to_pitchyaw(vectors):
# """Convert given gaze vectors to yaw (theta) and pitch (phi) angles."""
n = vectors.shape[0]
out = np.empty((n, 2))
vectors = np.divide(vectors, np.linalg.norm(vectors, axis=1).reshape(n, 1))
out[:, 0] = np.arcsin(vectors[:, 1]) # theta
        out[:, 1] = np.arctan2(vectors[:, 0], vectors[:, 2])  # phi
#
# Written by <NAME>
# Department of Astronomy, University of Michigan (2009 - )
# Department of Astrophysical Sciences, Princeton University (2005 - 2009)
#
# You can freely use the code.
#
import numpy
import math
def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):
"""Estimating a sky value for a given number of iterations
@type input_arr: numpy array
@param input_arr: image data array
@type sig_fract: float
@param sig_fract: fraction of sigma clipping
@type percent_fract: float
@param percent_fract: convergence fraction
@type max_iter: integer
@param max_iter: max. of iterations
@type low_cut: boolean
@param low_cut: cut out only low values
@type high_cut: boolean
@param high_cut: cut out only high values
@rtype: tuple
@return: (sky value, number of iterations)
"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.median(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
return (new_sky, iteration)
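# Illustrative check of the sigma-clipped sky estimate on synthetic data (the noise level
# and outlier pattern below are invented for this example and are not part of the module).
def _demo_sky_median_clip():
    rng = numpy.random.RandomState(0)
    frame = rng.normal(loc=100.0, scale=5.0, size=(256, 256))  # flat sky plus Gaussian noise
    frame[::32, ::32] += 500.0                                  # a few bright outliers
    sky, n_iter = sky_median_sig_clip(frame, sig_fract=3.0, percent_fract=0.01)
    return sky, n_iter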
def sky_mean_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):
"""Estimating a sky value for a given number of iterations
@type input_arr: numpy array
@param input_arr: image data array
@type sig_fract: float
@param sig_fract: fraction of sigma clipping
@type percent_fract: float
@param percent_fract: convergence fraction
@type max_iter: integer
@param max_iter: max. of iterations
@type low_cut: boolean
@param low_cut: cut out only low values
@type high_cut: boolean
@param high_cut: cut out only high values
@rtype: tuple
@return: (sky value, number of iterations)
"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.mean(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
return (new_sky, iteration)
def range_from_zscale(input_arr, contrast = 1.0, sig_fract = 3.0, percent_fract = 0.01, max_iter=100, low_cut=True, high_cut=True):
"""Estimating ranges with the zscale algorithm
@type input_arr: numpy array
@param input_arr: image data array as sample pixels to derive z-ranges
@type contrast: float
@param contrast: zscale contrast which should be larger than 0.
@type sig_fract: float
@param sig_fract: fraction of sigma clipping
@type percent_fract: float
@param percent_fract: convergence fraction
@type max_iter: integer
@param max_iter: max. of iterations
@type low_cut: boolean
@param low_cut: cut out only low values
@type high_cut: boolean
@param high_cut: cut out only high values
@rtype: tuple
@return: (min. value, max. value, number of iterations)
"""
work_arr = numpy.ravel(input_arr)
work_arr = numpy.sort(work_arr) # sorting is done.
max_ind = len(work_arr) - 1
midpoint_ind = int(len(work_arr)*0.5)
I_midpoint = work_arr[midpoint_ind]
print(".. midpoint index ", midpoint_ind, " I_midpoint ", I_midpoint)
# initial estimation of the slope
x = numpy.array(list(range(0, len(work_arr)))) - midpoint_ind
y = numpy.array(work_arr)
temp = numpy.vstack([x, numpy.ones(len(x))]).T
slope, intercept = numpy.linalg.lstsq(temp, y)[0]
old_slope = slope
print("... slope & intercept ", old_slope, " ", intercept)
# initial clipping
sig = y.std()
upper_limit = I_midpoint + sig_fract * sig
lower_limit = I_midpoint - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
# new estimation of the slope
x = numpy.array(indices[0]) - midpoint_ind
y = numpy.array(work_arr[indices])
temp = numpy.vstack([x, numpy.ones(len(x))]).T
slope, intercept = numpy.linalg.lstsq(temp, y)[0]
new_slope = slope
print("... slope & intercept ", new_slope, " ", intercept)
iteration = 1
# to run the iteration, we need more than 50% of the original input array
while (((math.fabs(old_slope - new_slope)/new_slope) > percent_fract) and (iteration < max_iter)) and (len(y) >= midpoint_ind) :
iteration += 1
old_slope = new_slope
# clipping
sig = y.std()
upper_limit = I_midpoint + sig_fract * sig
lower_limit = I_midpoint - sig_fract * sig
if low_cut and high_cut:
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
else:
if low_cut:
indices = numpy.where((work_arr > lower_limit))
else:
indices = numpy.where((work_arr < upper_limit))
# new estimation of the slope
x = numpy.array(indices[0]) - midpoint_ind
y = work_arr[indices]
temp = numpy.vstack([x, numpy.ones(len(x))]).T
slope, intercept = numpy.linalg.lstsq(temp, y)[0]
new_slope = slope
print("... slope & intercept ", new_slope, " ", intercept)
z1 = I_midpoint + (new_slope / contrast) * (0 - midpoint_ind)
z2 = I_midpoint + (new_slope / contrast) * (max_ind - midpoint_ind)
return (z1, z2, iteration)
def range_from_percentile(input_arr, low_cut=0.25, high_cut=0.25):
"""Estimating ranges with given percentiles
@type input_arr: numpy array
@param input_arr: image data array as sample pixels to derive ranges
@type low_cut: float
@param low_cut: cut of low-value pixels
@type high_cut: float
@param high_cut: cut of high-value pixels
@rtype: tuple
@return: (min. value, max. value)
"""
work_arr = numpy.ravel(input_arr)
work_arr = numpy.sort(work_arr) # sorting is done.
size_arr = len(work_arr)
low_size = int(size_arr * low_cut)
high_size = int(size_arr * high_cut)
z1 = work_arr[low_size]
z2 = work_arr[size_arr - 1 - high_size]
return (z1, z2)
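# Illustrative comparison of the two range estimators on the same synthetic frame
# (the data is invented for this example; note that range_from_zscale prints its progress).
def _demo_display_ranges():
    rng = numpy.random.RandomState(1)
    frame = rng.normal(loc=100.0, scale=5.0, size=(128, 128))
    frame[::16, ::16] += 300.0
    z1, z2, n_iter = range_from_zscale(frame, contrast=0.25)
    p1, p2 = range_from_percentile(frame, low_cut=0.01, high_cut=0.01)
    return (z1, z2, n_iter), (p1, p2)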
def histeq(inputArray, num_bins=1024):
"""Performs histogram equalisation of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type num_bins: int
@param num_bins: number of bins in which to perform the operation (e.g. 1024)
@rtype: numpy array
@return: image data array
"""
imageData=numpy.array(inputArray, copy=True)
# histogram equalisation: we want an equal number of pixels in each intensity range
sortedDataIntensities=numpy.sort(numpy.ravel(imageData))
median=numpy.median(sortedDataIntensities)
# Make cumulative histogram of data values, simple min-max used to set bin sizes and range
dataCumHist=numpy.zeros(num_bins)
minIntensity=sortedDataIntensities.min()
maxIntensity=sortedDataIntensities.max()
histRange=maxIntensity-minIntensity
binWidth=histRange/float(num_bins-1)
for i in range(len(sortedDataIntensities)):
binNumber=int(math.ceil((sortedDataIntensities[i]-minIntensity)/binWidth))
addArray=numpy.zeros(num_bins)
onesArray=numpy.ones(num_bins-binNumber)
onesRange=list(range(binNumber, num_bins))
numpy.put(addArray, onesRange, onesArray)
dataCumHist=dataCumHist+addArray
# Make ideal cumulative histogram
idealValue=dataCumHist.max()/float(num_bins)
idealCumHist=numpy.arange(idealValue, dataCumHist.max()+idealValue, idealValue)
# Map the data to the ideal
for y in range(imageData.shape[0]):
for x in range(imageData.shape[1]):
# Get index corresponding to dataIntensity
intensityBin=int(math.ceil((imageData[y][x]-minIntensity)/binWidth))
# Guard against rounding errors (happens rarely I think)
if intensityBin<0:
intensityBin=0
if intensityBin>len(dataCumHist)-1:
intensityBin=len(dataCumHist)-1
# Get the cumulative frequency corresponding intensity level in the data
dataCumFreq=dataCumHist[intensityBin]
# Get the index of the corresponding ideal cumulative frequency
            idealBin=numpy.searchsorted(idealCumHist, dataCumFreq)
"""
Test Tabular Surrogate Explainer Builder
========================================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import tabular_surrogate_builder
RANDOM_SEED = 42
iris = datasets.load_iris()
x_name, y_name = 'petal length (cm)', 'petal width (cm)'
x_ind = iris.feature_names.index(x_name)
y_ind = iris.feature_names.index(y_name)
X = iris.data[:, [x_ind, y_ind]]  # We only take two features: petal length and petal width
Y = iris.target
tree_clf = DecisionTreeClassifier(
max_depth=5, min_samples_leaf=15, random_state=RANDOM_SEED)
tree_clf.fit(X, Y)
logreg_clf = LogisticRegression(random_state=RANDOM_SEED)
logreg_clf.fit(X, Y)
def test_tabular_blimey():
"""Tests bLIMEy explanations."""
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
class_map = {cls: i for i, cls in enumerate(iris.target_names)}
instances = {
'setosa': np.array([1.5, 0.25]),
'versicolor': np.array([4.5, 1.25]),
'virginica': np.array([5.5, 2.25])
}
models = {
'tree-intercept': (tree_clf.predict, True),
'tree-no-intercept': (tree_clf.predict, False),
'logreg-intercept': (logreg_clf.predict, True),
'logreg-no-intercept': (logreg_clf.predict, False)
}
samples = [
[0, 3, 6, 9, 12, 15, 18, 21, 24],
[12, 6, 24, 0, 15, 9, 3, 21, 18]
]
x_bins, y_bins = [1, 2.5, 3.3, 6], [.5, 1.5, 2]
discs = []
for i, ix in enumerate(x_bins):
for iix in x_bins[i + 1:]: # X-axis
for j, jy in enumerate(y_bins): # Y-axis
for jjy in y_bins[j + 1:]:
discs.append({
0: [ix, iix],
1: [jy, jjy]
})
for inst_i, inst in instances.items():
for samples_no_i, samples_no in enumerate(samples):
for cls, cls_i in class_map.items():
for disc_i, disc in enumerate(discs):
for model_i, (pred_fn, intercept) in models.items():
disc_x = [x_min] + disc[0] + [x_max]
disc_y = [y_min] + disc[1] + [y_max]
data = tabular_surrogate_builder._generate_data(
samples_no, disc_x, disc_y, RANDOM_SEED)
exp = tabular_surrogate_builder.build_tabular_blimey(
inst, cls_i, data, pred_fn, disc, intercept, RANDOM_SEED)
key = '{}&{}&{}&{}&{}'.format(
inst_i, samples_no_i, cls, disc_i, model_i)
assert np.allclose(
exp,
EXP[key],
atol=.001,
equal_nan=True
)
EXP = {
'setosa&0&setosa&0&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&0&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&0&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&0&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&1&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&1&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&1&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&1&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&2&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&2&logreg-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&logreg-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&3&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&3&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&3&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&3&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&4&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&4&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&4&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&4&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&5&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&5&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&5&logreg-intercept': np.array([1.3891063829787214, 0.17719148936170231]),
'setosa&0&setosa&5&logreg-no-intercept': np.array([0.9868421052631579, -0.32154605263157887]),
'setosa&0&setosa&6&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&6&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&6&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&6&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&7&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&7&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&7&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&7&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&8&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&8&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&8&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&8&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&9&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&9&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&9&logreg-intercept': np.array([1.6393728222996513, 0.07259001161440241]),
'setosa&0&setosa&9&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&10&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&10&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&10&logreg-intercept': np.array([1.6585365853658536, 0.11382113821138252]),
'setosa&0&setosa&10&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&11&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&11&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&11&logreg-intercept': np.array([1.7238675958188177, 0.1634727061556331]),
'setosa&0&setosa&11&logreg-no-intercept': np.array([1.3610354223433239, -0.6171662125340599]),
'setosa&0&setosa&12&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&12&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&12&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&12&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&13&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&13&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&13&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&13&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&14&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&14&logreg-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&logreg-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&15&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&15&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&15&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&15&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&16&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&16&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&16&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&16&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&17&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&17&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&17&logreg-intercept': np.array([1.61759581881533, 0.05603948896631865]),
'setosa&0&setosa&17&logreg-no-intercept': np.array([1.2084468664850134, -0.8242506811989099]),
'setosa&0&versicolor&0&tree-intercept': np.array([-1.4066382978723382, 0.10485106382978716]),
'setosa&0&versicolor&0&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&0&logreg-intercept': np.array([-1.2977264437689953, 0.008778115501519752]),
'setosa&0&versicolor&0&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&1&tree-intercept': np.array([-1.3062613981762905, 0.22930091185410326]),
'setosa&0&versicolor&1&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&1&logreg-intercept': np.array([-1.2642674772036455, 0.0502613981762915]),
'setosa&0&versicolor&1&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&2&tree-intercept': np.array([-1.105507598784193, 0.47820060790273566]),
'setosa&0&versicolor&2&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&2&logreg-intercept': np.array([-1.2699574468085086, 0.19727659574468057]),
'setosa&0&versicolor&2&logreg-no-intercept': np.array([-1.1710526315789473, 0.3199013157894736]),
'setosa&0&versicolor&3&tree-intercept': np.array([-1.189398176291792, 0.13291185410334339]),
'setosa&0&versicolor&3&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&3&logreg-intercept': np.array([-1.223610942249238, 0.04252887537993902]),
'setosa&0&versicolor&3&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&4&tree-intercept': np.array([-1.0890212765957425, 0.25736170212765935]),
'setosa&0&versicolor&4&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&4&logreg-intercept': np.array([-1.15669300911854, 0.12549544072948324]),
'setosa&0&versicolor&4&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&5&tree-intercept': np.array([-0.9263708206686919, 0.5505896656534954]),
'setosa&0&versicolor&5&tree-no-intercept': np.array([-1.0394736842105263, 0.4103618421052631]),
'setosa&0&versicolor&5&logreg-intercept': np.array([-0.9484498480243149, 0.2150759878419453]),
'setosa&0&versicolor&5&logreg-no-intercept': np.array([-0.9342105263157895, 0.2327302631578947]),
'setosa&0&versicolor&6&tree-intercept': np.array([-0.6081945288753788, 0.15873556231003033]),
'setosa&0&versicolor&6&tree-no-intercept': np.array([-0.4078947368421052, 0.40707236842105254]),
'setosa&0&versicolor&6&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&6&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&7&tree-intercept': np.array([-0.5459209726443761, 0.32751367781155016]),
'setosa&0&versicolor&7&tree-no-intercept': np.array([-0.4605263157894736, 0.4333881578947368]),
'setosa&0&versicolor&7&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&7&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&8&tree-intercept': np.array([-0.4594772036474156, 0.7093981762917936]),
'setosa&0&versicolor&8&tree-no-intercept': np.array([-0.618421052631579, 0.5123355263157895]),
'setosa&0&versicolor&8&logreg-intercept': np.array([0.4359878419452881, 0.15392097264437712]),
'setosa&0&versicolor&8&logreg-no-intercept': np.array([-0.0921052631578948, -0.5008223684210525]),
'setosa&0&versicolor&9&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&9&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&9&logreg-intercept': np.array([-1.5165505226480849, 0.10075493612078955]),
'setosa&0&versicolor&9&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&10&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&10&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&10&logreg-intercept': np.array([-1.4973867595818822, 0.1419860627177698]),
'setosa&0&versicolor&10&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&11&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&11&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&11&logreg-intercept': np.array([-1.4590592334494785, 0.22444831591173042]),
'setosa&0&versicolor&11&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&12&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&12&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&12&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&12&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&13&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&13&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&13&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&13&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&14&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&14&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&14&logreg-intercept': np.array([-0.8780487804878059, 0.2926829268292682]),
'setosa&0&versicolor&14&logreg-no-intercept': np.array([-0.9931880108991824, 0.04495912806539504]),
'setosa&0&versicolor&15&tree-intercept': np.array([-1.4155052264808363, 0.2575493612078976]),
'setosa&0&versicolor&15&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&15&logreg-intercept': np.array([-1.0635888501742157, 0.7116724738675959]),
'setosa&0&versicolor&15&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&16&tree-intercept': np.array([-1.300522648083622, 0.5049361207897793]),
'setosa&0&versicolor&16&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&16&logreg-intercept': np.array([-1.0444250871080138, 0.7529036004645763]),
'setosa&0&versicolor&16&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&17&tree-intercept': np.array([-1.184668989547039, 0.9663182346109179]),
'setosa&0&versicolor&17&tree-no-intercept': np.array([-1.2329700272479562, 0.8623978201634876]),
'setosa&0&versicolor&17&logreg-intercept': np.array([-0.5331010452961679, 0.36817653890824636]),
'setosa&0&versicolor&17&logreg-no-intercept': np.array([-0.6934604904632151, 0.02316076294277924]),
'setosa&0&virginica&0&tree-intercept': np.array([-0.16729483282674798, -0.20741641337386058]),
'setosa&0&virginica&0&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&0&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&0&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&1&tree-intercept': np.array([-0.26767173252279597, -0.3318662613981765]),
'setosa&0&virginica&1&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&1&logreg-intercept': np.array([-0.23705775075987823, -0.2168753799392096]),
'setosa&0&virginica&1&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&2&tree-intercept': np.array([-0.4684255319148932, -0.5807659574468084]),
'setosa&0&virginica&2&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&2&logreg-intercept': np.array([-0.30397568389057705, -0.2998419452887544]),
'setosa&0&virginica&2&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&3&tree-intercept': np.array([-0.12919148936170174, -0.2517446808510638]),
'setosa&0&virginica&3&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&3&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&3&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&4&tree-intercept': np.array([-0.2295683890577505, -0.37619452887538046]),
'setosa&0&virginica&4&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&4&logreg-intercept': np.array([-0.27051671732522753, -0.2583586626139816]),
'setosa&0&virginica&4&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&5&tree-intercept': np.array([-0.39221884498480175, -0.6694224924012161]),
'setosa&0&virginica&5&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&5&logreg-intercept': np.array([-0.4406565349544069, -0.3922674772036473]),
'setosa&0&virginica&5&logreg-no-intercept': np.array([-0.618421052631579, -0.6126644736842104]),
'setosa&0&virginica&6&tree-intercept': np.array([-0.09108814589665652, -0.296072948328267]),
'setosa&0&virginica&6&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&6&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&6&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&7&tree-intercept': np.array([-0.15336170212765957, -0.46485106382978736]),
'setosa&0&virginica&7&tree-no-intercept': np.array([-0.4078947368421052, -0.7804276315789473]),
'setosa&0&virginica&7&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&7&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&8&tree-intercept': np.array([-0.23980547112461933, -0.8467355623100313]),
'setosa&0&virginica&8&tree-no-intercept': np.array([-0.2500000000000001, -0.8593749999999999]),
'setosa&0&virginica&8&logreg-intercept': np.array([-1.3172522796352566, -0.29013981762917995]),
'setosa&0&virginica&8&logreg-no-intercept': np.array([-0.9605263157894737, 0.15213815789473678]),
'setosa&0&virginica&9&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&9&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&9&logreg-intercept': np.array([-0.12282229965156827, -0.17334494773519163]),
'setosa&0&virginica&9&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&10&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&10&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&10&logreg-intercept': np.array([-0.1611498257839721, -0.25580720092915205]),
'setosa&0&virginica&10&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&11&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&11&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&11&logreg-intercept': np.array([-0.2648083623693382, -0.3879210220673635]),
'setosa&0&virginica&11&logreg-no-intercept': np.array([-0.45367847411444123, -0.7942779291553135]),
'setosa&0&virginica&12&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&12&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&12&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&12&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&13&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&13&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&13&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&13&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&14&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&14&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&14&logreg-intercept': np.array([-1.0270034843205562, -0.30052264808362433]),
'setosa&0&virginica&14&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&0&virginica&15&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&15&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&15&logreg-intercept': np.array([-0.6297909407665501, -0.7186411149825784]),
'setosa&0&virginica&15&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&16&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&16&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&16&logreg-intercept': np.array([-0.6489547038327523, -0.7598722415795597]),
'setosa&0&virginica&16&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&17&tree-intercept': np.array([-0.36498257839721343, -0.9973867595818819]),
'setosa&0&virginica&17&tree-no-intercept': np.array([-0.3174386920980926, -0.8950953678474115]),
'setosa&0&virginica&17&logreg-intercept': np.array([-1.0844947735191648, -0.4242160278745647]),
'setosa&0&virginica&17&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&1&setosa&0&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&0&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&0&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&0&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&1&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&1&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&1&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&1&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&2&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&2&logreg-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&logreg-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&3&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&3&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&3&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&3&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&4&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&4&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&4&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&4&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&5&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&5&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&5&logreg-intercept': np.array([1.573902132998746, -0.03061480552070302]),
'setosa&1&setosa&5&logreg-no-intercept': np.array([1.005714285714286, -0.49142857142857155]),
'setosa&1&setosa&6&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&6&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&6&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&6&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&7&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&7&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&7&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&7&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&8&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&8&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&8&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&8&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&9&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&9&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&9&logreg-intercept': np.array([1.6711804758626017, 0.16922959222706882]),
'setosa&1&setosa&9&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&10&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&10&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&10&logreg-intercept': np.array([1.6976512891133129, 0.2012087828016237]),
'setosa&1&setosa&10&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&11&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&11&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&11&logreg-intercept': np.array([1.7614566597812, 0.2204881034350868]),
'setosa&1&setosa&11&logreg-no-intercept': np.array([1.0559646539027983, -0.6318114874815908]),
'setosa&1&setosa&12&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&12&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&12&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&12&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&13&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&13&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&13&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&13&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&14&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&14&logreg-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&logreg-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&15&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&15&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&15&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&15&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&16&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&16&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&16&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&16&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&17&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&17&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&17&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&17&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&versicolor&0&tree-intercept': np.array([-1.1872020075282315, 0.5111668757841914]),
'setosa&1&versicolor&0&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&0&logreg-intercept': np.array([-1.2996235884567129, 0.25621079046424133]),
'setosa&1&versicolor&0&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&1&tree-intercept': np.array([-0.9322459222082816, 0.7179422835633638]),
'setosa&1&versicolor&1&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&1&logreg-intercept': np.array([-1.2358845671267256, 0.30790464240903437]),
'setosa&1&versicolor&1&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&2&tree-intercept': np.array([-0.8047678795483063, 0.8213299874529498]),
'setosa&1&versicolor&2&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&2&logreg-intercept': np.array([-1.2752823086574652, 0.35784190715181996]),
'setosa&1&versicolor&2&logreg-no-intercept': np.array([-1.1725714285714288, 0.44114285714285734]),
'setosa&1&versicolor&3&tree-intercept': np.array([-0.9749058971141784, 0.6140526976160612]),
'setosa&1&versicolor&3&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&3&logreg-intercept': np.array([-1.16060225846926, 0.35006273525721504]),
'setosa&1&versicolor&3&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&4&tree-intercept': np.array([-0.7199498117942287, 0.8208281053952332]),
'setosa&1&versicolor&4&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&4&logreg-intercept': np.array([-0.9693851944792976, 0.5051442910915944]),
'setosa&1&versicolor&4&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&5&tree-intercept': np.array([-0.6328732747804271, 0.9575909661229628]),
'setosa&1&versicolor&5&tree-no-intercept': np.array([-1.0582857142857145, 0.6125714285714288]),
'setosa&1&versicolor&5&logreg-intercept': np.array([-0.8577164366373908, 0.44767879548306233]),
'setosa&1&versicolor&5&logreg-no-intercept': np.array([-0.9531428571428574, 0.3702857142857144]),
'setosa&1&versicolor&6&tree-intercept': np.array([-0.3229611041405272, 0.7711417816813058]),
'setosa&1&versicolor&6&tree-no-intercept': np.array([-0.3874285714285715, 0.7188571428571429]),
'setosa&1&versicolor&6&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&6&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&7&tree-intercept': np.array([-0.10840652446675068, 1.0112923462986212]),
'setosa&1&versicolor&7&tree-no-intercept': np.array([-0.43657142857142867, 0.7451428571428572]),
'setosa&1&versicolor&7&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&7&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&8&tree-intercept': np.array([-0.14253450439146828, 1.2481806775407795]),
'setosa&1&versicolor&8&tree-no-intercept': np.array([-0.6331428571428572, 0.8502857142857143]),
'setosa&1&versicolor&8&logreg-intercept': np.array([0.5741530740276033, 0.2735257214554584]),
'setosa&1&versicolor&8&logreg-no-intercept': np.array([-0.13371428571428576, -0.30057142857142854]),
'setosa&1&versicolor&9&tree-intercept': np.array([-1.2442812332644855, 0.7511284522989824]),
'setosa&1&versicolor&9&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&9&logreg-intercept': np.array([-1.316196159436929, 0.20182082472649332]),
'setosa&1&versicolor&9&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&10&tree-intercept': np.array([-0.9531022875066961, 1.1028995486190813]),
'setosa&1&versicolor&10&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&10&logreg-intercept': np.array([-1.1838420931833873, 0.3617167775992656]),
'setosa&1&versicolor&10&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&11&tree-intercept': np.array([-0.6883941549996199, 1.4226914543646265]),
'setosa&1&versicolor&11&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&11&logreg-intercept': np.array([-1.2103129064340965, 0.3297375870247106]),
'setosa&1&versicolor&11&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&12&tree-intercept': np.array([-1.2442812332644855, 0.7511284522989824]),
'setosa&1&versicolor&12&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&12&logreg-intercept': np.array([-0.8505852650906629, 0.8221253155841185]),
'setosa&1&versicolor&12&logreg-no-intercept': np.array([-1.0471281296023565, 0.5846833578792343]),
'setosa&1&versicolor&13&tree-intercept': np.array([-0.9531022875066961, 1.1028995486190813]),
'setosa&1&versicolor&13&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&13&logreg-intercept': np.array([-0.8505852650906629, 0.8221253155841185]),
'setosa&1&versicolor&13&logreg-no-intercept': np.array([-1.0471281296023565, 0.5846833578792343]),
'setosa&1&versicolor&14&tree-intercept': np.array([-0.6883941549996199, 1.4226914543646265]),
'setosa&1&versicolor&14&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&14&logreg-intercept': np.array([-0.6998699410909677, 0.4839721520924183]),
'setosa&1&versicolor&14&logreg-no-intercept': np.array([-0.96759941089838, 0.16053019145802655]),
'setosa&1&versicolor&15&tree-intercept': np.array([-1.1082549154617132, 0.8114145818988611]),
'setosa&1&versicolor&15&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&15&logreg-intercept': np.array([-0.5941396985693541, 0.8660393236936744]),
'setosa&1&versicolor&15&logreg-no-intercept': np.array([-0.8026509572901326, 0.6141384388807071]),
'setosa&1&versicolor&16&tree-intercept': np.array([-0.8170759697039257, 1.16318567821896]),
'setosa&1&versicolor&16&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&16&logreg-intercept': np.array([-0.5676688853186463, 0.8980185142682289]),
'setosa&1&versicolor&16&logreg-no-intercept': np.array([-0.8026509572901326, 0.6141384388807071]),
'setosa&1&versicolor&17&tree-intercept': np.array([-0.5523678371968499, 1.4829775839645034]),
'setosa&1&versicolor&17&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&17&logreg-intercept': np.array([-0.3796190039017688, 0.547165480835438]),
'setosa&1&versicolor&17&logreg-no-intercept': np.array([-0.7142857142857143, 0.14285714285714288]),
'setosa&1&virginica&0&tree-intercept': np.array([-0.5736511919698881, -0.4652446675031376]),
'setosa&1&virginica&0&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&0&logreg-intercept': np.array([-0.42183186951066537, -0.2602258469259727]),
'setosa&1&virginica&0&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&1&tree-intercept': np.array([-0.8286072772898374, -0.6720200752823098]),
'setosa&1&virginica&1&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&1&logreg-intercept': np.array([-0.48557089084065264, -0.3119196988707659]),
'setosa&1&virginica&1&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&2&tree-intercept': np.array([-0.9560853199498119, -0.775407779171896]),
'setosa&1&virginica&2&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&2&logreg-intercept': np.array([-0.48557089084065264, -0.3119196988707659]),
'setosa&1&virginica&2&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&3&tree-intercept': np.array([-0.5332496863237133, -0.4986198243412803]),
'setosa&1&virginica&3&tree-no-intercept': np.array([-0.6765714285714287, -0.6148571428571429]),
'setosa&1&virginica&3&logreg-intercept': np.array([-0.45370138017565914, -0.2860727728983692]),
'setosa&1&virginica&3&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&4&tree-intercept': np.array([-0.7882057716436645, -0.7053952321204522]),
'setosa&1&virginica&4&tree-no-intercept': np.array([-0.6765714285714287, -0.6148571428571429]),
'setosa&1&virginica&4&logreg-intercept': np.array([-0.6449184441656205, -0.44115432873274896]),
'setosa&1&virginica&4&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&5&tree-intercept': np.array([-0.8752823086574644, -0.8421580928481814]),
'setosa&1&virginica&5&tree-no-intercept': np.array([-0.6274285714285718, -0.641142857142857]),
'setosa&1&virginica&5&logreg-intercept': np.array([-0.7161856963613555, -0.41706398996235955]),
'setosa&1&virginica&5&logreg-no-intercept': np.array([-0.7782857142857146, -0.46742857142857136]),
'setosa&1&virginica&6&tree-intercept': np.array([-0.45244667503136793, -0.5653701380175667]),
'setosa&1&virginica&6&tree-no-intercept': np.array([-0.5782857142857144, -0.6674285714285714]),
'setosa&1&virginica&6&logreg-intercept': np.array([-1.2476787954830602, -0.5867001254705156]),
'setosa&1&virginica&6&logreg-no-intercept': np.array([-0.8342857142857144, -0.25142857142857133]),
'setosa&1&virginica&7&tree-intercept': np.array([-0.6670012547051445, -0.8055207026348825]),
'setosa&1&virginica&7&tree-no-intercept': np.array([-0.5291428571428572, -0.6937142857142856]),
'setosa&1&virginica&7&logreg-intercept': np.array([-1.2476787954830602, -0.5867001254705156]),
'setosa&1&virginica&7&logreg-no-intercept': np.array([-0.8342857142857144, -0.25142857142857133]),
'setosa&1&virginica&8&tree-intercept': np.array([-0.6328732747804267, -1.0424090338770402]),
'setosa&1&virginica&8&tree-no-intercept': np.array([-0.3325714285714286, -0.7988571428571429]),
'setosa&1&virginica&8&logreg-intercept': np.array([-1.5289836888331256, -0.12823086574654946]),
'setosa&1&virginica&8&logreg-no-intercept': np.array([-0.9988571428571431, 0.3017142857142859]),
'setosa&1&virginica&9&tree-intercept': np.array([-0.6088287047662795, -0.7355213832147518]),
'setosa&1&virginica&9&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&9&logreg-intercept': np.array([-0.3549843164256765, -0.37105041695356206]),
'setosa&1&virginica&9&logreg-no-intercept': np.array([-0.7731958762886597, -0.8762886597938145]),
'setosa&1&virginica&10&tree-intercept': np.array([-0.9000076505240656, -1.0872924795348506]),
'setosa&1&virginica&10&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&10&logreg-intercept': np.array([-0.5138091959299242, -0.5629255604008893]),
'setosa&1&virginica&10&logreg-no-intercept': np.array([-0.7731958762886597, -0.8762886597938145]),
'setosa&1&virginica&11&tree-intercept': np.array([-1.1647157830311445, -1.4070843852803956]),
'setosa&1&virginica&11&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&11&logreg-intercept': np.array([-0.5511437533471077, -0.5502256904597981]),
'setosa&1&virginica&11&logreg-no-intercept': np.array([-0.7820324005891016, -0.8291605301914582]),
'setosa&1&virginica&12&tree-intercept': np.array([-0.6088287047662795, -0.7355213832147518]),
'setosa&1&virginica&12&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&12&logreg-intercept': np.array([-0.9916609287736264, -0.8511973070155333]),
'setosa&1&virginica&12&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&13&tree-intercept': np.array([-0.9000076505240656, -1.0872924795348506]),
'setosa&1&virginica&13&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&13&logreg-intercept': np.array([-0.9916609287736264, -0.8511973070155333]),
'setosa&1&virginica&13&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&14&tree-intercept': np.array([-1.1647157830311445, -1.4070843852803956]),
'setosa&1&virginica&14&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&14&logreg-intercept': np.array([-1.1532399969397957, -0.46836508300818763]),
'setosa&1&virginica&14&logreg-no-intercept': np.array([-0.9057437407952872, -0.16936671575846834]),
'setosa&1&virginica&15&tree-intercept': np.array([-0.3679902073292049, -0.7682656261953961]),
'setosa&1&virginica&15&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&15&logreg-intercept': np.array([-1.0181317420243343, -0.8831764975900871]),
'setosa&1&virginica&15&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&16&tree-intercept': np.array([-0.6591691530869906, -1.1200367225154955]),
'setosa&1&virginica&16&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&16&logreg-intercept': np.array([-1.044602555275042, -0.9151556881646411]),
'setosa&1&virginica&16&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&17&tree-intercept': np.array([-0.9238772855940695, -1.4398286282610386]),
'setosa&1&virginica&17&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&17&logreg-intercept': np.array([-1.2326524366919196, -0.5643026547318511]),
'setosa&1&virginica&17&logreg-no-intercept': np.array([-0.9057437407952872, -0.16936671575846834]),
'versicolor&0&setosa&0&tree-intercept': np.array([-1.9266055045871562, 5.783107597965657e-16]),
'versicolor&0&setosa&0&tree-no-intercept': np.array([-1.1882193635748137, 0.6438727149627623]),
'versicolor&0&setosa&0&logreg-intercept': np.array([-1.8409785932721714, -0.05333333333333287]),
'versicolor&0&setosa&0&logreg-no-intercept': np.array([-1.08801624915369, 0.6032498307379824]),
'versicolor&0&setosa&1&tree-intercept': np.array([-1.9266055045871562, 5.783107597965657e-16]),
'versicolor&0&setosa&1&tree-no-intercept': np.array([-1.1882193635748137, 0.6438727149627623]),
'versicolor&0&setosa&1&logreg-intercept': np.array([-1.8409785932721714, -0.05333333333333287]),
'versicolor&0&setosa&1&logreg-no-intercept': np.array([-1.08801624915369, 0.6032498307379824]),
'versicolor&0&setosa&2&tree-intercept': np.array([-1.9261016949152545, 0.0061016949152551386]),
'versicolor&0&setosa&2&tree-no-intercept': np.array([-1.3682432432432434, 0.7474662162162162]),
'versicolor&0&setosa&2&logreg-intercept': np.array([-1.9261016949152545, 0.0061016949152551386]),
'versicolor&0&setosa&2&logreg-no-intercept': np.array([-1.3682432432432434, 0.7474662162162162]),
'versicolor&0&setosa&3&tree-intercept': np.array([-1.7125382262996922, 0.02666666666666699]),
'versicolor&0&setosa&3&tree-no-intercept': np.array([-1.147596479350034, 0.5192958700067705]),
'versicolor&0&setosa&3&logreg-intercept': np.array([-1.8042813455657487, 0.026666666666667067]),
'versicolor&0&setosa&3&logreg-no-intercept': np.array([-1.1679079214624237, 0.5815842924847664]),
'versicolor&0&setosa&4&tree-intercept': np.array([-1.7125382262996922, 0.02666666666666699]),
'versicolor&0&setosa&4&tree-no-intercept': np.array([-1.147596479350034, 0.5192958700067705]),
'versicolor&0&setosa&4&logreg-intercept': np.array([-1.8042813455657487, 0.026666666666667067]),
'versicolor&0&setosa&4&logreg-no-intercept': np.array([-1.1679079214624237, 0.5815842924847664]),
'versicolor&0&setosa&5&tree-intercept': np.array([-1.710056497175139, 0.030056497175141528]),
'versicolor&0&setosa&5&tree-no-intercept': np.array([-1.287162162162162, 0.5920608108108109]),
'versicolor&0&setosa&5&logreg-intercept': np.array([-1.7665536723163846, 0.08655367231638454]),
'versicolor&0&setosa&5&logreg-no-intercept': np.array([-1.3277027027027026, 0.6697635135135135]),
'versicolor&0&setosa&6&tree-intercept': np.array([0.7047099771051965, -0.009396133028069199]),
'versicolor&0&setosa&6&tree-no-intercept': np.array([0.2180406212664277, -0.3763440860215054]),
'versicolor&0&setosa&6&logreg-intercept': np.array([0.8863597263210817, 0.014848536982385543]),
'versicolor&0&setosa&6&logreg-no-intercept': np.array([0.3960573476702509, -0.35483870967741943]),
'versicolor&0&setosa&7&tree-intercept': np.array([0.7047099771051965, -0.009396133028069199]),
'versicolor&0&setosa&7&tree-no-intercept': np.array([0.2180406212664277, -0.3763440860215054]),
'versicolor&0&setosa&7&logreg-intercept': np.array([0.8863597263210817, 0.014848536982385543]),
'versicolor&0&setosa&7&logreg-no-intercept': np.array([0.3960573476702509, -0.35483870967741943]),
'versicolor&0&setosa&8&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'versicolor&0&setosa&8&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'versicolor&0&setosa&8&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'versicolor&0&setosa&8&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'versicolor&0&setosa&9&tree-intercept': np.array([-0.8256880733944945, 7.845915558428959e-17]),
'versicolor&0&setosa&9&tree-no-intercept': np.array([-0.9444820582261341, -0.10358835477318894]),
'versicolor&0&setosa&9&logreg-intercept': np.array([-1.1620795107033637, 0.02666666666666693]),
'versicolor&0&setosa&9&logreg-no-intercept': np.array([-1.025727826675694, 0.14556533513879485]),
'versicolor&0&setosa&10&tree-intercept': np.array([-0.8256880733944945, 7.845915558428959e-17]),
'versicolor&0&setosa&10&tree-no-intercept': np.array([-0.9444820582261341, -0.10358835477318894]),
'versicolor&0&setosa&10&logreg-intercept': np.array([-1.1314984709480123, -0.026666666666666502]),
'versicolor&0&setosa&10&logreg-no-intercept': np.array([-1.005416384563304, 0.0832769126607989]),
'versicolor&0&setosa&11&tree-intercept': np.array([-0.8176271186440669, 0.09762711864406831]),
'versicolor&0&setosa&11&tree-no-intercept': np.array([-0.9425675675675675, -0.06841216216216213]),
'versicolor&0&setosa&11&logreg-intercept': np.array([-1.0214689265536716, 0.22146892655367253]),
'versicolor&0&setosa&11&logreg-no-intercept': np.array([-1.0641891891891893, 0.16469594594594594]),
'versicolor&0&setosa&12&tree-intercept': np.array([-0.7200000000000022, -1.280753281207582e-16]),
'versicolor&0&setosa&12&tree-no-intercept': np.array([-0.9110204081632652, -0.19102040816326532]),
'versicolor&0&setosa&12&logreg-intercept': np.array([-0.6666666666666671, -0.026666666666666783]),
'versicolor&0&setosa&12&logreg-no-intercept': np.array([-0.8506122448979593, -0.2106122448979592]),
'versicolor&0&setosa&13&tree-intercept': np.array([-0.7200000000000022, -1.280753281207582e-16]),
'versicolor&0&setosa&13&tree-no-intercept': np.array([-0.9110204081632652, -0.19102040816326532]),
'versicolor&0&setosa&13&logreg-intercept': np.array([-0.6666666666666671, -0.026666666666666783]),
'versicolor&0&setosa&13&logreg-no-intercept': np.array([-0.8506122448979593, -0.2106122448979592]),
'versicolor&0&setosa&14&tree-intercept': np.array([-0.7200000000000022, 0.16513761467889929]),
'versicolor&0&setosa&14&tree-no-intercept': np.array([-0.9014011416709913, -0.12610275038920604]),
'versicolor&0&setosa&14&logreg-intercept': np.array([-0.7200000000000022, 0.16513761467889929]),
'versicolor&0&setosa&14&logreg-no-intercept': np.array([-0.9014011416709913, -0.12610275038920604]),
'versicolor&0&setosa&15&tree-intercept': np.array([-0.5866666666666676, -0.026666666666666575]),
'versicolor&0&setosa&15&tree-no-intercept': np.array([-0.8718367346938776, -0.3118367346938776]),
'versicolor&0&setosa&15&logreg-intercept': np.array([-0.6400000000000003, -1.848990096833507e-16]),
'versicolor&0&setosa&15&logreg-no-intercept': np.array([-0.8914285714285715, -0.2514285714285715]),
'versicolor&0&setosa&16&tree-intercept': np.array([-0.5866666666666676, -0.026666666666666575]),
'versicolor&0&setosa&16&tree-no-intercept': np.array([-0.8718367346938776, -0.3118367346938776]),
'versicolor&0&setosa&16&logreg-intercept': np.array([-0.6400000000000003, -1.848990096833507e-16]),
'versicolor&0&setosa&16&logreg-no-intercept':
|
np.array([-0.8914285714285715, -0.2514285714285715])
|
numpy.array
|
import numpy as np
from MLlib.activations import sigmoid
class MeanSquaredError():
@staticmethod
def loss(X, Y, W):
M = X.shape[0]
return np.sum((np.dot(X, W).T - Y) ** 2) / (2 * M)
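# Shape note (assuming W has shape (n, 1) and Y has shape (1, M) or (M,)):
# np.dot(X, W).T then has shape (1, M), so the squared residuals broadcast
# against Y and the sum is scaled by 1/(2*M).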
@staticmethod
def derivative(X, Y, W):
M = X.shape[0]
return np.dot((
|
np.dot(X, W)
|
numpy.dot
|
# -*- coding: utf-8 -*-
import sys, logging
import numpy as np
from scipy.stats import hypergeom, fisher_exact
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param background: background gene set (a set of identifiers) or the total number of genes in your annotated database (an int).
:returns: zipped columns (term, pvalue, odds ratio, x, n, hits), where
x: number of overlapping genes
n: number of genes in the gene set belonging to each term
hits: names of the overlapping genes.
For 2*2 contingency table:
=============================================================================
                   | in query | not in query | row total
=> in gene_set     |    a     |      b       |   a+b
=> not in gene_set |    c     |      d       |   c+d
   column total    |   a+c    |     b+d      |   a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In Scipy:
for scipy.stats.hypergeom.sf(k, M, n, N, loc=0):
M: the total number of objects,
n: the total number of Type I objects,
k: the random variate, i.e. the number of Type I objects among the N objects
drawn without replacement from the total population.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
query = set(query)
vals = []
# background should be all genes in the annotated database,
# such as GO, KEGG, etc.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
# filter genes that not found in annotated database
query = query.intersection(background)
elif isinstance(background, int):
bg = background
else:
raise ValueError("background should be set or int object")
# number of genes in your query data
k = len(query)
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = set(gene_sets.get(s))
# the category genes should also be restricted to the custom background
if isinstance(background, set):
category = category.intersection(background)
hits = query.intersection(category)
x = len(hits) # overlap hits
if x < 1 : continue
m = len(category)
# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
pval = hypergeom.sf(x-1, bg, m, k)
#oddr, pval2 = odds_ratio_calc(bg, k, m, x)
# expect_count = k*m/bg
# oddr= x / expect_count
# oddr= (x*(bg-m))/(m*(k-x)) # thanks to @sreichl.
oddr= ((x+0.5)*(bg-m+0.5))/((m+0.5)*(k-x+0.5)) # Haldane-Anscombe correction, issue #132
vals.append((s, pval, oddr, x, m, hits))
return zip(*vals)
# def odds_ratio_calc(bg_n, gene_list_n, gene_set_n, overlap_n):
# """
# bg_n = number of background genes
# gene_list_n = number of genes in the gene list (ie query genes)
# gene_set_n = number of genes in the (corrected by background) gene set (eg pathways/GO terms)
# overlap_n = number of genes overlapping with between the (corrected by background) gene set and the gene list
# """
# # make contingency table
# table=np.array([[gene_set_n, bg_n-gene_set_n],[overlap_n, gene_list_n-overlap_n]])
# # perform Fisher's exact test
# oddsratio, pvalue = fisher_exact(table)
# # return (inverse) oddsratio
# return 1/oddsratio, pvalue
def _ecdf(x):
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
def fdrcorrection(pvals, alpha=0.05):
""" benjamini hocheberg fdr correction. inspired by statsmodels
"""
# Implementation adapted from GOATools.
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
ecdffactor = _ecdf(pvals_sorted)
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
pvals_corrected[pvals_corrected>1] = 1
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[pvals_sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[pvals_sortind] = reject
return reject_, pvals_corrected_
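# Worked example (illustrative numbers only):
#   reject, q = fdrcorrection(np.array([0.01, 0.04, 0.03, 0.20]), alpha=0.05)
# gives reject == [True, False, False, False] and
# q ~= [0.04, 0.0533, 0.0533, 0.20] (Benjamini-Hochberg adjusted p-values).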
def multiple_testing_correction(ps, alpha=0.05, method='benjamini-hochberg', **kwargs):
""" correct pvalues for multiple testing and add corrected `q` value
:param ps: list of pvalues
:param alpha: significance level default : 0.05
:param method: multiple testing correction method [bonferroni|benjamini-hochberg]
:returns (q, rej): two lists of q-values and rejected nodes
"""
# Implementation adapted from GOATools.
_p =
|
np.array(ps)
|
numpy.array
|
# Copyright (c) 2017 <NAME>
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import SparseGP
from .. import likelihoods
from .. import kern
from .. import util
from GPy.core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization.param import Param
from paramz.transformations import Logexp
from ..util.linalg import tdot
from .sparse_gp_regression_md import SparseGPRegressionMD
class GPMultioutRegressionMD(SparseGP):
"""Gaussian Process model for multi-output regression with missing data
This is an implementation of Latent Variable Multiple Output
Gaussian Processes (LVMOGP) in [Dai_et_al_2017]_. This model
targets at the use case, in which each output dimension is
observed at a different set of inputs. The model takes a different
data format: the inputs and outputs observations of all the output
dimensions are stacked together correspondingly into two
matrices. An extra array is used to indicate the index of output
dimension for each data point. The output dimensions are indexed
using integers from 0 to D-1 assuming there are D output
dimensions.
.. rubric:: References
.. [Dai_et_al_2017] <NAME>.; <NAME>.; Lawrence, N.D: Efficient Modeling of Latent Information in Supervised Learning using Gaussian Processes. In NIPS, 2017.
:param X: input observations.
:type X: numpy.ndarray
:param Y: output observations, each column corresponding to an output dimension.
:type Y: numpy.ndarray
:param indexD: the array containing the index of output dimension for each data point
:type indexD: numpy.ndarray
:param int Xr_dim: the dimensionality of a latent space, in which output dimensions are embedded in
:param kernel: a GPy kernel for GP of individual output dimensions ** defaults to RBF **
:type kernel: GPy.kern.Kern or None
:param kernel_row: a GPy kernel for the GP of the latent space ** defaults to RBF **
:type kernel_row: GPy.kern.Kern or None
:param Z: inducing inputs
:type Z: numpy.ndarray or None
:param Z_row: inducing inputs for the latent space
:type Z_row: numpy.ndarray or None
:param X_row: the initial value of the mean of the variational posterior distribution of points in the latent space
:type X_row: numpy.ndarray or None
:param Xvariance_row: the initial value of the variance of the variational posterior distribution of points in the latent space
:type Xvariance_row: numpy.ndarray or None
:param num_inducing: a tuple (M, Mr). M is the number of inducing points for GP of individual output dimensions. Mr is the number of inducing points for the latent space.
:type num_inducing: (int, int)
:param int qU_var_r_W_dim: the dimensionality of the covariance of q(U) for the latent space. If it is smaller than the number of inducing points, it represents a low-rank parameterization of the covariance matrix.
:param int qU_var_c_W_dim: the dimensionality of the covariance of q(U) for the GP regression. If it is smaller than the number of inducing points, it represents a low-rank parameterization of the covariance matrix.
:param str init: the choice of initialization: 'GP' or 'rand'. With 'rand', the model is initialized randomly. With 'GP', the model is initialized through the following protocol: (1) fit a sparse GP; (2) fit a BGPLVM on the outcome of the sparse GP; (3) initialize the model from the outcome of the BGPLVM.
:param bool heter_noise: whether to assume heteroscedastic noise in the model
:param str name: the name of the model
"""
def __init__(self, X, Y, indexD, Xr_dim, kernel=None, kernel_row=None, Z=None, Z_row=None, X_row=None, Xvariance_row=None, num_inducing=(10,10), qU_var_r_W_dim=None, qU_var_c_W_dim=None, init='GP', heter_noise=False, name='GPMRMD'):
assert len(Y.shape)==1 or Y.shape[1]==1
self.output_dim = int(np.max(indexD))+1
self.heter_noise = heter_noise
self.indexD = indexD
#Kernel
if kernel is None:
kernel = kern.RBF(X.shape[1])
if kernel_row is None:
kernel_row = kern.RBF(Xr_dim,name='kern_row')
if init=='GP':
from . import SparseGPRegression, BayesianGPLVM
from ..util.linalg import jitchol
Mc, Mr = num_inducing
print('Initializing with GP...')
print('Fit Sparse GP...')
m_sgp = SparseGPRegressionMD(X,Y,indexD,kernel=kernel.copy(),num_inducing=Mc)
m_sgp.likelihood.variance[:] = Y.var()*0.01
m_sgp.optimize(max_iters=1000)
print('Fit BGPLVM...')
m_lvm = BayesianGPLVM(m_sgp.posterior.mean.copy().T,Xr_dim,kernel=kernel_row.copy(), num_inducing=Mr)
m_lvm.likelihood.variance[:] = m_lvm.Y.var()*0.01
m_lvm.optimize(max_iters=10000)
kernel[:] = m_sgp.kern.param_array.copy()
kernel.variance[:] = np.sqrt(kernel.variance)
Z = m_sgp.Z.values.copy()
kernel_row[:] = m_lvm.kern.param_array.copy()
kernel_row.variance[:] = np.sqrt(kernel_row.variance)
Z_row = m_lvm.Z.values.copy()
X_row = m_lvm.X.mean.values.copy()
Xvariance_row = m_lvm.X.variance.values
qU_mean = m_lvm.posterior.mean.T.copy()
qU_var_col_W = jitchol(m_sgp.posterior.covariance)
qU_var_col_diag = np.full(Mc,1e-5)
qU_var_row_W = jitchol(m_lvm.posterior.covariance)
qU_var_row_diag = np.full(Mr,1e-5)
print('Done.')
else:
qU_mean = np.zeros(num_inducing)
qU_var_col_W = np.random.randn(num_inducing[0],num_inducing[0] if qU_var_c_W_dim is None else qU_var_c_W_dim)*0.01
qU_var_col_diag = np.full(num_inducing[0],1e-5)
qU_var_row_W = np.random.randn(num_inducing[1],num_inducing[1] if qU_var_r_W_dim is None else qU_var_r_W_dim)*0.01
qU_var_row_diag = np.full(num_inducing[1],1e-5)
if Z is None:
Z = X[np.random.permutation(X.shape[0])[:num_inducing[0]]].copy()
if X_row is None:
X_row =
|
np.random.randn(self.output_dim,Xr_dim)
|
numpy.random.randn
|
import librosa
import numpy as np
import copy
from numpy.lib.stride_tricks import as_strided
from scipy.fftpack import dct, idct
from scipy.signal import butter, lfilter
import scipy.ndimage
import tensorflow as tf
log = False
# Most of the Spectrograms and Inversion are taken from: https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
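# Illustrative use (hypothetical values): keep only 300-3400 Hz from a signal
# sampled at 16 kHz:
#   filtered = butter_bandpass_filter(data, 300.0, 3400.0, fs=16000, order=5)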
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError('Window size must be even!')
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
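# Shape example (illustrative): for len(X) == 16, window_size == 4 and
# window_step == 2, four zeros are appended (a full window is added even when
# len(X) is already a multiple of window_size), giving num_frames == 9 and an
# output of shape (9, 4) where row i is X[2*i : 2*i + 4].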
def halfoverlap(X, window_size):
"""
Create an overlapped version of X using 50% of window_size as overlap.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError('Window size must be even!')
window_step = window_size // 2
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
def invert_halfoverlap(X_strided):
"""
Invert ``halfoverlap`` function to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
# Hardcoded 50% overlap! Can generalize later...
n_rows, n_cols = X_strided.shape
X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
start_index = 0
end_index = n_cols
window_step = n_cols // 2
for row in range(X_strided.shape[0]):
X[start_index:end_index] += X_strided[row]
start_index += window_step
end_index += window_step
return X
def denoise(spectrogram):
denoised = np.copy(spectrogram)
print(np.mean(denoised))
print(np.min(denoised))
denoised = np.log1p(denoised)
print(np.mean(denoised))
print(np.min(denoised))
denoised[np.where(denoised < 8)] = 0
denoised = np.expm1(denoised)
print(np.mean(denoised))
print(np.min(denoised))
return denoised
def revert_stft(y, fft_size, num_iter):
if log:
y = np.expm1(y)
p = 2 * np.pi * np.random.random_sample(y.shape) - np.pi
x = None
for i in range(num_iter):
S = y * np.exp(1j * p)
x = librosa.istft(S, hop_length=fft_size // 4)
p = np.angle(librosa.stft(x, n_fft=fft_size, hop_length=fft_size // 4))
return x
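# revert_stft performs Griffin-Lim style phase recovery: starting from random
# phase, it alternates istft/stft to re-estimate the phase while keeping the
# given magnitudes (undoing log1p first if the module-level log flag is set).
# Illustrative call (S is a magnitude spectrogram from stft(x, 1024)):
#   x_hat = revert_stft(S, fft_size=1024, num_iter=50)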
def stft(x, fft_size):
S = librosa.stft(x, n_fft=fft_size, hop_length=fft_size // 4)
S = np.abs(S)
if log:
S = np.log1p(S)
return S
def istft(X, fftsize=128, mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = np.fft.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = np.fft.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2] = X
X_pad[:, fftsize // 2:] = 0
X = X_pad
X = local_ifft(X).astype('float64')
X = invert_halfoverlap(X)
if mean_normalize:
X -= np.mean(X)
return X
def pretty_spectrogram(d, log=True, thresh=5, fft_size=512, step_size=64):
"""
creates a spectrogram
log: take the log of the spectrgram
thresh: threshold minimum power for log spectrogram
"""
specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False,
compute_onesided=True))
maxi = 1
if log == True:
maxi = specgram.max()
specgram /= maxi # volume normalize to max 1
# print('Max :' + str(maxi))
specgram = np.log10(specgram) # take log
specgram[specgram < -thresh] = -thresh # set anything less than the threshold as the threshold
else:
specgram[specgram < thresh] = thresh # set anything less than the threshold as the threshold
return specgram, maxi
# Also mostly modified or taken from https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def invert_pretty_spectrogram(X_s, log=True, fft_size=512, step_size=512 / 4, n_iter=10):
if log == True:
X_s = np.power(10, X_s)
X_s = np.concatenate([X_s, X_s[:, ::-1]], axis=1)
X_t = iterate_invert_spectrogram(X_s, fft_size, step_size, n_iter=n_iter)
return X_t
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
<NAME> and <NAME>. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
<NAME>, <NAME> and <NAME>. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
<NAME>, <NAME>, <NAME>. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
reg =
|
np.max(X_s)
|
numpy.max
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from federatedml.ftl.eggroll_computation.helper import compute_sum_XY
class Autoencoder(object):
def __init__(self, an_id):
super(Autoencoder, self).__init__()
self.id = str(an_id)
self.sess = None
self.built = False
self.lr = None
self.input_dim = None
self.hidden_dim = None
def set_session(self, sess):
self.sess = sess
def get_session(self):
return self.sess
def build(self, input_dim, hidden_dim, learning_rate=1e-2):
if self.built:
return
self.lr = learning_rate
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self._set_variable_initializer()
self._build_model()
def _set_variable_initializer(self):
self.Wh_initializer = tf.random_normal((self.input_dim, self.hidden_dim), dtype=tf.float64)
self.bh_initializer = np.zeros(self.hidden_dim).astype(np.float64)
self.Wo_initializer = tf.random_normal((self.hidden_dim, self.input_dim), dtype=tf.float64)
self.bo_initializer = np.zeros(self.input_dim).astype(np.float64)
def _build_model(self):
self._add_input_placeholder()
self._add_encoder_decoder_ops()
self._add_forward_ops()
self._add_representation_training_ops()
self._add_e2e_training_ops()
self._add_encrypt_grad_update_ops()
self.built = True
def _add_input_placeholder(self):
self.X_in = tf.placeholder(tf.float64, shape=(None, self.input_dim))
def _add_encoder_decoder_ops(self):
self.encoder_vars_scope = self.id + "_encoder_vars"
with tf.variable_scope(self.encoder_vars_scope):
self.Wh = tf.get_variable("weights", initializer=self.Wh_initializer, dtype=tf.float64)
self.bh = tf.get_variable("bias", initializer=self.bh_initializer, dtype=tf.float64)
self.decoder_vars_scope = self.id + "_decoder_vars"
with tf.variable_scope(self.decoder_vars_scope):
self.Wo = tf.get_variable("weights", initializer=self.Wo_initializer, dtype=tf.float64)
self.bo = tf.get_variable("bias", initializer=self.bo_initializer, dtype=tf.float64)
def _add_forward_ops(self):
self.Z = self._forward_hidden(self.X_in)
self.logits = self._forward_logits(self.X_in)
self.X_hat = self._forward_output(self.X_in)
def _add_representation_training_ops(self):
vars_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.encoder_vars_scope)
self.init_grad = tf.placeholder(tf.float64, shape=(None, self.hidden_dim))
self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss=self.Z, var_list=vars_to_train,
grad_loss=self.init_grad)
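# Note: passing grad_loss=self.init_grad makes the optimizer backpropagate an
# externally supplied gradient w.r.t. self.Z (instead of differentiating a
# scalar loss), so only the encoder variables in var_list are updated from a
# gradient computed elsewhere.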
def _add_e2e_training_ops(self):
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.X_in))
self.e2e_train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
def _add_encrypt_grad_update_ops(self):
self.Z_grads = tf.gradients(self.Z, xs=[self.Wh, self.bh])
self.grads_W_new = tf.placeholder(tf.float64, shape=[self.input_dim, self.hidden_dim])
self.grads_b_new = tf.placeholder(tf.float64, shape=[self.hidden_dim])
self.new_Wh = self.Wh.assign(self.Wh - self.lr * self.grads_W_new)
self.new_bh = self.bh.assign(self.bh - self.lr * self.grads_b_new)
def _forward_hidden(self, X):
return tf.sigmoid(tf.matmul(X, self.Wh) + self.bh)
def _forward_logits(self, X):
Z = self._forward_hidden(X)
return tf.matmul(Z, self.Wo) + self.bo
def _forward_output(self, X):
return tf.sigmoid(self._forward_logits(X))
def transform(self, X):
return self.sess.run(self.Z, feed_dict={self.X_in: X})
def compute_gradients(self, X):
grads_W_collector = []
grads_b_collector = []
for i in range(len(X)):
grads_w_i, grads_b_i = self.sess.run(self.Z_grads, feed_dict={self.X_in: np.expand_dims(X[i], axis=0)})
grads_W_collector.append(grads_w_i)
grads_b_collector.append(grads_b_i)
return [
|
np.array(grads_W_collector)
|
numpy.array
|
import pickle
import json
import math
from pathlib import Path
import torch
from tqdm import tqdm
import numpy as np
from autolab_core import RigidTransform
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
LABELS = [
"None",
"Car",
"Truck",
"Bus",
"Pedestrian",
"Bike",
"Moto",
"Fence",
"Van",
"Animal",
"Cone",
"RoadBlock",
"Generic",
"Unknown",
"Unknown",
"Unknown"
]
class Object3D(object):
def __init__(self, lidar_label):
# extract label, truncation, occlusion
self.type = LABELS[lidar_label['attribute']['label_type']] # 'Car', 'Pedestrian', ...
self.truncation = 0.0 # truncated pixel ratio [0..1]
self.occlusion = 0 # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
self.alpha = -1 # object observation angle [-pi..pi]
# extract 2d bounding box in 0-based coordinates
self.xmin = -10 # left
self.ymin = -10 # top
self.xmax = -10 # right
self.ymax = -10 # bottom
self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])
# extract 3d bounding box information
self.h = lidar_label['dimensions']['height'] # box height
self.w = lidar_label['dimensions']['width'] # box width
self.l = lidar_label['dimensions']['length'] # box length (in meters)
self.t = (lidar_label['bottom_center']['x'],
lidar_label['bottom_center']['y'],
lidar_label['bottom_center']['z']) # location (x,y,z)
self.rz = lidar_label['yaw'] # yaw angle [-pi..pi]
class Calibration(object):
def __init__(self,
calib_infos,
img_width=1000,
img_height=1000):
self.car_heading = calib_infos["car_heading"]
self.car_position = calib_infos["car_position"]
self.lidar_heading = calib_infos["device_heading"]
self.lidar_position = calib_infos["device_position"]
self.lidar_info = calib_infos["pointcloud_info"]
self.cams_info = calib_infos["images"]
if "radar_points" in calib_infos:
self.radar_pts = calib_infos["radar_points"]
self.img_width = img_width
self.img_height = img_height
def cart2hom(self, pts_3d):
''' Input: nx3 points in Cartesian
Output: nx4 points in homogeneous coordinates by appending 1
'''
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n,1))))
return pts_3d_hom
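# Illustrative: self.cart2hom(np.array([[1., 2., 3.]])) returns
# array([[1., 2., 3., 1.]]) (a homogeneous coordinate is appended per point).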
def lidar2world(self, pc):
pc_offsets = np.array(self.lidar_info["offset"], dtype=np.float64)
return pc + pc_offsets
# 3d to 3d
def world2car(self, pc):
rotation_quaternion = np.asarray([self.car_heading['w'], self.car_heading['x'],
self.car_heading['y'], self.car_heading['z']])
translation = np.asarray([self.car_position['x'], self.car_position['y'], self.car_position['z']])
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(np.linalg.inv(Mat), self.cart2hom(pc).T).T[:, 0:3]
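# Note: world2car/car2world and the dev/cam variants below all follow the same
# pattern: build a 4x4 homogeneous transform [[R, t], [0, 0, 0, 1]] from the
# stored quaternion (via RigidTransform) and translation, then apply either the
# matrix (frame -> world) or its inverse (world -> frame) to the homogeneous
# points produced by cart2hom.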
def car2world(self, pc):
rotation_quaternion = np.asarray([self.car_heading['w'], self.car_heading['x'],
self.car_heading['y'], self.car_heading['z']])
translation = np.asarray([self.car_position['x'], self.car_position['y'], self.car_position['z']])
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(Mat, self.cart2hom(pc).T).T[:, 0:3]
def world2dev(self, pc):
rotation_quaternion = np.asarray([self.lidar_heading['w'], self.lidar_heading['x'],
self.lidar_heading['y'], self.lidar_heading['z']])
translation = np.asarray([self.lidar_position['x'], self.lidar_position['y'], self.lidar_position['z']])
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(np.linalg.inv(Mat), self.cart2hom(pc).T).T[:, 0:3]
def dev2world(self, pc):
rotation_quaternion = np.asarray([self.lidar_heading['w'], self.lidar_heading['x'],
self.lidar_heading['y'], self.lidar_heading['z']])
translation = np.asarray([self.lidar_position['x'], self.lidar_position['y'], self.lidar_position['z']])
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(Mat, self.cart2hom(pc).T).T[:, 0:3]
def world2cam(self, pc, cam_channel='front_left'):
cur_cam_info = None
for info in self.cams_info:
if info["cam_id"] == cam_channel:
cur_cam_info = info
break
if cur_cam_info is None:
raise ValueError("Camera channel %s is not supported now!" % cam_channel)
rotation_quaternion = np.asarray([cur_cam_info['heading']['w'], cur_cam_info['heading']['x'],
cur_cam_info['heading']['y'], cur_cam_info['heading']['z']], dtype=np.float64)
translation = np.asarray([cur_cam_info['position']['x'], cur_cam_info['position']['y'],
cur_cam_info['position']['z']], dtype=np.float64)
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(np.linalg.inv(Mat), self.cart2hom(pc).T).T[:, 0:3]
def cam2world(self, pc, cam_channel='front_left'):
cur_cam_info = None
for info in self.cams_info:
if info["cam_id"] == cam_channel:
cur_cam_info = info
break
if cur_cam_info is None:
raise ValueError("Camera channel %s is not supported now!" % cam_channel)
rotation_quaternion = np.asarray([cur_cam_info['heading']['w'], cur_cam_info['heading']['x'],
cur_cam_info['heading']['y'], cur_cam_info['heading']['z']])
translation = np.asarray(
[cur_cam_info['position']['x'], cur_cam_info['position']['y'], cur_cam_info['position']['z']])
T_qua2rota = RigidTransform(rotation_quaternion, translation)
Trans = T_qua2rota.translation
Rot = T_qua2rota.rotation
Mat = np.zeros((4, 4), dtype=np.float64)
Mat[0:3, 0:3] = Rot
Mat[3, 3] = 1
Mat[0:3, 3] = Trans
return np.matmul(Mat, self.cart2hom(pc).T).T[:, 0:3]
def lidar2dev(self, pc):
pc_world = self.lidar2world(pc)
pc_dev = self.world2dev(pc_world)
return pc_dev
def lidar2car(self, pc):
pc_world = self.lidar2world(pc)
pc_car = self.world2car(pc_world)
return pc_car
def lidar2cam(self, pc, cam_channel='front_left'):
pc_world = self.lidar2world(pc)
pc_cam = self.world2cam(pc_world,cam_channel=cam_channel)
return pc_cam
def car2cam(self, pc):
mat = np.array([[-0.081515, -0.078592, 0.993569, 4.792509],
[-0.996604, -0.005272, -0.082181, 0.739551],
[0.011697, -0.996893, -0.077895, 1.927075],
[0.000000, 0.000000, 0.000000, 1.000000]], dtype=np.float64)
return np.matmul(
|
np.linalg.inv(mat)
|
numpy.linalg.inv
|
import numpy as np
from sitator.util import PBCCalculator
from sitator.visualization import SiteTrajectoryPlotter
from sitator.util.progress import tqdm
import logging
logger = logging.getLogger(__name__)
class SiteTrajectory(object):
"""A trajectory capturing the dynamics of particles through a SiteNetwork."""
SITE_UNKNOWN = -1
def __init__(self,
site_network,
particle_assignments,
confidences = None):
"""
:param SiteNetwork site_network:
:param ndarray (n_frames, n_mobile) particle_assignments:
:param ndarray (n_frames, n_mobile) confidences (optional): the confidence
with which each assignment was made.
"""
if particle_assignments.ndim != 2:
raise ValueError("particle_assignments must be 2D")
if particle_assignments.shape[1] != site_network.n_mobile:
raise ValueError("particle_assignments has wrong shape %s" % particle_assignments.shape)
self._sn = site_network
self._traj = particle_assignments.copy()
if not confidences is None:
if confidences.shape != particle_assignments.shape:
raise ValueError("confidences has wrong shape %s; should be %s" % (confidences.shape, particle_assignments.shape))
self._confs = confidences
else:
self._confs = None
self._real_traj = None
self._default_plotter = None
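# Minimal usage sketch (assumes an existing SiteNetwork `sn` with n_mobile == 2):
#   assignments = np.array([[0, 1], [0, 1], [-1, 1]])  # 3 frames, 2 particles
#   st = SiteTrajectory(sn, assignments)
#   st.n_frames             # 3
#   st.n_unassigned         # 1 (the single -1 entry)
#   st.percent_unassigned   # 1/6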
def __len__(self):
return self.n_frames
def __getitem__(self, key):
st = type(self)(self._sn,
self._traj[key],
confidences = None if self._confs is None else self._confs[key])
if not self._real_traj is None:
st.set_real_traj(self._real_traj[key])
return st
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Don't want to pickle giant trajectories or uninteresting plotters
state['_real_traj'] = None
state['_default_plotter'] = None
return state
@property
def traj(self):
"""The site assignments over time."""
return self._traj
@property
def confidences(self):
return self._confs
@property
def n_frames(self):
"""The number of frames in the trajectory."""
return len(self._traj)
@property
def n_unassigned(self):
"""The total number of times a mobile particle is unassigned."""
return np.sum(self._traj < 0)
@property
def n_assigned(self):
"""The total number of times a mobile particle was assigned to a site."""
return self._sn.n_mobile * self.n_frames - self.n_unassigned
@property
def percent_unassigned(self):
"""Proportion of particle positions that are unassigned over all time."""
return float(self.n_unassigned) / (self._sn.n_mobile * self.n_frames)
@property
def site_network(self):
return self._sn
@site_network.setter
def site_network(self, value):
# Captures len, #, and dist.
assert
|
np.all(value.mobile_mask == self._sn.mobile_mask)
|
numpy.all
|
from __future__ import print_function
import acq4.Manager
import numpy as np
import acq4.analysis.tools.functions as afn
import scipy
#from acq4.util.pyqtgraph.multiprocess import Parallelize
from pyqtgraph.debug import Profiler
import os, sys
from six.moves import range
try:
import cv2
HAVE_CV2 = True
except ImportError:
HAVE_CV2 = False
probabilityInputs = np.array([
[[0,0,0], ## 2011.12.14 s0c1
[0,1,1],
[0,1,1],
[1,0,0]],
[[0,1,1], ## 2012.01.04 s1c0
[1,1,1],
[0,0,0],
[1,0,0]],
[[0,0,0], ## 2012.01.06 s0c1
[0,1,1],
[0,0,0],
[0,0,1]],
[[0,0,1], ## 2012.01.06 s1c0
[0,1,0],
[0,1,0],
[0,0,0]],
[[0,0,0], ## 2012.01.19 s0c0
[0,0,0],
[0,0,0],
[0,0,0]],
[[0,0,1], ## 2012.01.19 s0c1
[1,1,1],
[1,0,0],
[0,1,0]],
[[1,1,0], ## 2012.01,25 s0c0
[0,1,1],
[0,0,0],
[0,1,1]],
[[0,0,0], ## 2012.02.01 s0c0
[0,1,0],
[0,0,0],
[0,0,0]],
[[0,0,0], ## 2012.02.08 s0c0
[1,1,0],
[0,0,0],
[1,0,0]],
[[0,1,0], ## 2012.02.18 s0c0
[0,1,0],
[0,0,0],
[0,1,0]],
[[0,0,0], ## 2012.02.18 s1c0
[1,1,1],
[0,0,0],
[0,1,0]],
[[0,0,1], ## 2012.02.18 s1c1
[1,1,0],
[0,0,1],
[0,1,1]],
[[0,0,1], ## 2012.02.20 s0c0
[1,1,1],
[0,0,0],
[1,0,1]],
[[0,1,0], ## 2012.02.22 s2c0
[0,1,0],
[0,0,0],
[0,0,0]],
[[0,1,1], ## 2012.02.23 s1c0
[0,1,0],
[0,1,1],
[0,0,1]],
[[0,0,0], ## 2012.02.23 s1c1
[0,1,0],
[0,0,1],
[1,0,0]],
[[0,1,0], ## 2012.02.24 s0c0
[1,1,0],
[1,0,0],
[1,1,0]],
[[0,0,0], ## 2012.02.26 s0c0
[1,1,1],
[0,1,1],
[0,1,0]],
[[0,0,0], ## 2012.02.26 s1c1
[0,1,0],
[0,0,0],
[1,1,1]],
[[0,1,0], ## 2012.02.27 s0c0
[0,1,1],
[0,0,0],
[1,1,0]],
[[0,0,0], ## 2012.02.27 s0c1
[0,1,0],
[0,0,1],
[0,1,0]],
[[0,0,0], ## 2012.02.27 s1c0
[1,1,1],
[0,1,0],
[1,1,1]]])
def reserveArray(data, spacing=5e-6):
cells = set(data['CellDir'])
n = len(cells)
xmin = data['xPos'].min()
ymin = data['yPos'].min()
xdim = int((data['xPos'].max()-xmin)/spacing)+5
ydim = int((data['yPos'].max()-ymin)/spacing)+5
return np.zeros((n, xdim, ydim), dtype=float)
def calculateProb(sites, spacing=5e-6, keys=None):
cells = set(sites['CellDir'])
arr = reserveArray(sites, spacing)
xmin = sites['xPos'].min()
ymin = sites['yPos'].min()
for i, c in enumerate(cells):
data = sites[sites['CellDir']==c]
spontRate = data['numOfPreEvents'].sum()/data['PreRegionLen'].sum()
data = afn.bendelsSpatialCorrelationAlgorithm(data, 90e-6, spontRate, data[0]['PostRegionLen'])
for s in data:
x, y = (int((s['xPos']-xmin)/spacing), int((s['yPos']-ymin)/spacing))
arr[i, x, y] = s['prob']
return arr
def interpolateSlice(sites, spacing=5e-6, method='nearest', probThreshold=0.05):
xmin = sites['xPos'].min()
ymin = sites['yPos'].min()
xdim = int((sites['xPos'].max()-xmin)/spacing)+5
ydim = int((sites['yPos'].max()-ymin)/spacing)+5
cells = set(sites['CellDir'])
n = len(cells)
arr = np.zeros((n, xdim, ydim), dtype=float)
results = []
for i, c in enumerate(cells):
data = sites[sites['CellDir'] == c]
pts = np.array([data['xPos'], data['yPos']], dtype=float)
pts[0] = pts[0]-xmin
pts[1] = pts[1]-ymin
pts = pts.transpose()/spacing
xi = np.indices((xdim, ydim))
xi = xi.transpose(1,2,0)
spontRate = data['numOfPreEvents'].sum()/data['PreRegionLen'].sum()
data = afn.bendelsSpatialCorrelationAlgorithm(data, 90e-6, spontRate, data[0]['PostRegionLen'])
#print data['prob'].max()
data['prob'][data['prob'] < probThreshold] = 2.
# print data['prob'].max()
data['prob'][(data['prob'] >= probThreshold)*(data['prob']!= 2.)] = 0.
data['prob'][data['prob'] == 2.] = 1.
#print data['prob'].max()
#print "========= ", data['prob'].mean()
res = scipy.interpolate.griddata(pts, data['prob'], xi, method=method)
arr[i] = res
results.append(res)
#arr[i][np.isnan(arr[i])] = .1
return arr
def interpolateCells(sites, spacing=5e-6, method='nearest', probThreshold=0.05):
avgCellX = np.array(list(set(sites['CellXPos']))).mean()
avgCellY = np.array(list(set(sites['CellYPos']))).mean()
xmin = (sites['xPos']-sites['CellXPos']).min() ## point furthest left of the cell
ymin = (sites['yPos']-sites['CellYPos']).min() ## point furthest above the cell
xmax = (sites['xPos']-sites['CellXPos']).max()
ymax = (sites['yPos']-sites['CellYPos']).max()
xdim = int((xmax-xmin)/spacing)+10
ydim = int((ymax-ymin)/spacing)+10
avgCellIndex = np.array([int((avgCellX-xmin)/spacing)+5, int((avgCellY-ymin)/spacing)+5])
cells = set(sites['CellDir'])
n = len(cells)
arr = np.zeros((n, xdim, ydim), dtype=float)
results = []
print('xdim:', xdim, 'ydim:', ydim)
for i, c in enumerate(cells):
data = sites[sites['CellDir'] == c]
trans1 = (data['CellXPos'][0] - avgCellX, data['CellYPos'][0]-avgCellY)
trans2 = (avgCellX+xmin, avgCellY+ymin)
pts = np.array([data['xPos']-trans1[0]-trans2[0], data['yPos']-trans1[1]-trans2[1]], dtype=float)
#pts[0] = pts[0]+(avgCellX-xmin)
#pts[1] = pts[1]+(avgCellY-ymin)
xlimits = (int((data['xPos'].min()-trans1[0]-trans2[0])/spacing), int((data['xPos'].max()-trans1[0]-trans2[0])/spacing))
ylimits = (int((data['yPos'].min()-trans1[1]-trans2[1])/spacing), int((data['yPos'].max()-trans1[1]-trans2[1])/spacing))
print('xlimits:', xlimits, ' ylimits:', ylimits)
pts = pts.transpose()/spacing
xi = np.indices((xdim, ydim))
xi = xi.transpose(1,2,0)
spontRate = data['numOfPreEvents'].sum()/data['PreRegionLen'].sum()
data = afn.bendelsSpatialCorrelationAlgorithm(data, 90e-6, spontRate, data[0]['PostRegionLen'])
#print data['prob'].max()
data['prob'][data['prob'] < probThreshold] = 2.
# print data['prob'].max()
data['prob'][(data['prob'] >= probThreshold)*(data['prob']!= 2.)] = 0.
data['prob'][data['prob'] == 2.] = 1.
#print data['prob'].max()
#print "========= ", data['prob'].mean()
res = scipy.interpolate.griddata(pts, data['prob'], xi, method=method)
res[:xlimits[0], :] = 0
res[xlimits[1]+1:, :] = 0
res[:, :ylimits[0]] = 0
res[:, ylimits[1]+1:] = 0
arr[i] = res
results.append(res)
#arr[i][np.isnan(arr[i])] = .1
return arr, (xmin, ymin)
def convolveCells(sites, spacing=5e-6, probThreshold=0.02, probRadius=90e-6, timeWindow=0.1, eventKey='numOfPostEvents'):
#avgCellX = np.array(list(set(sites['xPosCell']))).mean()
#avgCellY = np.array(list(set(sites['yPosCell']))).mean()
#xmin = (sites['xPos']-sites['xPosCell']).min() ## point furthest left of the cell
xmin = sites['xPosCell'].min()
#ymin = (sites['yPos']-sites['yPosCell']).min() ## point furthest above the cell
ymin = sites['yPosCell'].min()
#xmax = (sites['xPos']-sites['xPosCell']).max()
xmax = sites['xPosCell'].max()
#ymax = (sites['yPos']-sites['yPosCell']).max()
ymax = sites['yPosCell'].max()
xdim = int((xmax-xmin)/spacing)+10
ydim = int((ymax-ymin)/spacing)+10
#avgCellIndex = np.array([int((avgCellX-xmin)/spacing)+5, int((avgCellY-ymin)/spacing)+5])
cells = set(sites['CellDir'])
n = len(cells)
arr = np.zeros((n, xdim, ydim), dtype=float)
results = []
for i, c in enumerate(cells):
data = sites[sites['CellDir']==c]
spontRate = data['numOfPreEvents'].sum()/data['PreRegionLen'].sum()
data = afn.bendelsSpatialCorrelationAlgorithm(data, probRadius, spontRate, timeWindow=timeWindow, eventKey=eventKey)
probs = np.zeros(len(data))
probs[data['prob'] < probThreshold] = 1.
for j, s in enumerate(data):
trans1 = (data['CellXPos'][0] - avgCellX, data['CellYPos'][0]-avgCellY)
trans2 = (avgCellX+xmin, avgCellY+ymin)
x, y = (int((s['xPos']-trans1[0]-trans2[0])/spacing), int((s['yPos']-trans1[1]-trans2[1])/spacing))
arr[i, x, y] = probs[j]
results.append(arr[i].copy())
arr[i] = scipy.ndimage.gaussian_filter(arr[i], 2)
arr[i] = arr[i]/0.039
arr[i][arr[i] > 0.02] = 1
#arr[i][(arr[i] > 0.02)*(arr[i] <=0.04)] = 1
arr[i][arr[i] <= 0.02] = 0
return arr, results
def convolveCells_Xuying(sites, spacing=5e-6, probThreshold=0.02, probRadius=90e-6):
#avgCellX = np.array(list(set(sites['xPosCell']))).mean()
#avgCellY = np.array(list(set(sites['yPosCell']))).mean()
#xmin = (sites['xPos']-sites['xPosCell']).min() ## point furthest left of the cell
xmin = sites['xPosCell'].min()
#ymin = (sites['yPos']-sites['yPosCell']).min() ## point furthest above the cell
ymin = sites['yPosCell'].min()
#xmax = (sites['xPos']-sites['xPosCell']).max()
xmax = sites['xPosCell'].max()
#ymax = (sites['yPos']-sites['yPosCell']).max()
ymax = sites['yPosCell'].max()
xdim = int((xmax-xmin)/spacing)+10
ydim = int((ymax-ymin)/spacing)+10
#avgCellIndex = np.array([int((avgCellX-xmin)/spacing)+5, int((avgCellY-ymin)/spacing)+5])
cells = set(sites['CellDir'])
n = len(cells)
arr =
|
np.zeros((n, xdim, ydim), dtype=float)
|
numpy.zeros
|
"""
Module containing class representing a distribution that is uniform over a
parallelepiped in an arbitrary number of dimensions.
**File**: $DISTPY/distpy/distribution/ParallelepipedDistribution.py
**Author**: <NAME>
**Date**: 31 May 2021
"""
import numpy as np
import numpy.random as rand
import numpy.linalg as lalg
from ..util import int_types, sequence_types, create_hdf5_dataset,\
get_hdf5_value
from .Distribution import Distribution
def _normed(vec):
"""
Finds and returns a normalized version of the given vector.
Parameters
----------
vec : sequence
vector to norm
Returns
-------
normed : `numpy.ndarray`
if `vec` is \\(\\boldsymbol{x}\\), then `normed` is
\\(\\boldsymbol{x}/\\sqrt{\\boldsymbol{x}\\cdot\\boldsymbol{x}}\\)
"""
arrvec = np.array(vec)
return (arrvec / np.sqrt(np.vdot(arrvec, arrvec)))
class ParallelepipedDistribution(Distribution):
"""
Class representing a distribution that is uniform over a parallelepiped in
an arbitrary number of dimensions.
"""
def __init__(self, center, face_directions, distances, metadata=None):
"""
Initializes a new `ParallelepipedDistribution` with the given parameter
values.
Parameters
----------
center : `numpy.ndarray`
array describing vector pointing to center of parallelepiped
face_directions : sequence
sequence of arrays giving unit vectors from center of
parallelepiped to its faces
distances : `numpy.ndarray`
array of distances to each face from the center
metadata : number or str or dict or `distpy.util.Savable.Savable`
data to store alongside this distribution.
"""
self.center = center
self.face_directions = face_directions
self.distances = distances
self.metadata = metadata
@property
def center(self):
"""
The center point of the parallelepiped.
"""
if not hasattr(self, '_center'):
raise AttributeError("center was referenced before it was set.")
return self._center
@center.setter
def center(self, value):
"""
Setter for `ParallelepipedDistribution.center`.
Parameters
----------
value : numpy.ndarray
1D numpy.ndarray of length `ParallelepipedDistribution.numparams`
"""
if (type(value) in sequence_types):
value = np.array(value)
if (value.ndim == 1):
self._center = value
else:
raise ValueError(('The number of dimensions of the center ' +\
'given to a ParallelepipedDistribution is not 1. It ' +\
'is {}-dimensional.').format(value.ndim))
else:
raise ValueError('A ParallelepipedDistribution was given with ' +\
'a center of an unrecognizable type.')
@property
def face_directions(self):
"""
A matrix encoding the directions to each face of the parallelepiped.
"""
if not hasattr(self, '_face_directions'):
raise AttributeError("face_directions was referenced before it " +\
"was set.")
return self._face_directions
@face_directions.setter
def face_directions(self, value):
"""
Setter for `ParallelepipedDistribution.face_directions`.
Parameters
----------
value : sequence
list of directions to the faces of the parallelepiped. These will
be normalized
"""
if (type(value) in sequence_types):
value = np.array(value)
if (value.shape == ((self.numparams,) * 2)):
self._face_directions = [_normed(value[i])\
for i in range(self.numparams)]
self._face_directions = np.matrix(self._face_directions)
else:
raise ValueError("The shape of the face_directions in " +\
"matrix form was not the expected value, which is " +\
"(self.numparams, self.numparams).")
else:
raise ValueError("A ParallelepipedDistribution was given " +\
"face_directions of an unrecognizable type.")
@property
def distances(self):
"""
The distances to each face of the parallelepiped.
"""
if not hasattr(self, '_distances'):
raise AttributeError("distances was referenced before it was set.")
return self._distances
@distances.setter
def distances(self, value):
"""
Setter for `ParallelepipedDistribution.distances`.
Parameters
----------
value : numpy.ndarray
1D array of positive numbers with the same shape as
`ParallelepipedDistribution.center`
"""
if (type(value) in sequence_types):
value = np.array(value)
if value.shape == (self.numparams,):
if np.all(value > 0):
self._distances = value
else:
raise ValueError("Not all distances were positive.")
else:
raise ValueError("distances given to " +\
"ParallelepipedDistribution have the wrong shape.")
else:
raise TypeError("distances was set to a non-sequence.")
@property
def inv_face_directions(self):
"""
The inverse of the matrix describing the directions to the faces of the
parallelepiped.
"""
if not hasattr(self, '_inv_face_directions'):
self._inv_face_directions = lalg.inv(self.face_directions)
return self._inv_face_directions
@property
def numparams(self):
"""
The number of parameters of this `ParallelepipedDistribution`.
"""
if not hasattr(self, '_numparams'):
self._numparams = len(self.center)
return self._numparams
@property
def mean(self):
"""
The mean of this `ParallelepipedDistribution`, which is the center of
the parallelepiped.
"""
if not hasattr(self, '_mean'):
self._mean = self.center
return self._mean
@property
def variance(self):
"""
The variance of the `ParallelepipedDistribution` class is not
implemented.
"""
if not hasattr(self, '_variance'):
raise AttributeError("variance is not implemented for the " +\
"ParallelepipedDistribution class.")
return self._variance
@property
def matrix(self):
"""
The matrix whose rows are vectors pointing from the vertex to all
adjacent vertices.
"""
if not hasattr(self, '_matrix'):
def row(index):
#
# Finds the index'th row of the matrix. Essentially, this is
# the vector from the vertex to the index'th adjacent vertex.
#
mod_dists = self.distances.copy()
mod_dists[index] = (mod_dists[index] * (-1))
from_cent = self.inv_face_directions * np.matrix(mod_dists).T
from_cent = np.array(from_cent).squeeze()
return self.center + from_cent - self.vertex
self._matrix = np.matrix([row(i) for i in range(self.numparams)])
return self._matrix
@property
def vertex(self):
"""
The vertex which satisfies
\\((\\boldsymbol{v}-\\boldsymbol{c})\\cdot\\boldsymbol{\\hat{n}}_k=\
d_k\\) for all \\(k\\), where \\(\\boldsymbol{v}\\) is the vertex,
\\(\\boldsymbol{c}\\) is the center, \\(\\boldsymbol{\\hat{n}}_k\\) is
the \\(k^{\\text{th}}\\) normalized face direction, and \\(d_k\\) is the
distance to the \\(k^{\\text{th}}\\) face.
"""
if not hasattr(self, '_vertex'):
from_cent = self.inv_face_directions * np.matrix(self.distances).T
from_cent = np.array(from_cent).squeeze()
self._vertex = self.center + from_cent
return self._vertex
@property
def area(self):
"""
The "area" (more like hypervolume in the general case) of the
parallelepiped-shaped region described by this
`ParallelepipedDistribution`.
"""
if not hasattr(self, '_area'):
self._area = np.abs(lalg.det(self.matrix))
return self._area
def draw(self, shape=None, random=rand):
"""
Draws point(s) from this `ParallelepipedDistribution`. Below, `p` is
`ParallelepipedDistribution.numparams`.
Parameters
----------
shape : int or tuple or None
- if None, a single random variate is returned as a 1D array of
length `p`
- if int \\(n\\), \\(n\\) random variates are returned as a 2D
array of shape `(n,p)`
- if tuple of \\(n\\) ints, `numpy.prod(shape)` random variates are
returned as an \\((n+1)\\)-D array of shape `shape+(p,)`
random : `numpy.random.RandomState`
the random number generator to use (by default, `numpy.random` is
used)
Returns
-------
variates : float or `numpy.ndarray`
either a single random variate or an array of such variates. See the
documentation of `shape` above for the type and shape of the return
value
"""
none_shape = (type(shape) is type(None))
if none_shape:
shape = (1,)
elif type(shape) in int_types:
shape = (shape,)
transformed_point = random.rand(*(shape + (self.numparams,)))
points = self.vertex + np.dot(transformed_point, self.matrix.A)
if none_shape:
return points[0]
else:
return points
def log_value(self, point):
"""
Computes the logarithm of the value of this
`ParallelepipedDistribution` at the given point.
Parameters
----------
point : `numpy.ndarray`
if this distribution describes \\(p\\) parameters, `point` should
be a length-\\(p\\) `numpy.ndarray`
Returns
-------
value : float
natural logarithm of the value of this distribution at `point`. If
\\(f\\) is this distribution's PDF and \\(x\\) is `point`, then
`value` is \\(\\ln{\\big(f(x)\\big)}\\)
"""
if self._in_region(point):
return -np.log(self.area)
return -np.inf
def to_string(self):
"""
Finds and returns a string version of this `ParallelepipedDistribution`
of the form `"Parallelepiped(center, face_directions, distances)"`.
"""
return "Parallelepiped({0!s}, {1!s}, {2!s})".format(self.center,\
self.face_directions, self.distances)
def _in_region(self, point):
"""
Finds if the given point is in the region defined by this
ParallelepipedDistribution.
Parameters
----------
point : numpy.ndarray
the point to test for inclusion
Returns
-------
containment : bool
True if point in region, False otherwise
"""
if type(point) not in sequence_types:
raise ValueError('point given to log_value was not of an ' +\
'array-like type.')
arrpoint = np.array(point)
if (arrpoint.ndim != 1) or (len(arrpoint) != self.numparams):
raise ValueError('The point given is either of the wrong ' +\
'dimension or the wrong length.')
from_center = arrpoint - self.center
return_val = True
for i in range(self.numparams):
dotp = np.dot(from_center, self.face_directions.A[i,:])
return_val =\
(return_val and (np.abs(dotp) <= np.abs(self.distances[i])))
return return_val
def __eq__(self, other):
"""
Checks for equality of this `ParallelepipedDistribution` with `other`.
Parameters
----------
other : object
object to check for equality
Returns
-------
result : bool
True if and only if `other` is a `ParallelepipedDistribution` with
the same `ParallelepipedDistribution.center`,
`ParallelepipedDistribution.face_directions`, and
`ParallelepipedDistribution.distances`
"""
if isinstance(other, ParallelepipedDistribution):
tol_kwargs = {'rtol': 1e-9, 'atol': 0.}
center_close = np.allclose(self.center, other.center, **tol_kwargs)
face_directions_close = np.allclose(self.face_directions.A,\
other.face_directions.A, **tol_kwargs)
distances_close =\
np.allclose(self.distances, other.distances, **tol_kwargs)
metadata_equal = self.metadata_equal(other)
return all([center_close, face_directions_close, distances_close,\
metadata_equal])
else:
return False
@property
def can_give_confidence_intervals(self):
"""
Multivariate distributions do not support confidence intervals.
"""
return False
@property
def minimum(self):
"""
The minimum allowable value(s) in this distribution.
"""
return self.center - np.abs(self.vertex - self.center)
@property
def maximum(self):
"""
The maximum allowable value(s) in this distribution.
"""
return self.center + np.abs(self.vertex - self.center)
@property
def is_discrete(self):
"""
Boolean describing whether this distribution is discrete (True) or
continuous (False).
"""
return False
def fill_hdf5_group(self, group, center_link=None,\
face_directions_link=None, distances_link=None, save_metadata=True):
"""
Fills the given hdf5 file group with data about this
`ParallelepipedDistribution` so that it can be loaded later.
Parameters
----------
group : h5py.Group
hdf5 file group to fill
center_link : str or h5py.Dataset or None
link to center in hdf5 file, if it exists
face_directions_link : str or h5py.Dataset or None
link to face_directions in hdf5 file, if it exists
distances_link : str or h5py.Dataset or None
link to distances to faces in hdf5 file, if it exists
save_metadata : bool
- if True, attempts to save metadata alongside distribution and
throws error if it fails
- if False, metadata is ignored in saving process
"""
group.attrs['class'] = 'ParallelepipedDistribution'
create_hdf5_dataset(group, 'center', data=self.center,\
link=center_link)
create_hdf5_dataset(group, 'face_directions',\
data=self.face_directions, link=face_directions_link)
create_hdf5_dataset(group, 'distances', data=self.distances,\
link=distances_link)
if save_metadata:
self.save_metadata(group)
@staticmethod
def load_from_hdf5_group(group):
"""
Loads a `ParallelepipedDistribution` from the given hdf5 file group.
Parameters
----------
group : h5py.Group
the same hdf5 file group which fill_hdf5_group was called on when
this Distribution was saved
Returns
-------
distribution : `ParallelepipedDistribution`
distribution created from the information in the given group
"""
try:
assert group.attrs['class'] == 'ParallelepipedDistribution'
except:
raise TypeError("The given hdf5 file doesn't seem to contain a " +\
"ParallelepipedDistribution.")
metadata = Distribution.load_metadata(group)
center = get_hdf5_value(group['center'])
face_directions = get_hdf5_value(group['face_directions'])
distances = get_hdf5_value(group['distances'])
return ParallelepipedDistribution(center, face_directions, distances,\
metadata=metadata)
@property
def gradient_computable(self):
"""
Boolean describing whether the gradient of the given distribution has
been implemented. If True,
`ParallelepipedDistribution.gradient_of_log_value` method can be called
safely.
"""
return True
def gradient_of_log_value(self, point):
"""
Computes the gradient (derivative) of the logarithm of the value of
this `ParallelepipedDistribution` at the given point.
Parameters
----------
point : `numpy.ndarray`
if this distribution describes \\(p\\) parameters, `point` should
be a length-\\(p\\) `numpy.ndarray`
Returns
-------
value : `numpy.ndarray`
gradient of the natural logarithm of the value of this
distribution. If \\(f\\) is this distribution's PDF and \\(x\\) is
`point`, then `value` is
\\(\\boldsymbol{\\nabla}\\ln{\\big(f(x)\\big)}\\) as a 1D
`numpy.ndarray` of length \\(p\\)
"""
return
|
np.zeros((self.numparams,))
|
numpy.zeros
|
import numpy as np
from typing import Tuple, Union
class Matern:
def __init__(self, nu: float, scale: float, period: float = 2 * np.pi):
if nu not in [1 / 2, 3 / 2, 5 / 2, 7 / 2]:
raise ValueError('Parameter nu must be a half integer among [1/2,3/2,5/2,7/2].')
else:
self.nu = nu
self.scale = scale
self.period = period
self.max = self.matern_function(0)
def r(self, t: np.ndarray) -> np.ndarray:
return np.sqrt(2 - 2 * np.cos(2 * np.pi * t / self.period))
def matern_function(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
if self.nu == (1 / 2):
return np.exp(-r)
elif self.nu == (3 / 2):
return (1 + np.sqrt(3) * r) * np.exp(-np.sqrt(3) * r)
elif self.nu == (5 / 2):
return (1 + np.sqrt(5) * r + 5 * (r ** 2) / 3) * np.exp(-np.sqrt(5) * r)
elif self.nu == (7 / 2):
return (1 + np.sqrt(7) * r + 42 * (r ** 2) / 15
+ 7 * np.sqrt(7) * (r ** 3) / 15) * np.exp(-np.sqrt(7) * r)
def __call__(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return self.matern_function(self.r(t) / self.scale) / self.max
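# A small usage sketch (not part of the original module): the kernel is
# normalized so that its value at zero lag is exactly 1, and it decays with
# the chordal distance r(t) on a circle of the given period. The function
# name and the chosen nu/scale values below are illustrative assumptions.
def _matern_example():
    """Hypothetical demo; assumes nu=3/2, scale=1.0 and the default period."""
    kernel = Matern(nu=3 / 2, scale=1.0)
    lags = np.linspace(0.0, np.pi, 5)
    values = kernel(lags)
    assert np.isclose(values[0], 1.0)        # unit value at zero lag
    assert np.all(np.diff(values) <= 0.0)    # monotone decay up to half period
    return values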
class MissingWendland:
def __init__(self, mu_alpha: Tuple[int, float], scale: float, period: float = 2 * np.pi, zero=1e-12):
if mu_alpha not in [(2, 1 / 2), (3, 3 / 2), (4, 5 / 2)]:
raise ValueError('The tuple mu_alpha must be one of [(2,1/2), (3,3/2), (4,5/2)].')
else:
self.mu_alpha = mu_alpha
self.scale = scale
self.period = period
self.zero = zero
self.max = self.missing_wendland_function(self.zero)
def r(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return np.sqrt(2 - 2 * np.cos(2 * np.pi * t / self.period))
def S(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return np.sqrt(1 - r ** 2)
def L(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return np.log(r / (1 + self.S(r)))
# def P(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
# return 3465 * (r ** 12) + 83160 * (r ** 10) + 13860 * (r ** 8)
#
# def Q(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
# return 37495 * (r ** 10) + 160290 * (r ** 8) + 33488 * (r ** 6) - 724 * (r ** 4) + 1344 * (r ** 2) - 128
def missing_wendland_function(self, r: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
if self.mu_alpha == (2, 1 / 2):
return 3 * (r ** 2) * self.L(r) + (2 * (r ** 2) + 1) * self.S(r)
elif self.mu_alpha == (3, 3 / 2):
return -(15 * r ** 6 + 90 * r ** 4) * self.L(r) - (81 * r ** 4 + 28 * r ** 2 - 4) * self.S(r)
elif self.mu_alpha == (4, 5 / 2):
return (945 * r ** 8 + 2520 * r ** 6) * self.L(r) + (
256 * r ** 8 + 2639 * r ** 6 + 690 * r ** 4 - 136 * r ** 2 + 16) * self.S(r)
# elif self.mu_alpha == (5, 7 / 2):
# return -self.P(r) * self.L(r) - self.Q(r) * self.S(r)
def __call__(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
t =
|
np.asarray(t)
|
numpy.asarray
|
"""
Copyright 2019-2020, the e-prop team:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
from the Institute for theoretical computer science, TU Graz, Austria.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
# This file is modified from https://github.com/IGITUGraz/eligibility_propagation/blob/master/Figure_2_TIMIT/tools.py
import json
import os
import pickle
import numpy as np
import numpy.random as rd
def pad_vector(v, n_time, pad_value=0.):
if len(v.shape) == 2:
shp = v.shape
return np.concatenate([v, pad_value * np.ones((n_time - shp[0], shp[1]))], axis=0)
elif len(v.shape) == 1:
shp = v.shape
# pad with pad_value (mirrors the 2-D branch above; np.zeros made pad_value a no-op)
return np.concatenate([v, pad_value * np.ones((n_time - shp[0],))], axis=0)
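# A minimal sketch (not from the original tools module): pad_vector appends
# pad_value rows (2-D input) or pad_value entries (1-D input) until the time
# dimension reaches n_time. The demo function name is hypothetical.
def _pad_vector_example():
    """Hypothetical demo of pad_vector on a 1-D and a 2-D array."""
    padded_1d = pad_vector(np.array([1.0, 2.0]), n_time=4, pad_value=-1.0)
    padded_2d = pad_vector(np.ones((2, 3)), n_time=4)
    assert padded_1d.shape == (4,) and padded_2d.shape == (4, 3)
    return padded_1d, padded_2d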
def sparsity_dense_vector(vector, blank_symbol):
indices = []
values = []
d_vector = np.diff(vector)
change_indices = np.where(d_vector != 0)[0]
last_value = blank_symbol
for ind in change_indices:
value = vector[ind]
indices.append(ind)
values.append(value)
# last_value=value
# last_v = blank_symbol
# for v in values:
# assert v != blank_symbol, 'v: {}'.format(blank_symbol)
# assert v != last_v, 'v {} last_v {} '.format(v,last_v)
# last_v = v
return np.array(indices, dtype=int), np.array(values, dtype=int)
def label_stack_to_sparse_tensor(label_stack, blank_symbol):
sparse_tensor = {'indices': [], 'values': []}
for i_batch, phns in enumerate(label_stack):
indices, values = sparsity_dense_vector(phns, blank_symbol)
sparse_tensor['indices'].append([[i_batch, i_time] for i_time in indices])
sparse_tensor['values'].append(values)
sparse_tensor['indices'] = np.concatenate(sparse_tensor['indices'])
sparse_tensor['values'] = np.concatenate(sparse_tensor['values'])
return sparse_tensor
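# A minimal sketch (not from the original tools module): the sparse encoding
# keeps, for every run of repeated labels except the final run, the index at
# which the run ends together with its label value. The demo name and the
# choice of 0 as blank symbol are assumptions for illustration.
def _sparse_label_example():
    """Hypothetical demo; assumes 0 is the blank/silence symbol."""
    sparse = label_stack_to_sparse_tensor([np.array([5, 5, 3, 3, 0])],
                                          blank_symbol=0)
    # one batch entry, two recorded label runs (5 ending at t=1, 3 at t=3)
    assert sparse['values'].tolist() == [5, 3]
    assert sparse['indices'].tolist() == [[0, 1], [0, 3]]
    return sparse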
class TimitDataset:
def __init__(self, n_mini_batch, data_path='../datasets/timit_processed', preproc=None,
use_reduced_phonem_set=True, return_sparse_phonem_tensor=False, epsilon=1e-10):
assert preproc is not None
self.data_path = data_path
self.preproc = preproc
self.use_reduced_phonem_set = use_reduced_phonem_set
self.return_sparse_phn_tensor = return_sparse_phonem_tensor
self.epsilon = epsilon
self.n_feats = {
'fbank': 41 * 3,
'mfccs': 13 * 3,
'htk': 13 * 3 if 'htk_mfcc' in data_path else 41 * 3,
# 'mfccspike': 13 * 31,
# 'melspec': 16 * 3,
# 'melspike': 496,
'cochspec': 86 * 3,
'cochspike': 86
}
self.n_features = self.n_feats[preproc]
self.n_phns = 39 if use_reduced_phonem_set else 61
# Load features from files
self.feature_stack_train, self.phonem_stack_train, self.meta_data_train, _, _ = self.load_data_stack('train')
self.feature_stack_test, self.phonem_stack_test, self.meta_data_test, self.vocabulary, self.wav_test = \
self.load_data_stack('test')
self.feature_stack_develop, self.phonem_stack_develop, self.meta_data_develop, self.vocabulary, self.wav_val = \
self.load_data_stack('develop')
def add_derivatives(features):
n_features = features[0].shape[1]
# add derivatives:
get_delta = lambda v: np.concatenate([np.zeros((1, v.shape[1])), v[2:] - v[:-2], np.zeros((1, v.shape[1]))],
axis=0)
d_features = [get_delta(f) for f in features]
d2_features = [get_delta(f) for f in d_features]
features = [np.concatenate([f, d_f, d2_f], axis=1) for f, d_f, d2_f in
zip(features, d_features, d2_features)]
assert (features[0].shape[1] == self.n_features)
return features
if self.preproc not in ['cochspike', 'htk']:
self.feature_stack_train = add_derivatives(self.feature_stack_train)
self.feature_stack_test = add_derivatives(self.feature_stack_test)
self.feature_stack_develop = add_derivatives(self.feature_stack_develop)
# normalize the features
concatenated_training_features = np.concatenate(self.feature_stack_train, axis=0)
means = np.mean(concatenated_training_features, axis=0)
stds = np.std(concatenated_training_features, axis=0)
if self.preproc != 'cochspike':
self.feature_stack_train = [(f - means) / np.maximum(stds, self.epsilon) for f in self.feature_stack_train]
self.feature_stack_test = [(f - means) / np.maximum(stds, self.epsilon) for f in self.feature_stack_test]
self.feature_stack_develop = [(f - means) / np.maximum(stds, self.epsilon) for f in
self.feature_stack_develop]
self.feature_stack_train = np.array(self.feature_stack_train, dtype=object)
self.feature_stack_test = np.array(self.feature_stack_test, dtype=object)
self.feature_stack_develop = np.array(self.feature_stack_develop, dtype=object)
assert (len(self.vocabulary) == self.n_phns)
self.n_mini_batch = n_mini_batch
# n_train_total = len(self.feature_stack_train)
# self.n_validation = int(proportion_validation * n_train_total)+1
# self.n_train = n_train_total - self.n_validation
self.n_train = len(self.feature_stack_train)
self.n_test = len(self.feature_stack_test)
self.n_develop = len(self.feature_stack_develop)
print('Dataset sizes: test {} \t train {} \t validation {}'.format(self.n_test, self.n_train, self.n_develop))
# self.mini_batch_indices,self.validation_indices = self.generate_mini_batch_selection_list()
self.mini_batch_indices = self.generate_mini_batch_selection_list()
self.current_epoch = 0
self.index_current_minibatch = 0
def reduce_phonem_list(self, phn_list):
return [self.phonem_reduction_map[k] for k in phn_list]
def generate_mini_batch_selection_list(self):
# perm = rd.permutation(self.n_train + self.n_validation)
# number_of_batches = self.n_train // self.n_mini_batch
# training_set = perm[:self.n_train]
# validation_set = perm[self.n_train:]
perm = rd.permutation(self.n_train)
number_of_batches = self.n_train // self.n_mini_batch
return np.array_split(perm, number_of_batches) # ,validation_set
def load_data_stack(self, dataset):
path = os.path.join(self.data_path, dataset)
# Define the link to the pickle objects
if self.preproc == 'fbank':
feature_path = os.path.join(path, 'filter_banks.pickle')
elif self.preproc == 'mfccs':
feature_path = os.path.join(path, 'mfccs.pickle')
elif self.preproc == 'htk':
feature_path = os.path.join(path, 'htk.pickle')
# elif self.preproc == 'mfccspike':
# feature_path = os.path.join(path, 'mfcc_spike_stack.pickle')
# elif self.preproc == 'melspec':
# feature_path = os.path.join(path, 'specgram.pickle')
# elif self.preproc == 'melspike':
# feature_path = os.path.join(path, 'spike.pickle')
elif self.preproc == 'cochspec':
feature_path = os.path.join(path, 'coch_raw.pickle')
elif self.preproc == 'cochspike':
feature_path = os.path.join(path, 'coch_spike.pickle')
else:
raise NotImplementedError('Preprocessing %s not available' % self.preproc)
if self.use_reduced_phonem_set:
phonem_path = os.path.join(path, 'reduced_phonems.pickle')
vocabulary_path = os.path.join(path, 'reduced_phonem_list.json')
else:
phonem_path = os.path.join(path, 'phonems.pickle')
vocabulary_path = os.path.join(path, 'phonem_list.json')
# Load the data
with open(feature_path, 'rb') as f:
data_stack = np.array(pickle.load(f), dtype=object)
with open(phonem_path, 'rb') as f:
phonem_stack = np.array(pickle.load(f), dtype=object)
for phns in phonem_stack:
assert ((np.array(phns) < self.n_phns).all()), 'Found phonems up to {} should be maximum {}'.format(
np.max(phns), self.n_phns)
# Load the vocabulary
with open(vocabulary_path, 'r') as f:
vocabulary = json.load(f)
assert vocabulary[0] == ('sil' if self.use_reduced_phonem_set else 'h#')
self.silence_symbol_id = 0
# Load meta data
with open(os.path.join(path, 'metadata.pickle'), 'rb') as f:
metadata = pickle.load(f)
assert vocabulary[0] == ('sil' if self.use_reduced_phonem_set else 'h#')
self.silence_symbol_id = 0
with open(os.path.join(path, 'reduced_phn_index_mapping.json'), 'r') as f:
self.phonem_reduction_map = json.load(f)
# Load raw audio
wav_path = os.path.join(path, 'wav.pickle')
with open(wav_path, 'rb') as f:
wav_stack = np.array(pickle.load(f), dtype=object)
return data_stack, phonem_stack, metadata, vocabulary, wav_stack
def load_features(self, dataset, selection):
if dataset == 'train':
feature_stack = self.feature_stack_train[selection]
phonem_stack = self.phonem_stack_train[selection]
wavs = None
elif dataset == 'test':
feature_stack = self.feature_stack_test[selection]
phonem_stack = self.phonem_stack_test[selection]
wavs = self.wav_test[selection]
elif dataset == 'develop':
feature_stack = self.feature_stack_develop[selection]
phonem_stack = self.phonem_stack_develop[selection]
wavs = self.wav_val[selection]
seq_len = [feature.shape[0] for feature in feature_stack]
n_time = np.max([feature.shape[0] for feature in feature_stack])
features = np.stack([pad_vector(feature, n_time) for feature in feature_stack], axis=0)
if self.return_sparse_phn_tensor:
phns = label_stack_to_sparse_tensor(phonem_stack, self.silence_symbol_id)
else:
phns = np.stack([pad_vector(phns, n_time, self.silence_symbol_id) for phns in phonem_stack], axis=0)
return features, phns, seq_len, wavs
def get_next_training_batch(self):
features, phns, seq_len, _ = self.load_features('train',
selection=self.mini_batch_indices[self.index_current_minibatch])
self.index_current_minibatch += 1
if self.index_current_minibatch >= len(self.mini_batch_indices):
self.index_current_minibatch = 0
self.current_epoch += 1
# Shuffle the training set after each epoch
number_of_batches = len(self.mini_batch_indices)
training_set_indices = np.concatenate(self.mini_batch_indices)
training_set_indices = rd.permutation(training_set_indices)
self.mini_batch_indices = np.array_split(training_set_indices, number_of_batches)
if not self.return_sparse_phn_tensor:
check = (phns < self.n_phns).all()
else:
check = (phns['values'] < self.n_phns).all()
assert check, 'Found phonems up to {} should be maximum {}'.format(np.max(phns), self.n_phns)
return features, phns, seq_len, np.zeros((1, 1))
def get_train_batch(self):
return self.load_features('train', np.arange(self.n_train, dtype=int))
def get_test_batch(self):
return self.load_features('test', np.arange(self.n_test, dtype=int))
def get_next_test_batch(self, selection):
features, phns, seq_len, wavs = \
self.load_features('test', selection=selection)
if not self.return_sparse_phn_tensor:
check = (phns < self.n_phns).all()
else:
check = (phns['values'] < self.n_phns).all()
assert check, 'Found phonems up to {} should be maximum {}'.format(np.max(phns), self.n_phns)
return features, phns, seq_len, wavs
def get_validation_batch(self):
return self.load_features('develop', np.arange(self.n_develop, dtype=int))
def get_next_validation_batch(self, selection):
features, phns, seq_len, wavs = \
self.load_features('develop', selection=selection)
if not self.return_sparse_phn_tensor:
check = (phns < self.n_phns).all()
else:
check = (phns['values'] < self.n_phns).all()
assert check, 'Found phonems up to {} should be maximum {}'.format(
|
np.max(phns)
|
numpy.max
|
from scipy import misc
import tqdm
import pickle
import os
import sys
import numpy as np
def eprint(*args):
_str = " ".join([str(arg) for arg in args])
sys.stderr.write("%s\n" % _str)
def load_english_hnd():
# image names are of the form: data/English/Hnd/Img/Sample001/img001-001.png
fldr = "data/English/Hnd/Img"
NUM_CLASSES = 59
NUM_USERS = 55
IMAGE_SIZE = 32
images, labels, uids = [], [], []
width, height = IMAGE_SIZE, IMAGE_SIZE
MAX_NUM_DOMAINS = NUM_USERS
uid = 0
cache_fname = 'data/english_hnd.pkl'
if os.path.exists(cache_fname):
images, labels, uids = pickle.load(open(cache_fname, "rb"))
else:
for label in tqdm.tqdm(range(NUM_CLASSES)):
label_fldr = "%s/Sample%03d" % (fldr, label+1)
if not os.path.exists(label_fldr):
continue
for fname in os.listdir(label_fldr):
uid = int(fname.split('-')[1][:-4]) - 1
img = misc.imread(label_fldr + "/" + fname, flatten=True)
img = misc.imresize(img, (height, width))
img = img.astype(np.float32)
img = misc.bytescale(img)
img = img.astype(np.uint8)
assert np.max(img) <= 255 and np.min(img) >= 0, "Max and min of image: %f %f" % (np.max(img), np.min(img))
img = (img-128.)/128.
assert np.max(img) != np.min(img)
images.append(img)
labels.append(label)
uids.append(uid)
pickle.dump((images, labels, uids), open(cache_fname, "wb"))
print ("Labels: %s uids: %s" % (labels[:10], uids[:10]))
print ("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
print ("Test images: ", np.max(images[0]), np.min(images[0]))
print ("Read %d examples" % len(images))
images, labels, uids = np.array(images), np.array(labels), np.array(uids)
test_idxs = np.where(uids >= NUM_USERS - 15)
train_idxs = np.where(uids <= NUM_USERS - 25)
dev_idxs = np.intersect1d(np.where(uids > NUM_USERS - 25), np.where(uids < NUM_USERS - 15))
train = (images[train_idxs], labels[train_idxs], uids[train_idxs])
dev = (images[dev_idxs], labels[dev_idxs], uids[dev_idxs])
test = (images[test_idxs], labels[test_idxs], uids[test_idxs])
return (train, dev, dev, test)
def load_english_fnt():
# image names are of the form: data/English/Fnt/Img/Sample001/img001-00078.png
fldr = "data/English/Fnt"
NUM_CLASSES = 62
NUM_USERS = 1016
IMAGE_SIZE = 32
images, labels, uids = [], [], []
width, height = IMAGE_SIZE, IMAGE_SIZE
MAX_NUM_DOMAINS = NUM_USERS
uid = 0
cache_fname = 'data/english_fnt.pkl'
if os.path.exists(cache_fname):
images, labels, uids = pickle.load(open(cache_fname, "rb"))
else:
for label in tqdm.tqdm(range(NUM_CLASSES)):
label_fldr = "%s/Sample%03d" % (fldr, label + 1)
if not os.path.exists(label_fldr):
continue
for fname in os.listdir(label_fldr):
uid = int(fname.split('-')[1][:-4]) - 1
img = misc.imread(label_fldr + "/" + fname, flatten=True)
img = misc.imresize(img, (height, width))
img = img.astype(np.float32)
img = misc.bytescale(img)
img = img.astype(np.uint8)
assert np.max(img) <= 255 and np.min(img) >= 0, "Max and min of image: %f %f" % (np.max(img), np.min(img))
img = (img-128.)/128.
assert np.max(img) != np.min(img)
images.append(img)
labels.append(label)
uids.append(uid)
pickle.dump((images, labels, uids), open(cache_fname, "wb"))
print ("Labels: %s uids: %s" % (labels[:10], uids[:10]))
print ("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
print ("Test images: ", np.max(images[0]), np.min(images[0]))
print ("Read %d examples" % len(images))
images, labels, uids = np.array(images), np.array(labels), np.array(uids)
test_idxs = np.where(uids >= NUM_USERS - 100)
train_idxs = np.where(uids <= NUM_USERS - 500)
dev_idxs = np.intersect1d(np.where(uids > NUM_USERS - 200),
|
np.where(uids < NUM_USERS - 100)
|
numpy.where
|
# Plotting program
import matplotlib.pyplot as plt
# Search for files
from glob import glob
# Good for reading/writing data tables
import pandas as pd
# Better math, numbers, and array functions
import numpy as np
from astropy.timeseries import LombScargle
from astropy.io import fits
# You have to give permission to the notebook to access files stored in your Google Drive.
from google.colab import drive
drive.mount('/content/gdrive',force_remount=True)
import os
import sys
############# If you move these programs you will need to update these directories and names #############
sys.path.append('/content/gdrive/Shareddrives/')
from tess_check import myDir as myDir
def findel(val, array):
array = np.array(array)
adiff = np.abs(array-val)
id = np.argmin(adiff)
return id
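# A tiny usage sketch (not part of the original notebook code): findel returns
# the index of the array element closest to the requested value. The demo
# function name and the grid below are illustrative assumptions.
def _findel_example():
    """Hypothetical demo of findel on a simple monotone grid."""
    grid = np.linspace(0.0, 10.0, 11)
    assert findel(3.2, grid) == 3
    return findel(3.2, grid)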
# Download TESS
from astroquery.mast import Tesscut
from astropy.coordinates import SkyCoord
import astropy.units as u
# CPM modules
from google.colab import drive
drive.mount('/content/gdrive',force_remount=True)
sys.path.append('/content/gdrive/Shareddrives/DouglasGroup/tess_check/')
from tess_check import tess_cpm as tess_cpm
from tess_check import tesscheck as tesscheck
# SAP modules
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
from astropy.stats import sigma_clipped_stats
# Make sheet
def make_sheet(project, file):
# read file
# organize data
# generate sheet
columns = ['DR2Name','RA','Dec','Gmag','BP_RP','Prot','Prot_LS','Power_LS','TESS_Data','Notes']
# Load sheet
def load_sheet(project_name):
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
dir_project = myDir.project_dir(project_name)
sheet_id_file = os.path.join(dir_project,f"{project_name}.txt")
f = open(sheet_id_file,"r")
doc_id = f.readline()
f.close()
sheet = gc.open_by_key(doc_id)
return sheet
def download_tess(sample_name,mag_cut=16.0,gbr_cut=0.5):
# Check for directories
# Main level
dir_project = myDir.project_dir(sample_name)
levels = ['','/FFI','/CPM','/Plots','/Panels','/SAP']
for level in levels:
path_level = dir_project+level
dir_level = glob(path_level)
if
|
np.size(dir_level)
|
numpy.size
|
"""
Most of the functions needed by the web service are here.
In seekpath_app.py we just keep the main web logic.
"""
import copy
import io
import json
import time
import traceback
import numpy as np
import jinja2
import spglib # Mainly to get its version
from ase.data import chemical_symbols
from tools_barebone import logme, get_tools_barebone_version
from tools_barebone.structure_importers import get_structure_tuple, UnknownFormatError
# Version of tools-seekpath
__version__ = "21.05.0"
class FlaskRedirectException(Exception):
"""
Class used to return immediately with a flash message and a redirect.
"""
MAX_NUMBER_OF_ATOMS = 1000
time_reversal_note = (
"The second half of the path is required only if "
"the system does not have time-reversal symmetry"
)
def get_json_for_visualizer(
cell, relcoords, atomic_numbers, seekpath_module
): # pylint: disable=too-many-locals
# from seekpath_module import hpkot, brillouinzone
hpkot = seekpath_module.hpkot
brillouinzone = seekpath_module.brillouinzone
system = (np.array(cell), np.array(relcoords), np.array(atomic_numbers))
res = hpkot.get_path(system, with_time_reversal=False)
real_lattice = res["primitive_lattice"]
# rec_lattice = np.linalg.inv(real_lattice).T # Missing 2pi!
rec_lattice = np.array(hpkot.tools.get_reciprocal_cell_rows(real_lattice))
b1, b2, b3 = rec_lattice
faces_data = brillouinzone.brillouinzone.get_BZ(b1=b1, b2=b2, b3=b3)
response = {}
response["faces_data"] = faces_data
response["b1"] = b1.tolist()
response["b2"] = b2.tolist()
response["b3"] = b3.tolist()
## Convert to absolute
response["kpoints"] = {
k: (v[0] * b1 + v[1] * b2 + v[2] * b3).tolist()
for k, v in res["point_coords"].items()
}
response["kpoints_rel"] = {
k: [v[0], v[1], v[2]] for k, v in res["point_coords"].items()
}
response["path"] = res["path"]
# It should use the same logic, so give the same cell as above
res_explicit = seekpath_module.get_explicit_k_path(system, with_time_reversal=False)
for k in res_explicit:
if k == "segments" or k.startswith("explicit_"):
if isinstance(res_explicit[k], np.ndarray):
response[k] = res_explicit[k].tolist()
else:
response[k] = res_explicit[k]
if (
np.sum(
np.abs(
np.array(res_explicit["reciprocal_primitive_lattice"])
- np.array(res["reciprocal_primitive_lattice"])
)
)
> 1.0e-7
):
raise AssertionError("Got different reciprocal cells...")
# Response for JS, and path_results
return response, res
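# A minimal sketch (not part of the original web service): the "convert to
# absolute" step above maps fractional k-point coordinates onto the
# reciprocal-lattice vectors b1, b2, b3. The helper name is hypothetical.
def _kpoint_to_cartesian_example(kpoint_rel, b1, b2, b3):
    """Hypothetical helper mirroring the conversion used in the response dict."""
    kpoint_rel = np.asarray(kpoint_rel, dtype=float)
    return (kpoint_rel[0] * np.asarray(b1)
            + kpoint_rel[1] * np.asarray(b2)
            + kpoint_rel[2] * np.asarray(b3))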
def process_structure_core( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments
filecontent,
fileformat,
seekpath_module,
call_source="",
logger=None,
flask_request=None,
):
"""
The main function that generates the data to be sent back to the view.
:param filecontent: The file content (string)
:param fileformat: The file format (string), among the accepted formats
:param seekpath_module: the seekpath module. The reason for passing it
is that, when running in debug mode, you want to get the local
seekpath rather than the installed one.
:param call_source: a string identifying the source (i.e., who called
this function). This is a string, mainly for logging reasons.
:param logger: if not None, should be a valid logger, that is used
to output useful log messages.
:param flask_request: if logger is not None, pass also the flask.request
object to help in logging.
:return: this function calls directly flask methods and returns flask
objects
:raise: FlaskRedirectException if there is an error that requires
a redirect to the main selection page. The Exception message
is the message to be flashed via Flask (or in general shown to
the user).
"""
start_time = time.time()
fileobject = io.StringIO(str(filecontent))
form_data = dict(flask_request.form)
try:
structure_tuple = get_structure_tuple(
fileobject, fileformat, extra_data=form_data
)
except UnknownFormatError:
logme(
logger,
filecontent,
fileformat,
flask_request,
call_source,
reason="unknownformat",
extra={"form_data": form_data,},
)
raise FlaskRedirectException("Unknown format '{}'".format(fileformat))
except Exception:
# There was an exception...
logme(
logger,
filecontent,
fileformat,
flask_request,
call_source,
reason="exception",
extra={"traceback": traceback.format_exc(), "form_data": form_data,},
)
raise FlaskRedirectException(
"I tried my best, but I wasn't able to load your "
"file in format '{}'...".format(fileformat)
)
if len(structure_tuple[1]) > MAX_NUMBER_OF_ATOMS:
## Structure too big
logme(
logger,
filecontent,
fileformat,
flask_request,
call_source,
reason="toolarge",
extra={"number_of_atoms": len(structure_tuple[1]), "form_data": form_data,},
)
raise FlaskRedirectException(
"Sorry, this online visualizer is limited to {} atoms "
"in the input cell, while your structure has {} atoms."
"".format(MAX_NUMBER_OF_ATOMS, len(structure_tuple[1]))
)
# Log the content in case of valid structure
logme(
logger,
filecontent,
fileformat,
flask_request,
call_source,
reason="OK",
extra={"number_of_atoms": len(structure_tuple[1]), "form_data": form_data,},
)
try:
in_json_data = {
"cell": structure_tuple[0],
"scaled_coords": structure_tuple[1],
"atomic_numbers": structure_tuple[2],
}
out_json_data, path_results = get_json_for_visualizer(
in_json_data["cell"],
in_json_data["scaled_coords"],
in_json_data["atomic_numbers"],
seekpath_module=seekpath_module,
)
raw_code_dict = copy.copy(out_json_data)
for k in list(raw_code_dict.keys()):
if k.startswith("explicit_"):
raw_code_dict.pop(k)
if k == "segments":
raw_code_dict.pop(k)
raw_code_dict.pop("faces_data")
raw_code_dict["primitive_lattice"] = path_results["primitive_lattice"].tolist()
raw_code_dict["primitive_positions"] = path_results[
"primitive_positions"
].tolist()
inputstructure_positions_cartesian = np.dot(
np.array(in_json_data["scaled_coords"]), np.array(in_json_data["cell"]),
).tolist()
primitive_positions_cartesian = np.dot(
np.array(path_results["primitive_positions"]),
np.array(path_results["primitive_lattice"]),
).tolist()
primitive_positions_cartesian_refolded = np.dot(
np.array(path_results["primitive_positions"]) % 1.0,
|
np.array(path_results["primitive_lattice"])
|
numpy.array
|
"""
queryEval.py
DO NOT MODIFY ANY CODES IN THIS FILE
OTHERWISE YOUR RESULTS MAY BE INCORRECTLY EVALUATED!
@author: <NAME>, 2017
For questions or bug reporting, please send an email to <EMAIL>
"""
import os
import cv2
import numpy as np
import pickle
import sys, getopt
import matplotlib.pyplot as plt
from computeFeatures import computeFeatures
from computeDistances import computeDistances
# EDIT THIS TO YOUR OWN PATH IF DIFFERENT
dbpath = os.getcwd() + '/../fooddb/'
# these labels are the abbreviations of the actual food names
labels = ('AK','BL','CD','CL','DR','MG','NL','PG','RC','ST')
# Read command line args
myopts, args = getopt.getopt(sys.argv[1:],"d:q:h")
# parsing command line args
for o, a in myopts:
if o == '-d':
queryfile = os.path.join(dbpath, a + '.jpg')
gt_idx = np.uint8(np.floor(int(a)/100))
if not os.path.isfile(queryfile):
print("Error: Query file does not exist! Please check.")
sys.exit()
elif o == '-q':
queryfile = a
if not os.path.isfile(queryfile):
print("Error: Query file does not exist! Please check.")
sys.exit()
# tokenize filename to get category label and index
gt = str(queryfile.split("_")[1]).split(".")[0]
gt_idx = labels.index(gt)
elif o == '-h':
print("\nUsage: %s -d dbfilenumber\n # to specify a single query image from the database for evaluation" % sys.argv[0])
print("\n %s -q queryfile\n # to specify a new query image for evaluation" % sys.argv[0])
print(" ")
sys.exit()
else:
print(' ')
featvect = [] # empty list for holding features
FEtime = np.zeros(1000)
# load pickled features
fv = pickle.load(open("feat.pkl", "rb") )
print('Features loaded')
# read query image file
img = cv2.imread(queryfile)
query_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# show stuff
plt.imshow(query_img), plt.title('Query image: %s'%labels[gt_idx])
plt.xticks([]), plt.yticks([])
print('Query image: %s'%labels[gt_idx])
# compute features
newfeat = computeFeatures(query_img)
# insert new feat to the top of the feature vector stack
fv = np.insert(fv, 0, newfeat, axis=0)
# find all pairwise distances
D = computeDistances(fv)
# *** Evaluation ----------------------------------------------------------
# number of images to retrieve
nRetrieved = 100
# access distances of all images from query image (first image), sort them asc
nearest_idx = np.argsort(D[0, :]);
# quick way of finding category label for top K retrieved images
retrievedCats = np.uint8(np.floor((nearest_idx[1:nRetrieved+1])/100));
# find matches
hits_q = (retrievedCats == gt_idx)
# calculate average precision of the ranked matches
if np.sum(hits_q) != 0:
avg_prec_q = np.sum(hits_q*np.cumsum(hits_q)/(np.arange(nRetrieved)+1)) / np.sum(hits_q)
else:
avg_prec_q = 0.0
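# A short illustration (not part of the original evaluation script): average
# precision weights each hit by the running precision at its rank, so earlier
# hits contribute more. With hits [1, 0, 1] the precisions at the hit ranks
# are 1/1 and 2/3, giving AP = (1 + 2/3) / 2 ~= 0.833. The function name is
# hypothetical.
def _average_precision_example():
    """Hypothetical check of the AP formula used above on a tiny ranking."""
    hits = np.array([1, 0, 1])
    ap = np.sum(hits * np.cumsum(hits) / (np.arange(hits.size) + 1)) / np.sum(hits)
    return ap  # approximately 0.8333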
recall =
|
np.sum(hits_q)
|
numpy.sum
|
from .elm import ELM
import numpy as np
from ..util import solver, accuracy, disagreement
class BaggingStepwiseELM(ELM):
"""
Bagging Stepwise ELM Ensemble.
"""
__name__ = "Bagging Stepwise ELM Ensemble"
size: int
m: int = 3
prop: float = 0.75
def fit(self, train_data, train_target, parameter):
"""
Use some train (data and target) and parameter to
fit the classifier and construct the rules.
:param numpy.array train_data: data with features.
:param numpy.array train_target: targets in j codification.
:param dict parameter:
"""
self.instance_param_(train_data=train_data,
train_target=train_target,
parameter=parameter)
self.get_weight_bias_()
self.output_weight = np.zeros((self.size, self.hidden_neurons, self.Y.shape[1]))
# Train the model
h_matrix = self.get_h_matrix(data=train_data)
self.output_weight[0] = self.fit_step(h=h_matrix, y=self.Y)
acc = accuracy(clf=self,
pred_data=train_data,
real_targ=train_target)
dis = 1.0
expected_size = self.size
removed = list()
for s in range(1, self.size):
# Random subset
length = int(self.prop * self.n)
index = np.random.choice(self.n, length)
data_d = train_data[index]
y_d = self.Y[index]
h_matrix_d = self.get_h_matrix(data=data_d)
# Train the model
self.output_weight[s] = self.fit_step(h=h_matrix_d, y=y_d)
new_acc = accuracy(clf=self,
pred_data=train_data,
real_targ=train_target)
new_dis = disagreement(clf=self,
pred_data=train_data,
real_targ=train_target,
S=s+1)
if new_acc < acc or new_dis > dis:
self.output_weight[s] = np.zeros(( self.hidden_neurons, self.Y.shape[1]))
expected_size -= 1
removed.append(s)
else:
dis = new_dis
acc = new_acc
self.output_weight = np.delete(self.output_weight, removed, axis=0)
self.size = expected_size
def fit_step(self, h, y):
"""
Fit with part of the data from the whole set.
:param h:
:param y:
:return:
"""
left = np.eye(h.shape[1]) + self.reg * np.dot(h.T, h)
right =
|
np.dot(h.T, y)
|
numpy.dot
|
import numpy as np
from random import randint
from random import shuffle
import pandas as pd
class CellData:
def __init__(self, data_file, test_fold, cell_types="MCF7,PC3", pert_type="trt_cp", revision=False):
if revision:
data, meta, all_pert_ids = self.parse_data_split(data_file)
else:
data, meta, all_pert_ids = self.parse_data(data_file, cell_types, pert_type)
train_data, train_meta, test_data, test_meta, val_data, val_meta,\
cell_types, train_perts, val_perts, test_perts = self.split_data(data, meta, all_pert_ids, test_fold)
meta_dictionary_pert = {}
for pert_id in train_perts:
meta_dictionary_pert[pert_id] = [[p, i] for i, p in enumerate(train_meta) if p[1] == pert_id]
meta_dictionary_pert_test = {}
for pert_id in test_perts:
meta_dictionary_pert_test[pert_id] = [[p, i] for i, p in enumerate(test_meta) if p[1] == pert_id]
meta_dictionary_pert_val = {}
for pert_id in val_perts:
meta_dictionary_pert_val[pert_id] = [[p, i] for i, p in enumerate(val_meta) if p[1] == pert_id]
self.train_perts = train_perts
self.test_perts = test_perts
self.train_data = train_data
self.test_data = test_data
self.train_meta = train_meta
self.test_meta = test_meta
self.cell_types = cell_types
self.all_pert_ids = all_pert_ids
self.val_data = val_data
self.val_meta = val_meta
self.meta_dictionary_pert = meta_dictionary_pert
self.meta_dictionary_pert_test = meta_dictionary_pert_test
self.meta_dictionary_pert_val = meta_dictionary_pert_val
# perts = np.loadtxt("cluster_perts.csv", delimiter=",", dtype=np.str)
# matrix = np.asarray([train_data[i] for i, p in enumerate(train_meta) if p[1] in perts])
# p1 = np.mean(matrix, axis=0)
# p2 = matrix.std(axis=0)
# utils1.draw_vectors([p1, p2], "cluster_1_info.png", names=["Mean", "SD"])
# for ct in cell_types:
# matrix = np.asarray([train_data[i] for i, p in enumerate(train_meta) if p[0] == ct])
# p1 = np.mean(matrix, axis=0)
# p2 = matrix.std(axis=0)
# utils1.draw_vectors([p1, p2], "cell_types/" + ct + "_info.png", names=["Mean", "SD"])
# utils1.draw_dist(matrix, "cell_types/" + ct + "_dist.png")
print("----------------------------------------------")
print(train_data.shape)
print(test_data.shape)
print("----------------------------------------------")
def parse_data_split(self, file):
perts = pd.read_csv(file + "/perts.csv", sep="\t", header=None, names=["Values"])['Values'].values.tolist()
cell_types = pd.read_csv(file + "/cell_types.csv", sep="\t", header=None, names=["Values"])[
'Values'].values.tolist()
pert_meta = []
data = []
for cell in cell_types:
df = pd.read_csv(file + "/tensor/" + cell + ".csv", sep=",", header=None).values
for i in range(len(df)):
if np.isnan(df[i]).any():
continue
data.append(df[i])
pert_meta.append([cell, perts[i], "trt_cp"])
data = np.asarray(data)
data = data / np.max(np.abs(data))
data = np.expand_dims(data, axis=-1)
return data, pert_meta, perts
def parse_data(self, file, cell_types, pert_type):
print("Parsing data at " + file)
df = pd.read_csv(file, sep="\t")
df.reset_index(drop=True, inplace=True)
print("Total: " + str(df.shape))
df = df[(df['pert_type'] == pert_type)]
# df = df[(df['pert_type'] == "trt_sh.cgs")]
if cell_types is not None:
df = df[df['cell_id'].isin(cell_types.split(","))]
print(df.groupby(['cell_id']).size())
print("Cell filtering: " + str(df.shape))
# df['pert_idose'] = df['pert_idose'].str.replace(' um','')
# df['pert_idose'] = df['pert_idose'].astype(float)
# df.pert_itime.value_counts()
# df.pert_idose.value_counts()
# df = df[(df['pert_itime'] == "24 h")]
# print("time filtering: " + str(df.shape))
# df = df[(df['pert_idose'] < 4)]
# print("dose filtering: " + str(df.shape))
print(df.groupby(['cell_id']).size())
df = df.groupby(['cell_id', 'pert_id', 'pert_type'], as_index=False).mean()
print("Merging: " + str(df.shape))
print(df.groupby(['cell_id']).size())
# df = df.groupby(['pert_id']).filter(lambda x: len(x) > 1)
# print("Pert filtering: " + str(df.shape))
print(df.groupby(['cell_id']).size())
# df = df.groupby(['cell_id']).filter(lambda x: len(x) > 1000)
# print("Count filtering: " + str(df.shape))
# df = df.drop_duplicates(['cell_id', 'pert_id', 'pert_idose', 'pert_itime', 'pert_type'])
print(df.groupby(['cell_id']).size())
cell_ids = df["cell_id"].values
pert_ids = df["pert_id"].values
all_pert_ids = set(pert_ids)
# pert_idose = df["pert_idose"].values
# pert_itime = df["pert_itime"].values
pert_type = df["pert_type"].values
perts = np.stack([cell_ids, pert_ids, pert_type]).transpose()
df = df.drop(['cell_id', 'pert_id', 'pert_type', 'Unnamed: 0'], 1)
data = df.values
data = data / np.max(np.abs(data))
data = np.expand_dims(data, axis=-1)
return data, perts, all_pert_ids
def split_data(self, data, meta, all_pert_ids, test_fold):
print(test_fold)
cell_types = set([meta[i][0] for i, x in enumerate(meta)])
rng_state = np.random.get_state()
np.random.shuffle(data)
np.random.set_state(rng_state)
np.random.shuffle(meta)
if test_fold is not None:
test_perts = np.loadtxt(test_fold, dtype='str')
# Special case for Hodos revision
if "," in test_perts[0]:
test_fold_data = test_perts
train_data = np.asarray(
[data[i] for i, m in enumerate(meta) if m[0] + "," + m[1] not in test_fold_data]) # and m[0] != "A375"
test_data = np.asarray([data[i] for i, m in enumerate(meta) if m[0] + "," + m[1] in test_fold_data])
train_meta = np.asarray(
[m for i, m in enumerate(meta) if m[0] + "," + m[1] not in test_fold_data]) # and m[0] != "A375"
test_meta = np.asarray([m for i, m in enumerate(meta) if m[0] + "," + m[1] in test_fold_data])
split = int(0.9 * len(train_data))
val_data = train_data[split:]
val_meta = train_meta[split:]
train_data = train_data[:split]
train_meta = train_meta[:split]
test_perts = []
train_perts = []
val_perts = []
for m in train_meta:
if m[1] not in train_perts:
train_perts.append(m[1])
for m in test_meta:
if m[1] not in test_perts:
test_perts.append(m[1])
for m in val_meta:
if m[1] not in val_perts:
val_perts.append(m[1])
return train_data, train_meta, test_data, test_meta, val_data, val_meta, cell_types, train_perts, val_perts, test_perts
z = list(all_pert_ids - set(test_perts))
shuffle(z)
split = int(0.95 * len(z))
train_perts = z[:split]
val_perts = z[split:]
else:
train_perts = list(all_pert_ids)
test_perts = []
val_perts = []
# val_perts = val_perts[:min(len(val_perts), int(0.1 * len(z)))]
train_data = np.asarray(
[data[i] for i, m in enumerate(meta) if m[1] in train_perts]) # and m[0] != "A375"
test_data = np.asarray([data[i] for i, m in enumerate(meta) if m[1] in test_perts])
val_data = np.asarray([data[i] for i, m in enumerate(meta) if m[1] in val_perts])
train_meta = np.asarray(
[m for i, m in enumerate(meta) if m[1] in train_perts]) # and m[0] != "A375"
test_meta = np.asarray([m for i, m in enumerate(meta) if m[1] in test_perts])
val_meta = np.asarray([m for i, m in enumerate(meta) if m[1] in val_perts])
return train_data, train_meta, test_data, test_meta, val_data, val_meta, cell_types, train_perts, val_perts, test_perts
def get_profile(self, data, meta_data, test_pert, train_data=False):
# if train_data:
pert_list = [p[1] for p in meta_data if p[0][0] != test_pert[0]]
# else:
# pert_list = [p[1] for p in meta_data if
# p[0][0] != test_pert[0] and p[0][0] == "A375"]
if len(pert_list) > 0:
random_best = randint(0, len(pert_list) - 1)
mean_profile = np.mean(np.asarray(data[pert_list]), axis=0, keepdims=True)
return random_best, np.asarray([data[pert_list[random_best]]]), mean_profile, data[pert_list]
else:
return -1, None, None, None
def get_profile2(self, data, meta_data):
pert_list = [p[1] for p in meta_data]
if len(pert_list) > 0:
random_best = randint(0, len(pert_list) - 1)
mean_profile = np.mean(np.asarray(data[pert_list]), axis=0, keepdims=True)
return random_best,
|
np.asarray([data[pert_list[random_best]]])
|
numpy.asarray
|
#!/usr/bin/env python3
"""
Discriminative Bayesian Filtering Lends Momentum
to the Stochastic Newton Method for Minimizing Log-Convex Functions
Exhibits and tests a discriminative filtering strategy for the stochastic
(batch-based) Newton method that aims to minimize the mean of log-convex
functions using sub-sampled gradients and Hessians
Runs using the provided Dockerfile (https://www.docker.com):
```
docker build --no-cache -t hibiscus .
docker run --rm -ti -v $(pwd):/home/felixity hibiscus
```
or in a virtual environment with Python3.10:
```
python3.10 -m venv turquoise
source turquoise/bin/activate
pip3 install -r requirements.txt
python3.10 filtered_stochastic_newton.py
```
"""
from __future__ import annotations
import datetime
import functools
import inspect
import logging
import os
import platform
import re
import subprocess
import sys
import time
from typing import Callable
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize as sp_minimize
from numpy.linalg import *
class DiscriminativeKalmanFilter:
"""
Implements the Discriminative Kalman Filter as described in <NAME>.,
<NAME>., <NAME>., <NAME>., & <NAME>.'s "The
discriminative Kalman filter for Bayesian filtering with nonlinear and
nongaussian observation models." Neural Comput. 32(5), 969–1017 (2020).
"""
def __init__(
self,
stateModelA: np.mat,
stateModelGamma: np.mat,
stateModelS: np.mat,
posteriorMean: np.mat = None,
posteriorCovariance: np.mat = None,
) -> None:
"""
Specifies the
state model p(hidden_t|hidden_{t-1})
= eta_{dState}(hidden_t; stateModelA*hidden_{t-1}, stateModelGamma)
and measurement model p(hidden_t|observed_t)
= eta_{dState}(hidden_t; ft, Qt)
where ft, Qt must be supplied by the user at each time step for updates
:param stateModelA: A from eq. (2.1b)
:param stateModelGamma: Γ from eq. (2.1b)
:param stateModelS: S from eq. (2.1a)
:param posteriorMean: μ_t from eq. (2.6)
:param posteriorCovariance: Σ_t from eq. (2.6)
"""
self.stateModelA = stateModelA
self.stateModelGamma = stateModelGamma
self.stateModelS = stateModelS
self.dState = stateModelA.shape[0]
if posteriorMean is not None:
self.posteriorMean = posteriorMean
else:
self.posteriorMean = np.zeros((self.dState, 1))
if posteriorCovariance is not None:
self.posteriorCovariance = posteriorCovariance
else:
self.posteriorCovariance = self.stateModelS
def stateUpdate(self) -> None:
"""
Calculates the first 2 lines of eq. (2.7) in-place
"""
self.posteriorMean = self.stateModelA * self.posteriorMean
self.posteriorCovariance = (
self.stateModelA * self.posteriorCovariance * self.stateModelA.T
+ self.stateModelGamma
)
def measurementUpdate(self, ft: np.mat, Qt: np.mat) -> None:
"""
Given ft & Qt, calculates the last 2 lines of eq. (2.7)
:param ft: f(x_t) from eq. (2.2)
:param Qt: Q(x_t) from eq. (2.2)
:return:
"""
if not np.all(eigvals(inv(Qt) - inv(self.stateModelS)) > 1e-6):
Qt = inv(inv(Qt) + inv(self.stateModelS))
newPosteriorCovInv = (
inv(self.posteriorCovariance) + inv(Qt) - inv(self.stateModelS)
)
self.posteriorMean = solve(
newPosteriorCovInv,
solve(self.posteriorCovariance, self.posteriorMean)
+ solve(Qt, ft),
)
self.posteriorCovariance = inv(newPosteriorCovInv)
def predict(self, ft: np.mat, Qt: np.mat) -> tuple[np.mat, np.mat]:
"""
Given ft & Qt, performs stateUpdate() and measurementUpdate(ft, Qt)
:param ft: f(x_t) from eq. (2.2)
:param Qt: Q(x_t) from eq. (2.2)
:return: new posterior mean and covariance from applying eq. (2.7)
"""
self.stateUpdate()
self.measurementUpdate(ft, Qt)
return self.posteriorMean, self.posteriorCovariance
def ArmijoStyleSearch(
fn: Callable[[float], float],
t0: np.mat,
step_dir: np.mat,
grad_fn_t0: np.mat,
) -> np.mat:
"""
Implements a backtracking line search inspired by Armijo, L.'s
"Minimization of functions having Lipschitz continuous first partial
derivatives." Pacific J. Math. 16(1), 1–3 (1966).
:param fn: callable fn for which we seek a minimum
:param t0: starting point
:param step_dir: direction in which to seek a minimum of fn from t0
:param grad_fn_t0: gradient of fn at t0
:return: reasonable step length
"""
fn_x0 = fn(t0)
for k in range(5):
step_length = 2**-k
if fn(t0 + step_length * step_dir) - fn_x0 <= float(
0.95 * step_length * step_dir * grad_fn_t0.T
):
break
return step_length
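# A minimal smoke test (not part of the original experiment script): run a
# single predict step of the DiscriminativeKalmanFilter with a scalar
# random-walk state model. The matrices below are illustrative values only,
# and the function name is hypothetical.
def _dkf_single_step_example():
    """Hypothetical one-dimensional filter step exercising eq. (2.7)."""
    dkf = DiscriminativeKalmanFilter(
        stateModelA=np.mat([[1.0]]),
        stateModelGamma=np.mat([[0.1]]),
        stateModelS=np.mat([[1.0]]),
    )
    mean, cov = dkf.predict(ft=np.mat([[0.3]]), Qt=np.mat([[0.5]]))
    return mean, cov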
def angular_distance(v1: np.array, v2: np.array) -> np.float:
"""
Returns the angle in radians between two equal-length vectors v1 and v2;
if in 2 dimensions, returns a signed angle
:param v1: first vector
:param v2: second vector
:return: angle between v1 and v2 (radians)
"""
v1_n = np.asarray(v1).ravel() / norm(v1)
v2_n = np.asarray(v2).ravel() / norm(v2)
if v1_n.size != 2:
return np.arccos(np.dot(v1_n, v2_n))
else:
# can assign a sign when vectors are 2-dimensional
theta1 =
|
np.arctan2(v1_n[1], v1_n[0])
|
numpy.arctan2
|
import numpy as np
import functools
import sys
import pytest
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
put_along_axis
)
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
IS_64BIT = sys.maxsize > 2**32
def _add_keepdims(func):
""" hack in keepdims behavior into a function taking an axis """
@functools.wraps(func)
def wrapped(a, axis, **kwargs):
res = func(a, axis=axis, **kwargs)
if axis is None:
axis = 0 # res is now a scalar, so we can insert this anywhere
return np.expand_dims(res, axis=axis)
return wrapped
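# A brief illustration (not part of the original test module): the wrapper
# re-inserts the reduced axis so that arg-functions mimic keepdims=True.
# The demo function name is hypothetical.
def _add_keepdims_example():
    """Hypothetical demo; argmax over axis=1 keeps a length-1 axis."""
    a = np.array([[0, 5, 2], [7, 1, 3]])
    idx = _add_keepdims(np.argmax)(a, axis=1)
    assert idx.shape == (2, 1)
    assert idx.tolist() == [[1], [0]]
    return idx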
class TestTakeAlongAxis(object):
def test_argequivalent(self):
""" Test it translates from arg<func> to <func> """
from numpy.random import rand
a = rand(3, 4, 5)
funcs = [
(np.sort, np.argsort, dict()),
(_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
(_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
(np.partition, np.argpartition, dict(kth=2)),
]
for func, argfunc, kwargs in funcs:
for axis in list(range(a.ndim)) + [None]:
a_func = func(a, axis=axis, **kwargs)
ai_func = argfunc(a, axis=axis, **kwargs)
assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
def test_invalid(self):
""" Test it errors when indices has too few dimensions """
a = np.ones((10, 10))
ai = np.ones((10, 2), dtype=np.intp)
# sanity check
take_along_axis(a, ai, axis=1)
# not enough indices
assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
# bool arrays not allowed
assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
# float arrays not allowed
assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
# invalid axis
assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
def test_empty(self):
""" Test everything is ok with empty results, even with inserted dims """
a = np.ones((3, 4, 5))
ai = np.ones((3, 0, 5), dtype=np.intp)
actual = take_along_axis(a, ai, axis=1)
assert_equal(actual.shape, ai.shape)
def test_broadcast(self):
""" Test that non-indexing dimensions are broadcast in both directions """
a = np.ones((3, 4, 1))
ai = np.ones((1, 2, 5), dtype=np.intp)
actual = take_along_axis(a, ai, axis=1)
assert_equal(actual.shape, (3, 2, 5))
class TestPutAlongAxis(object):
def test_replace_max(self):
a_base = np.array([[10, 30, 20], [60, 40, 50]])
for axis in list(range(a_base.ndim)) + [None]:
# we mutate this in the loop
a = a_base.copy()
# replace the max with a small value
i_max = _add_keepdims(np.argmax)(a, axis=axis)
put_along_axis(a, i_max, -99, axis=axis)
# find the new minimum, which should be at the position of the old max
i_min = _add_keepdims(np.argmin)(a, axis=axis)
assert_equal(i_min, i_max)
def test_broadcast(self):
""" Test that non-indexing dimensions are broadcast in both directions """
a = np.ones((3, 4, 1))
ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
put_along_axis(a, ai, 20, axis=1)
assert_equal(take_along_axis(a, ai, axis=1), 20)
class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_3d(self):
a = np.arange(27).reshape((3, 3, 3))
assert_array_equal(apply_along_axis(np.sum, 0, a),
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
def double(row):
return row * 2
class MyNDArray(np.ndarray):
pass
m =
|
np.array([[0, 1], [2, 3]])
|
numpy.array
|