| prompt | completion | api |
|---|---|---|
| string (19 to 879k chars) | string (3 to 53.8k chars) | string (8 to 59 chars) |
import os
import numpy as np
import json
from PIL import Image
import cv2
import matplotlib.pyplot as plt
def expand_thresh(im, num_pixels):
ret = np.zeros(im.shape)
max_x = im.shape[0]
max_y = im.shape[1]
for i in range(im.shape[0]):
for j in range(im.shape[1]):
# If this pixel is true.
if im[i,j]:
# Set the surrounding <num_pixels> pixels to true.
min_x_ind = max(0, i - num_pixels)
max_x_ind = min(max_x, i + num_pixels)
min_y_ind = max(0, j - num_pixels)
max_y_ind = min(max_y, j + num_pixels)
ret[min_x_ind:max_x_ind,min_y_ind:max_y_ind] = 1
return ret
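# A vectorized alternative (illustrative sketch, not part of the original assignment code):
# expand_thresh is essentially a binary dilation, so OpenCV's morphological dilation with a
# (2*num_pixels + 1) square kernel gives a symmetric version of the same expansion. Note the
# loop above grows one pixel less on the high side because Python slices exclude the end index.
# The helper name below is hypothetical.
def expand_thresh_fast(im, num_pixels):
    kernel = np.ones((2 * num_pixels + 1, 2 * num_pixels + 1), dtype=np.uint8)
    return cv2.dilate(np.asarray(im, dtype=np.uint8), kernel)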
def detect_red_light(I, name=""):
'''
This function takes a numpy array <I> and returns a list <bounding_boxes>.
The list <bounding_boxes> should have one element for each red light in the
image. Each element of <bounding_boxes> should itself be a list, containing
four integers that specify a bounding box: the row and column index of the
top left corner and the row and column index of the bottom right corner (in
that order). See the code below for an example.
Note that PIL loads images in RGB order, so:
I[:,:,0] is the red channel
I[:,:,1] is the green channel
I[:,:,2] is the blue channel
'''
bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below.
'''
BEGIN YOUR CODE
'''
'''
Approach: threshold the image on the bright center color of a lit red light and on the
surrounding red corona color, then combine the masks to locate candidate lights.
'''
# Color of light
light_color = [255, 215, 150]
# Light threshold values for rgb
light_thresh = [15, 70, 70]
# Color of red corona
red_corona_color = [160, 20, 20]
# Corona threshold values for rgb
corona_thresh = [40, 40, 40]
# Get our RGB channels
rI = np.array(I[:,:,0], dtype="int16")
gI = np.array(I[:,:,1], dtype="int16")
bI = np.array(I[:,:,2], dtype="int16")
# First filter the image based on light color
rI_f = np.abs(rI - light_color[0]) <= light_thresh[0]
gI_f = np.abs(gI - light_color[1]) <= light_thresh[1]
bI_f = np.abs(bI - light_color[2]) <= light_thresh[2]
# Combine all of these together to get a single threshold
    lI = np.logical_and(rI_f, np.logical_and(gI_f, bI_f))
import time
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import cm
from matplotlib.colors import LightSource
ID = 0
R_EXP = 1
R_OBS = 2
P_EXP = 3
P_OBS = 5
class AngularRepresentation:
angle_id_ = 0
position_expected_ = np.array([], dtype=np.double)
position_observed_ = np.array([], dtype=np.double)
range_expected_ = np.array([], dtype=np.double)
range_observed_ = np.array([], dtype=np.double)
def __init__(self, angle_id, range_expected, range_observed, position_expected, position_observed):
self.angle_id_ = angle_id
self.range_expected_ = np.concatenate([self.range_expected_, np.array([range_expected], dtype=np.double)])
self.range_observed_ = np.concatenate([self.range_observed_, np.array([range_observed], dtype=np.double)])
        self.position_expected_ = np.array([[position_expected[0]], [position_expected[1]]], dtype=np.double)
# -*- coding: utf-8 -*-
"""
====================
Truncated Parameters
====================
If you have a large model, you don't need to plot all parameters at once.
Here we only plot the first four parameters. You could also simply pass the number four,
which means the *first* four parameters.
For fun, we also plot everything in green. Note you don't need to give multiple colours,
the shading is all computed from the colour hex code.
"""
import numpy as np
from numpy.random import normal, random, multivariate_normal
from chainconsumer import ChainConsumer
np.random.seed(0)
cov = random(size=(6, 6))
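# A possible continuation (sketch, assuming the pre-1.0 ChainConsumer API of add_chain /
# configure / plotter.plot; the chain size and hex colour are illustrative). It draws a
# correlated 6D Gaussian chain, colours everything green, and truncates the corner plot
# to the first four parameters by passing the number four, as described above.
data = multivariate_normal(normal(size=6), np.dot(cov, cov.T), size=100000)
parameters = ["$x$", "$y$", "$z$", "$a$", "$b$", "$c$"]
c = ChainConsumer()
c.add_chain(data, parameters=parameters)
c.configure(colors=["#388E3C"])
fig = c.plotter.plot(parameters=4)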
"""Class to compute richnesses on a catalog by fitting a linear red-sequence
model, for use in the first part of training before a red-sequence model has
been found.
"""
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import fitsio
import numpy as np
import esutil
import copy
import sys
from .cluster import ClusterCatalog
from .color_background import ColorBackground
from .mask import HPMask
from .galaxy import GalaxyCatalog
from .catalog import Catalog, Entry
from .cluster import Cluster
from .cluster import ClusterCatalog
from .depthmap import DepthMap
from .cluster_runner import ClusterRunner
from .utilities import CubicSpline
###################################################
# Order of operations:
# __init__()
# _additional_initialization() [override this]
# run()
# _setup()
# _more_setup() [override this]
# _process_cluster() [override this]
# _postprocess() [override this]
# output()
###################################################
class RunColormem(ClusterRunner):
"""
The RunColormem class is derived from ClusterRunner, and will compute a
richness and membership probabilities using only an individual color per
cluster. The central galaxy is assumed to be close to the mean color of
the cluster, and the red sequence is fit in a single color-magnitude space
for a first richness estimate.
"""
def _additional_initialization(self, **kwargs):
"""
Additional initialization for RunColormem
"""
self.runmode = "calib_colormem"
self.filetype = "colormem"
self.use_colorbkg = True
self.use_parfile = False
def run(self, *args, **kwargs):
"""
Run a catalog through RunColormem.
Loop over all clusters and perform RunColormem computations on each cluster.
"""
return super(RunColormem, self).run(*args, **kwargs)
def _more_setup(self, *args, **kwargs):
"""
More setup for RunColormem.
"""
self.rmask_0 = self.config.calib_colormem_r0
self.rmask_beta = self.config.calib_colormem_beta
self.cat = ClusterCatalog.from_catfile(self.config.redgalfile,
zredstr=self.zredstr,
config=self.config,
cbkg=self.cbkg,
cosmo=self.cosmo,
r0=self.r0,
beta=self.beta)
use, = np.where((self.cat.z > self.config.zrange[0]) &
(self.cat.z < self.config.zrange[1]))
self.cat = self.cat[use]
self.cat.z_init = self.cat.z
# need to insert red model and get colormodes...
self.cat.add_fields([('redcolor', 'f4', self.config.nmag - 1)])
redmodel = Entry.from_fits_file(self.config.redgalmodelfile)
for j in xrange(self.config.nmag - 1):
spl = CubicSpline(redmodel.nodes, redmodel.meancol[:, j])
self.cat.redcolor[:, j] = spl(self.cat.z)
self.zbounds = np.concatenate([np.array([self.config.zrange[0] - 0.011]),
self.config.calib_colormem_zbounds,
                                       np.array([self.config.zrange[1] + 0.011])])
import numpy as np
import matplotlib.pyplot as plt
from utils import sigmoid , gradient_descent_logistic_regresion
X1=np.array([]) # grade 1
X2 = np.array([])
# -*- coding: utf-8 -*-
"""
some basic plotting functions using PyLab
Created on Thu Feb 7 09:07:39 2019
@author: <NAME>
"""
import math
import matplotlib.pyplot as plt
import numpy as np
try:
from . import DWC_models as DWCmod
except ImportError:
import DWC_models as DWCmod
# some input parameters to use as a fallback if no other parameters are given
# most values as given in: <NAME>., & <NAME>. (2011). Dropwise Condensation Modeling Suitable for Superhydrophobic
# Surfaces. Journal of Heat Transfer, 133(8), 081502–081502. https://doi.org/10.1115/1.4003742
KimKim2011 = {"medium": "Water",
"p_steam": 337.8,
"deltaT_sub": 5,
"Theta": 90,
"CAH": 10, # estimated
"k_coat": 0.2,
"delta_coat": 0.000001,
"h_i": 15.7,
"c": 1,
"N_s": 250}
def choose_model(model_name):
""" wrapper to choose DWC model """
if model_name == "KimKim2011":
DWC_model = DWCmod.KimKim2011
else:
raise BaseException("Model \"" + model_name + "\" is unknown. Please choose one of the implemented models.")
return DWC_model
def print_results(input_params=KimKim2011, model="KimKim2011"):
""" prints calculated values and results """
DWC = choose_model(model)
q, q_n, q_N, r_min, r_e, r_max, Q_drop, n, N, misc = DWC(print_properties=True, **input_params)
print("\nresults:")
print("q:\t", q, "W/m²")
print("q_n:\t", q_n, "W/m²")
print("q_N:\t", q_N, "W/m²")
print("q_N/q:\t", 100 * round(q_N/q, 3), "%")
print("r_min:\t", r_min, "m")
print("r_e:\t", r_e, "m")
print("r_max:\t", r_max, "m")
print("misc.:\t", misc)
print("\nmodel used: ", model)
def plot_qdrop_theta_r(input_params=KimKim2011, model="KimKim2011", radii=[0.000001, 0.000005, 0.000010]):
""" plot rate of heat flow through a single droplet and heat flux at the base of the droplet vs. the contact angle
for specific drop radii. The heat flux refers to the contact area between droplet and surface.
Parameters
----------
input_params: dict
input parameters for the DWC model
model: str
name of the model that should be used
radii: list of floats
drop radii in m for which a graph should be drawn
"""
DWC = choose_model(model)
input_params = input_params.copy() # avoid changing global input_params
    Theta = np.linspace(5, 175, 20)
"""
GWR is tested against results from GWR4
"""
import os
import pysal.lib as ps
from pysal.lib import io
import numpy as np
import multiprocessing as mp
import unittest
import pandas
from types import SimpleNamespace
from ..gwr import GWR, MGWR, MGWRResults
from ..sel_bw import Sel_BW
from ..diagnostics import get_AICc, get_AIC, get_BIC, get_CV
from pysal.model.spglm.family import Gaussian, Poisson, Binomial
class TestGWRGaussianPool(unittest.TestCase):
def setUp(self):
data_path = ps.examples.get_path("GData_utm.csv")
data = io.open(data_path)
self.coords = list(zip(data.by_col('X'), data.by_col('Y')))
self.y = np.array(data.by_col('PctBach')).reshape((-1, 1))
rural = np.array(data.by_col('PctRural')).reshape((-1, 1))
pov = np.array(data.by_col('PctPov')).reshape((-1, 1))
black = np.array(data.by_col('PctBlack')).reshape((-1, 1))
fb = np.array(data.by_col('PctFB')).reshape((-1, 1))
self.X = np.hstack([rural, pov, black])
self.mgwr_X = np.hstack([fb, black, rural])
self.BS_F = io.open(ps.examples.get_path('georgia_BS_F_listwise.csv'))
self.BS_NN = io.open(
ps.examples.get_path('georgia_BS_NN_listwise.csv'))
self.GS_F = io.open(ps.examples.get_path('georgia_GS_F_listwise.csv'))
self.GS_NN = io.open(
ps.examples.get_path('georgia_GS_NN_listwise.csv'))
MGWR_path = os.path.join(
os.path.dirname(__file__), 'georgia_mgwr_results.csv')
self.MGWR = pandas.read_csv(MGWR_path)
self.pool = mp.Pool(4)
def test_BS_NN_Pool(self):
est_Int = self.BS_NN.by_col(' est_Intercept')
se_Int = self.BS_NN.by_col(' se_Intercept')
t_Int = self.BS_NN.by_col(' t_Intercept')
est_rural = self.BS_NN.by_col(' est_PctRural')
se_rural = self.BS_NN.by_col(' se_PctRural')
t_rural = self.BS_NN.by_col(' t_PctRural')
est_pov = self.BS_NN.by_col(' est_PctPov')
se_pov = self.BS_NN.by_col(' se_PctPov')
t_pov = self.BS_NN.by_col(' t_PctPov')
est_black = self.BS_NN.by_col(' est_PctBlack')
se_black = self.BS_NN.by_col(' se_PctBlack')
t_black = self.BS_NN.by_col(' t_PctBlack')
yhat = self.BS_NN.by_col(' yhat')
res = np.array(self.BS_NN.by_col(' residual'))
std_res = np.array(self.BS_NN.by_col(' std_residual')).reshape((-1, 1))
localR2 = np.array(self.BS_NN.by_col(' localR2')).reshape((-1, 1))
inf = np.array(self.BS_NN.by_col(' influence')).reshape((-1, 1))
cooksD = np.array(self.BS_NN.by_col(' CooksD')).reshape((-1, 1))
local_corr = os.path.join(os.path.dirname(__file__), 'local_corr.csv')
corr1 = np.array(io.open(local_corr))
local_vif = os.path.join(os.path.dirname(__file__), 'local_vif.csv')
vif1 = np.array(io.open(local_vif))
local_cn = os.path.join(os.path.dirname(__file__), 'local_cn.csv')
cn1 = np.array(io.open(local_cn))
local_vdp = os.path.join(os.path.dirname(__file__), 'local_vdp.csv')
vdp1 = np.array(io.open(local_vdp), dtype=np.float64)
spat_var_p_vals = [0., 0.0, 0.5, 0.2]
model = GWR(self.coords, self.y, self.X, bw=90.000, fixed=False,
sigma2_v1=False)
rslt = model.fit(pool=self.pool)
adj_alpha = rslt.adj_alpha
alpha = 0.01017489
critical_t = rslt.critical_tval(alpha)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
corr2, vif2, cn2, vdp2 = rslt.local_collinearity()
R2 = rslt.R2
np.testing.assert_allclose(
adj_alpha, np.array([0.02034978, 0.01017489, 0.0002035]),
rtol=1e-04)
self.assertAlmostEquals(critical_t, 2.6011011542649394)
self.assertAlmostEquals(np.around(R2, 4), 0.5924)
self.assertAlmostEquals(np.floor(AICc), 896.0)
self.assertAlmostEquals(np.floor(AIC), 892.0)
self.assertAlmostEquals(np.floor(BIC), 941.0)
self.assertAlmostEquals(np.around(CV, 2), 19.19)
np.testing.assert_allclose(corr1, corr2, rtol=1e-04)
np.testing.assert_allclose(vif1, vif2, rtol=1e-04)
np.testing.assert_allclose(cn1, cn2, rtol=1e-04)
np.testing.assert_allclose(vdp1, vdp2, rtol=1e-04)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
sel = Sel_BW(self.coords, self.y, self.X)
bw = sel.search(pool=self.pool)
model = GWR(self.coords, self.y, self.X, bw)
result = model.fit(pool=self.pool)
p_vals = result.spatial_variability(sel, 10)
np.testing.assert_allclose(spat_var_p_vals, p_vals, rtol=1e-04)
def test_GS_F_Pool(self):
est_Int = self.GS_F.by_col(' est_Intercept')
se_Int = self.GS_F.by_col(' se_Intercept')
t_Int = self.GS_F.by_col(' t_Intercept')
est_rural = self.GS_F.by_col(' est_PctRural')
se_rural = self.GS_F.by_col(' se_PctRural')
t_rural = self.GS_F.by_col(' t_PctRural')
est_pov = self.GS_F.by_col(' est_PctPov')
se_pov = self.GS_F.by_col(' se_PctPov')
t_pov = self.GS_F.by_col(' t_PctPov')
est_black = self.GS_F.by_col(' est_PctBlack')
se_black = self.GS_F.by_col(' se_PctBlack')
t_black = self.GS_F.by_col(' t_PctBlack')
yhat = self.GS_F.by_col(' yhat')
res = np.array(self.GS_F.by_col(' residual'))
std_res = np.array(self.GS_F.by_col(' std_residual')).reshape((-1, 1))
localR2 = np.array(self.GS_F.by_col(' localR2')).reshape((-1, 1))
inf = np.array(self.GS_F.by_col(' influence')).reshape((-1, 1))
cooksD = np.array(self.GS_F.by_col(' CooksD')).reshape((-1, 1))
model = GWR(self.coords, self.y, self.X, bw=87308.298,
kernel='gaussian', fixed=True, sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
self.assertAlmostEquals(np.floor(AICc), 895.0)
self.assertAlmostEquals(np.floor(AIC), 890.0)
self.assertAlmostEquals(np.floor(BIC), 943.0)
self.assertAlmostEquals(np.around(CV, 2), 18.21)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
def test_MGWR_Pool(self):
std_y = (self.y - self.y.mean()) / self.y.std()
std_X = (self.mgwr_X - self.mgwr_X.mean(axis=0)) / \
self.mgwr_X.std(axis=0)
selector = Sel_BW(self.coords, std_y, std_X, multi=True, constant=True)
selector.search(multi_bw_min=[2], multi_bw_max=[159], pool=self.pool)
model = MGWR(self.coords, std_y, std_X, selector=selector,
constant=True)
rslt = model.fit(pool=self.pool)
rslt_2 = model.fit(n_chunks=2,
pool=self.pool) #testing for n_chunks > 1
rslt_3 = model.fit(n_chunks=3, pool=self.pool)
rslt_20 = model.fit(n_chunks=20, pool=self.pool)
model_hat = MGWR(self.coords, std_y, std_X, selector=selector,
constant=True, hat_matrix=True)
rslt_hat = model_hat.fit(pool=self.pool)
rslt_hat_2 = model_hat.fit(n_chunks=2, pool=self.pool)
np.testing.assert_allclose(rslt_hat.R, rslt_hat_2.R, atol=1e-07)
np.testing.assert_allclose(
rslt_hat.S.dot(std_y).flatten(), self.MGWR.predy, atol=1e-07)
varnames = ['X0', 'X1', 'X2', 'X3']
# def suffixed(x):
# """ Quick anonymous function to suffix strings"""
# return ['_'.join(x) for x in varnames]
np.testing.assert_allclose(rslt.predy.flatten(), self.MGWR.predy,
atol=1e-07)
np.testing.assert_allclose(rslt.params, self.MGWR[varnames].values,
atol=1e-07)
np.testing.assert_allclose(
rslt.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_2.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_3.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_20.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt.tvalues, self.MGWR[[s + "_tvalues" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(rslt.resid_response,
self.MGWR.resid_response, atol=1e-04,
rtol=1e-04)
np.testing.assert_almost_equal(rslt.resid_ss, 50.899379467870425)
np.testing.assert_almost_equal(rslt.aicc, 297.12013812258783)
np.testing.assert_almost_equal(rslt.ENP, 11.36825087269831)
np.testing.assert_allclose(rslt.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_2.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_3.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_20.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(
rslt.adj_alpha_j,
np.array([[0.02601003, 0.01300501, 0.0002601],
[0.02845945, 0.01422973, 0.00028459],
[0.04428595, 0.02214297, 0.00044286],
[0.05708556, 0.02854278, 0.00057086]]), atol=1e-07)
np.testing.assert_allclose(
rslt.critical_tval(),
np.array([2.51210749, 2.47888792, 2.31069113, 2.21000184]),
atol=1e-07)
np.testing.assert_allclose(
rslt.filter_tvals(),
self.MGWR[[s + "_filter_tvalues" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(rslt.local_collinearity()[0].flatten(),
self.MGWR.local_collinearity, atol=1e-07)
def test_Prediction(self):
coords = np.array(self.coords)
index = np.arange(len(self.y))
test = index[-10:]
X_test = self.X[test]
coords_test = coords[test]
model = GWR(self.coords, self.y, self.X, 93, family=Gaussian(),
fixed=False, kernel='bisquare', sigma2_v1=False)
results = model.predict(coords_test, X_test)
params = np.array([
22.77198, -0.10254, -0.215093, -0.01405, 19.10531, -0.094177,
-0.232529, 0.071913, 19.743421, -0.080447, -0.30893, 0.083206,
17.505759, -0.078919, -0.187955, 0.051719, 27.747402, -0.165335,
-0.208553, 0.004067, 26.210627, -0.138398, -0.360514, 0.072199,
18.034833, -0.077047, -0.260556, 0.084319, 28.452802, -0.163408,
-0.14097, -0.063076, 22.353095, -0.103046, -0.226654, 0.002992,
18.220508, -0.074034, -0.309812, 0.108636
]).reshape((10, 4))
np.testing.assert_allclose(params, results.params, rtol=1e-03)
bse = np.array([
2.080166, 0.021462, 0.102954, 0.049627, 2.536355, 0.022111,
0.123857, 0.051917, 1.967813, 0.019716, 0.102562, 0.054918,
2.463219, 0.021745, 0.110297, 0.044189, 1.556056, 0.019513,
0.12764, 0.040315, 1.664108, 0.020114, 0.131208, 0.041613, 2.5835,
0.021481, 0.113158, 0.047243, 1.709483, 0.019752, 0.116944,
0.043636, 1.958233, 0.020947, 0.09974, 0.049821, 2.276849,
0.020122, 0.107867, 0.047842
]).reshape((10, 4))
np.testing.assert_allclose(bse, results.bse, rtol=1e-03)
tvalues = np.array([
10.947193, -4.777659, -2.089223, -0.283103, 7.532584, -4.259179,
-1.877395, 1.385161, 10.033179, -4.080362, -3.012133, 1.515096,
7.106862, -3.629311, -1.704079, 1.17042, 17.831878, -8.473156,
-1.633924, 0.100891, 15.750552, -6.880725, -2.74765, 1.734978,
6.980774, -3.586757, -2.302575, 1.784818, 16.644095, -8.273001,
-1.205451, -1.445501, 11.414933, -4.919384, -2.272458, 0.060064,
8.00251, -3.679274, -2.872176, 2.270738
]).reshape((10, 4))
np.testing.assert_allclose(tvalues, results.tvalues, rtol=1e-03)
localR2 = np.array([[0.53068693], [0.59582647], [0.59700925],
[0.45769954], [0.54634509], [0.5494828],
[0.55159604], [0.55634237], [0.53903842],
[0.55884954]])
np.testing.assert_allclose(localR2, results.localR2, rtol=1e-05)
predictions = np.array([[10.51695514], [9.93321992], [8.92473026],
[5.47350219], [8.61756585], [12.8141851],
[5.55619405], [12.63004172], [8.70638418],
[8.17582599]])
np.testing.assert_allclose(predictions, results.predictions,
rtol=1e-05)
def test_BS_NN_longlat_Pool(self):
GA_longlat = os.path.join(
os.path.dirname(__file__), 'ga_bs_nn_longlat_listwise.csv')
self.BS_NN_longlat = io.open(GA_longlat)
coords_longlat = list(
zip(
self.BS_NN_longlat.by_col(' x_coord'),
self.BS_NN_longlat.by_col(' y_coord')))
est_Int = self.BS_NN_longlat.by_col(' est_Intercept')
se_Int = self.BS_NN_longlat.by_col(' se_Intercept')
t_Int = self.BS_NN_longlat.by_col(' t_Intercept')
est_rural = self.BS_NN_longlat.by_col(' est_PctRural')
se_rural = self.BS_NN_longlat.by_col(' se_PctRural')
t_rural = self.BS_NN_longlat.by_col(' t_PctRural')
est_pov = self.BS_NN_longlat.by_col(' est_PctPov')
se_pov = self.BS_NN_longlat.by_col(' se_PctPov')
t_pov = self.BS_NN_longlat.by_col(' t_PctPov')
est_black = self.BS_NN_longlat.by_col(' est_PctBlack')
se_black = self.BS_NN_longlat.by_col(' se_PctBlack')
t_black = self.BS_NN_longlat.by_col(' t_PctBlack')
yhat = self.BS_NN_longlat.by_col(' yhat')
res = np.array(self.BS_NN_longlat.by_col(' residual'))
std_res = np.array(self.BS_NN_longlat.by_col(' std_residual')).reshape(
(-1, 1))
localR2 = np.array(self.BS_NN_longlat.by_col(' localR2')).reshape((-1,
1))
inf = np.array(self.BS_NN_longlat.by_col(' influence')).reshape((-1,
1))
cooksD = np.array(self.BS_NN_longlat.by_col(' CooksD')).reshape((-1,
1))
model = GWR(coords_longlat, self.y, self.X, bw=90.000, fixed=False,
spherical=True, sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
R2 = rslt.R2
self.assertAlmostEquals(np.around(R2, 4), 0.5921)
self.assertAlmostEquals(np.floor(AICc), 896.0)
self.assertAlmostEquals(np.floor(AIC), 892.0)
self.assertAlmostEquals(np.floor(BIC), 941.0)
self.assertAlmostEquals(np.around(CV, 2), 19.11)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
class TestGWRPoissonPool(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'tokyo/Tokyomortality.csv')
data = io.open(data_path, mode='Ur')
self.coords = list(
zip(data.by_col('X_CENTROID'), data.by_col('Y_CENTROID')))
self.y = np.array(data.by_col('db2564')).reshape((-1, 1))
self.off = np.array(data.by_col('eb2564')).reshape((-1, 1))
OCC = np.array(data.by_col('OCC_TEC')).reshape((-1, 1))
OWN = np.array(data.by_col('OWNH')).reshape((-1, 1))
POP = np.array(data.by_col('POP65')).reshape((-1, 1))
UNEMP = np.array(data.by_col('UNEMP')).reshape((-1, 1))
        self.X = np.hstack([OCC, OWN, POP, UNEMP])
"""
This file contains routines used to read in data from the SHIELDS-PTM particle tracing
simulation as well as to calculate quantities based on the particle data.
Authors: <NAME>, <NAME>
"""
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
import numpy as np
from scipy import constants
from scipy import special
ckm = constants.speed_of_light/1e3
re_cm = 6371.0008*1e5 # Volumetric Earth radius in cm
class newDict(dict):
def __init__(self, *args, **kwargs):
self.attrs = dict()
if 'attrs' in kwargs:
if hasattr(kwargs['attrs'], '__getitem__'):
self.attrs = kwargs['attrs']
del kwargs['attrs']
super(newDict, self).__init__(*args, **kwargs)
class Particle(ABC):
"""Generic particle container
Subclass to make proton, etc.
"""
@abstractmethod
def __init__(self):
self._checkvalues()
def _checkvalues(self):
assert self.energy
assert self.charge
assert self.restmass
assert self.mass
def getRigidity(self, units='GV'):
"""Calculate rigidity in GV
Energy & rest mass energy are in MeV
Mass is in atomic mass number
Charge is in units of elementary charge (proton is 1, electron is -1)
"""
mcratio = self.mass/self.charge
en_part = self.energy**2 + 2*self.energy*self.restmass
rigidity_MV = mcratio * np.sqrt(en_part)
if units.upper() == 'GV':
rigidity = rigidity_MV/1e3
else:
raise NotImplementedError('Units other than GV for rigidity are not supported')
return rigidity
@classmethod
def fromRigidity(cls, rigidity_GV):
"""Given rigidity in GV, make particle
"""
rmv = rigidity_GV*1e3
asq = cls.mass**2
rmsq = cls.restmass**2
csq = cls.charge**2
part = asq*(asq*rmsq + csq*rmv**2)
e_k = (np.sqrt(part) - asq*cls.restmass)/asq
return cls(e_k)
class Proton(Particle):
charge = 1
mass, _, _ = constants.physical_constants['proton mass in u'] # AMU
restmass, _, _ = constants.physical_constants['proton mass energy equivalent in MeV']
def __init__(self, energy):
self.energy = energy
super().__init__()
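# Illustrative usage (sketch): getRigidity() evaluates R = (mass/charge) * sqrt(E**2 + 2*E*restmass)
# in MV and converts to GV, so for a 100 MeV proton:
#   >>> p = Proton(100.0)
#   >>> p.getRigidity(units='GV')         # roughly 0.45 GV
#   >>> Proton.fromRigidity(0.45).energy  # recovers roughly 100 MeV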
class StormerCutoff():
def __init__(self, cd_moment=29950.126):
"""
Parameters
==========
cd_moment : float
Centered dipole moment of epoch in nT. Can, for example, be obtained
from spacepy.igrf (after instantiation, the value is in the moment
attribute). Default is the dipole moment for the 2010 epoch.
References
==========
- <NAME>. The Polar Aurora. Clarendon Press, Oxford, 1955.
- <NAME>. and <NAME>., “The Change in Geomagnetic Cutoffs Due to
Changes in the Dipole Equivalent of the Earth's Magnetic Field”, in
23rd International Cosmic Ray Conference (ICRC23), Volume 3, 1993, p. 781.
"""
self.set_coefficient(cd_moment)
def set_coefficient(self, cd_moment):
"""Set rigidity coefficient as dipole moment in mixed units
See section 2 of Smart and Shea (1993).
References
==========
- <NAME>. and <NAME>., “The Change in Geomagnetic Cutoffs Due to
Changes in the Dipole Equivalent of the Earth's Magnetic Field”, in
23rd International Cosmic Ray Conference (ICRC23), Volume 3, 1993, p. 781.
"""
mom_gauss_cm3 = (cd_moment/1e5)*re_cm**3
gauss_cm = mom_gauss_cm3/re_cm**2
# Now apply unit conversion, eV to Gev and volts to abvolts
coeff = 300*gauss_cm/1e9
self.moment_mixunits = coeff # dipole moment in mixed units
self.coeff_v = coeff/4 # vertical reduction
def cutoff_at_L(self, l_value, zenith, azimuth, as_energy=False, particle=Proton):
"""
Parameters
==========
l_value : float or array of floats
            L at which to evaluate the cutoff
zenith : float or array of floats
Angle from the zenith (i.e., from radially outward) [degrees]
azimuth : float or array of floats
Azimuth angle [degrees]. Positive clockwise with zero in the direction of
the dipole axis (north). Arrival from East = 90; from West = 270.
as_energy : bool
If True, express the cutoff as proton energy in MeV.
Default is False, giving the cutoff in GV (rigidity).
particle : ptm_tools.Particle
Defaults to Proton.
Returns
=======
cutoff : float or array of floats
Geomagnetic cutoff at given dipole L. Units are in GV,
unless "as_energy" is True when units are in MeV.
Example
=======
>>> sc = StormerCutoff()
>>> # zenith angle = 90 so that arrival is perp to zenith
>>> c_east = sc.cutoff_at_L(1, 90, 90) # az. is 90 meaning East
>>> c_west = sc.cutoff_at_L(1, 90, 270) # west
>>> print(c_east, c_west) # in GV
57.24368301183024 9.821463284457387
"""
sh_zen = np.atleast_1d(zenith).shape
sh_azi = np.atleast_1d(azimuth).shape
l_vals = np.asarray(l_value)
sh_l = np.atleast_1d(l_vals).shape
ze_az = np.logical_or(sh_zen == sh_azi, np.logical_and(sh_zen == 1, sh_azi == 1))
lv = np.logical_or(sh_l == sh_zen, sh_zen == (1,))
allsame = np.logical_and(lv, ze_az)
if not allsame:
            raise ValueError('Incompatible shapes for l_value, zenith and azimuth: {} {}'.format(lv, ze_az))
lamb = invariant_latitude_from_l(l_vals, degrees=False)
epsi = np.deg2rad(zenith)
eta = np.deg2rad(azimuth)
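        # Stormer cutoff rigidity: R_c = M / (L**2 * (1 + sqrt(1 - sin(eps)*sin(eta)*cos(lam)**3))**2),
        # with M the dipole moment in the mixed units set in set_coefficient().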
denom = (1 + np.sqrt(1 - np.sin(epsi) * np.sin(eta) * np.cos(lamb)**3))**2
cutoff = self.moment_mixunits/(l_vals**2 * denom)
if as_energy:
try:
return particle.fromRigidity(cutoff).energy
except ValueError:
e_arr = np.array([particle.fromRigidity(cc).energy for cc in cutoff])
return np.squeeze(e_arr)
return cutoff
class StormerVertical(StormerCutoff):
def cutoff_at_L(self, l_value, as_energy=False, particle=Proton):
"""
Parameters
==========
l_value : float or array of floats
            L at which to evaluate the cutoff
as_energy : bool
If True, express the cutoff as proton energy in MeV.
Default is False, giving the cutoff in GV (rigidity).
particle : ptm_tools.Particle
Defaults to Proton.
Returns
=======
cutoff : float or array of floats
Geomagnetic cutoff at given dipole L. Units are in GV,
unless "as_energy" is True when units are in MeV.
"""
l_vals = np.asarray(l_value)
cutoff = self.coeff_v/l_vals**2
if as_energy:
try:
return particle.fromRigidity(cutoff).energy
except ValueError:
e_arr = np.array([particle.fromRigidity(cc).energy for cc in cutoff])
return np.squeeze(e_arr)
return cutoff
def invariant_latitude_from_l(l_value, degrees=True):
'''Get invariant latitude from dipole/McIlwain L value
Parameters
==========
l_value : float
Dipole (or McIlwain) L value
degrees : bool
If True (default) return invariant latitude in degrees.
Otherwise return in radians.
Returns
=======
lati : float
Invariant latitude
'''
radi = 1 # latitude at Earth's surface
try:
ronl = radi/l_value
except TypeError:
ronl = radi/np.atleast_1d(l_value)
lati = np.arccos(np.sqrt(ronl))
if degrees:
return np.rad2deg(lati)
else:
return lati
def l_from_invariant_latitude(lati):
'''Get L from the invariant latitude
Parameters
==========
lati : float
Invariant latitude [degrees]
Returns
=======
l_val : float
Dipole/McIlwain L value
'''
lat_rad = np.deg2rad(lati)
l_val = 1/np.cos(lat_rad)**2
return l_val
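# Illustrative round trip (sketch): both helpers invert cos(lambda)**2 = 1/L, so
#   invariant_latitude_from_l(6.6) is roughly 67.1 degrees, and
#   l_from_invariant_latitude(invariant_latitude_from_l(6.6)) returns ~6.6 again.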
@contextmanager
def cd(newdir):
'''Context-managed chdir; changes back to original directory on exit or failure'''
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
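# Illustrative usage (sketch; the directory name is hypothetical):
#   with cd('~/ptm_run'):
#       fnames = sorted(os.listdir('.'))
# The original working directory is restored even if the block raises an exception.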
def parse_trajectory_file(fname):
"""
This routine reads a file of particle trajectory data generated by the ptm simulation.
Data from ptm is output in formatted ascii, with the time history of a particle trajectory
given by a 8-column array with an unknown number of rows. The data from each particle is
separated by a "#".
TIME XPOS YPOS ZPOS VPERP VPARA ENERGY PITCHANGLE
Results from this routine are returned as a dictionary with the trajectory of each particle
stored under a separate integer key (0-based indexing: first particle is 0, second is 1, etc.)
"""
with open(fname, 'r') as f:
flines = f.readlines()
# Scan for number of particles in trajectory file
nparts = sum([1 for line in flines if line.strip().startswith('#')])
# Make dictionary with metadata describing columns
# TODO: put this in a form that can be used for numpy record arrays?
dataDict = newDict(attrs={'header': 'TIME XPOS YPOS ZPOS VPERP VPARA ENERGY PITCHANGLE'})
for idx, line in enumerate(flines):
line = line.strip()
if line.startswith('#'):
# Starts a particle output section, grab particle ID
if idx != 0:
dataDict[pnum] = np.array(parr, dtype=np.float)
pnum = int(line.split()[1])
parr = []
else:
# Get data associated with particle
parr.append(line.split())
# put last particle into output dict
dataDict[pnum] = np.array(parr, dtype=np.float)
return dataDict
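# Illustrative usage (sketch; the file name is hypothetical). Column order follows the
# header above: TIME XPOS YPOS ZPOS VPERP VPARA ENERGY PITCHANGLE.
#   traj = parse_trajectory_file('ptm_0001.dat')
#   t = traj[0][:, 0]        # time history of the first particle
#   energy = traj[0][:, 6]   # energy history of the first particle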
def parse_map_file(fnames):
"""
This is a convenience routine to read in a "map" file generated by the PTM simulation.
Since the particles aren't output in a logical or predictable manner, this routine also
assembles the energy--pitch-angle grids and returns the results in a dictionary. This
is all the information that is needed from SHIELDS-PTM to assemble flux maps using the
energy_to_flux routine.
If a list of filenames is passed in, all map files will be assembled as if they had come
from the same run. The user should be careful to only combine files when it makes physical
sense to do so. As this uses indices of unique energy and pitch angle, only combine runs
with unique sets of these values.
"""
if isinstance(fnames, str):
fnames = [fnames]
with open(fnames[0]) as fh:
header = fh.readline()
lines = np.loadtxt(fh)
for fname in fnames[1:]:
dum = np.loadtxt(fname, skiprows=1)
lines = np.vstack((lines, dum))
pavec = np.sort(np.unique(lines[:, 5]))
envec = np.sort(np.unique(lines[:, 4]))
sourcepos = header.strip().split()[-3:]
    fluxmap = newDict(attrs={'position': np.array(sourcepos, dtype=np.float)})
import codecs
import copy
import pickle
import numpy as np
import console
import constants
import regression
np.random.seed(11)
from __future__ import division, print_function
import vtk
import numpy as np
from vtkplotter import settings
from vtk.util.numpy_support import numpy_to_vtk
import vtkplotter.utils as utils
import vtkplotter.colors as colors
from vtkplotter.actors import Actor, Assembly
import vtkplotter.docs as docs
__doc__ = (
"""
Submodule to generate basic geometric shapes.
"""
+ docs._defs
)
__all__ = [
"Point",
"Points",
"Line",
"Tube",
"Lines",
"Ribbon",
"Arrow",
"Arrows",
"FlatArrow",
"Polygon",
"Rectangle",
"Disc",
"Sphere",
"Spheres",
"Earth",
"Ellipsoid",
"Grid",
"Plane",
"Box",
"Cube",
"Spring",
"Cylinder",
"Cone",
"Pyramid",
"Torus",
"Paraboloid",
"Hyperboloid",
"Text",
"Latex",
"Glyph",
"Tensors",
]
########################################################################
def Point(pos=(0, 0, 0), r=12, c="red", alpha=1):
"""Create a simple point actor."""
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
actor = Points([pos], r, c, alpha)
return actor
def Points(plist, r=5, c="gray", alpha=1):
"""
Build a point ``Actor`` for a list of 2D/3D points.
    Both shapes (N, 3) and (3, N) are accepted as input, provided N > 3.
For very large point clouds a list of colors and alpha can be assigned to each
point in the form `c=[(R,G,B,A), ... ]` where `0 <= R < 256, ... 0 <= A < 256`.
:param float r: point radius.
:param c: color name, number, or list of [R,G,B] colors of same length as plist.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
|manypoints.py|_ |lorenz.py|_
|lorenz|
"""
################ interpret the input format:
n = len(plist)
if n == 0:
return None
elif n == 3: # assume plist is in the format [all_x, all_y, all_z]
if utils.isSequence(plist[0]) and len(plist[0]) > 3:
plist = tuple(zip(plist[0], plist[1], plist[2]))
elif n == 2: # assume plist is in the format [all_x, all_y, 0]
if utils.isSequence(plist[0]) and len(plist[0]) > 3:
plist = tuple(zip(plist[0], plist[1], [0] * len(plist[0])))
if len(plist[0]) == 2: #make it 3d
plist = np.c_[np.array(plist), np.zeros(len(plist))]
################
if ( (utils.isSequence(c) and (len(c) > 3 or len(c[0]) == 4))
or utils.isSequence(alpha)
):
actor = _PointsColors(plist, r, c, alpha)
else:
n = len(plist) # refresh
sourcePoints = vtk.vtkPoints()
sourceVertices = vtk.vtkCellArray()
is3d = len(plist[0]) > 2
if is3d: # its faster
for pt in plist:
aid = sourcePoints.InsertNextPoint(pt)
sourceVertices.InsertNextCell(1)
sourceVertices.InsertCellPoint(aid)
else:
for pt in plist:
aid = sourcePoints.InsertNextPoint(pt[0], pt[1], 0)
sourceVertices.InsertNextCell(1)
sourceVertices.InsertCellPoint(aid)
pd = vtk.vtkPolyData()
pd.SetPoints(sourcePoints)
pd.SetVerts(sourceVertices)
if n == 1: # passing just one point
pd.GetPoints().SetPoint(0, [0, 0, 0])
else:
pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True))
actor = Actor(pd, c, alpha)
actor.GetProperty().SetPointSize(r)
if n == 1:
actor.SetPosition(plist[0])
settings.collectable_actors.append(actor)
return actor
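# Illustrative usage (sketch): a cloud of 1000 random points, first with a single colour
# and then with one RGBA tuple per point (the latter is routed through _PointsColors below).
#   pts = np.random.rand(1000, 3)
#   a1 = Points(pts.tolist(), r=4, c="red")
#   rgba = np.random.randint(0, 256, size=(1000, 4))
#   a2 = Points(pts.tolist(), r=4, c=rgba.tolist())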
def _PointsColors(plist, r, cols, alpha):
n = len(plist)
if n != len(cols):
colors.printc("~times mismatch in Points() colors", n, len(cols), c=1)
raise RuntimeError()
src = vtk.vtkPointSource()
src.SetNumberOfPoints(n)
src.Update()
vgf = vtk.vtkVertexGlyphFilter()
vgf.SetInputData(src.GetOutput())
vgf.Update()
pd = vgf.GetOutput()
pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True))
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(4)
ucols.SetName("pointsRGBA")
if utils.isSequence(alpha):
if len(alpha) != n:
colors.printc("~times mismatch in Points() alphas", n, len(alpha), c=1)
raise RuntimeError()
alphas = alpha
alpha = 1
else:
alphas = (alpha,) * n
if utils.isSequence(cols):
c = None
if len(cols[0]) == 4:
for i in range(n): # FAST
rc,gc,bc,ac = cols[i]
ucols.InsertNextTuple4(rc, gc, bc, ac)
else:
for i in range(n): # SLOW
rc,gc,bc = colors.getColor(cols[i])
ucols.InsertNextTuple4(rc*255, gc*255, bc*255, alphas[i]*255)
else:
c = cols
pd.GetPointData().SetScalars(ucols)
actor = Actor(pd, c, alpha)
actor.mapper.ScalarVisibilityOn()
actor.GetProperty().SetInterpolationToFlat()
actor.GetProperty().SetPointSize(r)
return actor
def Glyph(actor, glyphObj, orientationArray=None,
scaleByVectorSize=False, tol=0, c=None, alpha=1):
"""
At each vertex of a mesh, another mesh - a `'glyph'` - is shown with
various orientation options and coloring.
Color can be specfied as a colormap which maps the size of the orientation
vectors in `orientationArray`.
:param orientationArray: list of vectors, ``vtkAbstractArray``
or the name of an already existing points array.
:type orientationArray: list, str, vtkAbstractArray
:param bool scaleByVectorSize: glyph mesh is scaled by the size of the vectors.
:param float tol: set a minimum separation between two close glyphs
(not compatible with `orientationArray` being a list).
|glyphs.py|_ |glyphs_arrows.py|_
|glyphs| |glyphs_arrows|
"""
cmap = None
# user passing a color map to map orientationArray sizes
if c in list(colors._mapscales.cmap_d.keys()):
cmap = c
c = None
if tol:
actor = actor.clone().clean(tol)
poly = actor.polydata()
# user is passing an array of point colors
if utils.isSequence(c) and len(c) > 3:
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("glyphRGB")
for col in c:
cl = colors.getColor(col)
ucols.InsertNextTuple3(cl[0]*255, cl[1]*255, cl[2]*255)
poly.GetPointData().SetScalars(ucols)
c = None
if isinstance(glyphObj, Actor):
glyphObj = glyphObj.clean().polydata()
gly = vtk.vtkGlyph3D()
gly.SetInputData(poly)
gly.SetSourceData(glyphObj)
gly.SetColorModeToColorByScalar()
if orientationArray is not None:
gly.OrientOn()
gly.SetScaleFactor(1)
if scaleByVectorSize:
gly.SetScaleModeToScaleByVector()
else:
gly.SetScaleModeToDataScalingOff()
if isinstance(orientationArray, str):
if orientationArray.lower() == "normals":
gly.SetVectorModeToUseNormal()
else: # passing a name
gly.SetInputArrayToProcess(0, 0, 0, 0, orientationArray)
gly.SetVectorModeToUseVector()
elif isinstance(orientationArray, vtk.vtkAbstractArray):
poly.GetPointData().AddArray(orientationArray)
poly.GetPointData().SetActiveVectors("glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
gly.SetVectorModeToUseVector()
elif utils.isSequence(orientationArray) and not tol: # passing a list
actor.addPointVectors(orientationArray, "glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
if cmap:
gly.SetColorModeToColorByVector()
else:
gly.SetColorModeToColorByScalar()
gly.Update()
pd = gly.GetOutput()
gactor = Actor(pd, c, alpha)
if cmap:
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(512)
lut.Build()
for i in range(512):
r, g, b = colors.colorMap(i, cmap, 0, 512)
lut.SetTableValue(i, r, g, b, 1)
gactor.mapper.SetLookupTable(lut)
gactor.mapper.ScalarVisibilityOn()
gactor.mapper.SetScalarModeToUsePointData()
rng = pd.GetPointData().GetScalars().GetRange()
gactor.mapper.SetScalarRange(rng[0], rng[1])
gactor.GetProperty().SetInterpolationToFlat()
settings.collectable_actors.append(gactor)
return gactor
def Tensors(domain, source='ellipsoid', useEigenValues=True, isSymmetric=True,
threeAxes=False, scale=1, maxScale=None, length=None,
c=None, alpha=1):
"""Geometric representation of tensors defined on a domain or set of points.
Tensors can be scaled and/or rotated according to the source at eache input point.
Scaling and rotation is controlled by the eigenvalues/eigenvectors of the symmetrical part
of the tensor as follows:
For each tensor, the eigenvalues (and associated eigenvectors) are sorted
to determine the major, medium, and minor eigenvalues/eigenvectors.
The eigenvalue decomposition only makes sense for symmetric tensors,
hence the need to only consider the symmetric part of the tensor,
which is 1/2*(T+T.transposed()).
:param str source: preset type of source shape
['ellipsoid', 'cylinder', 'cube' or any specified ``Actor``]
:param bool useEigenValues: color source glyph using the eigenvalues or by scalars.
    :param bool threeAxes: if `False`, the major eigenvalue scales the source in the x-direction,
        the medium in the y-direction, and the minor in the z-direction.
Then, the source is rotated so that the glyph's local x-axis lies
along the major eigenvector, y-axis along the medium eigenvector, and z-axis along the minor.
If `True` three sources are produced, each of them oriented along an eigenvector
and scaled according to the corresponding eigenvector.
:param bool isSymmetric: If `True` each source glyph is mirrored (2 or 6 glyphs will be produced).
The x-axis of the source glyph will correspond to the eigenvector on output.
:param float length: distance from the origin to the tip of the source glyph along the x-axis
:param float scale: scaling factor of the source glyph.
:param float maxScale: clamp scaling at this factor.
|tensors| |tensors.py|_
"""
if 'ellip' in source:
src = vtk.vtkSphereSource()
src.SetPhiResolution(24)
src.SetThetaResolution(12)
elif 'cyl' in source:
src = vtk.vtkCylinderSource()
src.SetResolution(48)
src.CappingOn()
elif source == 'cube':
src = vtk.vtkCubeSource()
else:
src = source.normalize().polydata(False)
src.Update()
tg = vtk.vtkTensorGlyph()
tg.SetInputData(domain.GetMapper().GetInput())
tg.SetSourceData(src.GetOutput())
if c is None:
tg.ColorGlyphsOn()
else:
tg.ColorGlyphsOff()
tg.SetSymmetric(int(isSymmetric))
if length is not None:
tg.SetLength(length)
if useEigenValues:
tg.ExtractEigenvaluesOn()
tg.SetColorModeToEigenvalues()
else:
tg.SetColorModeToScalars()
tg.SetThreeGlyphs(threeAxes)
tg.ScalingOn()
tg.SetScaleFactor(scale)
if maxScale is None:
tg.ClampScalingOn()
maxScale = scale*10
tg.SetMaxScaleFactor(maxScale)
tg.Update()
tgn = vtk.vtkPolyDataNormals()
tgn.SetInputData(tg.GetOutput())
tgn.Update()
return Actor(tgn.GetOutput(), c, alpha)
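# Background sketch of the decomposition described in the Tensors docstring, assuming a
# 3x3 tensor T held in a numpy array: only the symmetric part is eigen-decomposed, and the
# sorted eigenvalues give the major/medium/minor scalings and directions.
#   S = 0.5 * (T + T.T)
#   evals, evecs = np.linalg.eigh(S)
#   order = np.argsort(evals)[::-1]   # major, medium, minor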
def Line(p0, p1=None, c="r", alpha=1, lw=1, dotted=False, res=None):
"""
Build the line segment between points `p0` and `p1`.
If `p0` is a list of points returns the line connecting them.
A 2D set of coords can also be passed as p0=[x..], p1=[y..].
:param c: color name, number, or list of [R,G,B] colors.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
:param lw: line width.
:param bool dotted: draw a dotted line
:param int res: number of intermediate points in the segment
"""
    # detect if user is passing a 2D list of points as p0=xlist, p1=ylist:
if len(p0) > 3:
if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1):
# assume input is 2D xlist, ylist
p0 = list(zip(p0, p1))
p1 = None
# detect if user is passing a list of points:
if utils.isSequence(p0[0]):
ppoints = vtk.vtkPoints() # Generate the polyline
dim = len((p0[0]))
if dim == 2:
for i, p in enumerate(p0):
ppoints.InsertPoint(i, p[0], p[1], 0)
else:
ppoints.SetData(numpy_to_vtk(p0, deep=True))
lines = vtk.vtkCellArray() # Create the polyline.
lines.InsertNextCell(len(p0))
for i in range(len(p0)):
lines.InsertCellPoint(i)
poly = vtk.vtkPolyData()
poly.SetPoints(ppoints)
poly.SetLines(lines)
else: # or just 2 points to link
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(p0)
lineSource.SetPoint2(p1)
if res:
lineSource.SetResolution(res)
lineSource.Update()
poly = lineSource.GetOutput()
actor = Actor(poly, c, alpha)
actor.GetProperty().SetLineWidth(lw)
if dotted:
actor.GetProperty().SetLineStipplePattern(0xF0F0)
actor.GetProperty().SetLineStippleRepeatFactor(1)
actor.base = np.array(p0)
actor.top = np.array(p1)
settings.collectable_actors.append(actor)
return actor
def Lines(startPoints, endPoints=None, c=None, alpha=1, lw=1, dotted=False, scale=1):
"""
Build the line segments between two lists of points `startPoints` and `endPoints`.
`startPoints` can be also passed in the form ``[[point1, point2], ...]``.
:param float scale: apply a rescaling factor to the lengths.
|lines|
.. hint:: |fitspheres2.py|_
"""
if endPoints is not None:
startPoints = list(zip(startPoints, endPoints))
polylns = vtk.vtkAppendPolyData()
for twopts in startPoints:
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(twopts[0])
if scale != 1:
vers = (np.array(twopts[1]) - twopts[0]) * scale
pt2 = np.array(twopts[0]) + vers
else:
pt2 = twopts[1]
lineSource.SetPoint2(pt2)
polylns.AddInputConnection(lineSource.GetOutputPort())
polylns.Update()
actor = Actor(polylns.GetOutput(), c, alpha)
actor.GetProperty().SetLineWidth(lw)
if dotted:
actor.GetProperty().SetLineStipplePattern(0xF0F0)
actor.GetProperty().SetLineStippleRepeatFactor(1)
settings.collectable_actors.append(actor)
return actor
def Tube(points, r=1, c="r", alpha=1, res=12):
"""Build a tube along the line defined by a set of points.
:param r: constant radius or list of radii.
:type r: float, list
:param c: constant color or list of colors for each point.
:type c: float, list
|ribbon.py|_ |tube.py|_
|ribbon| |tube|
"""
ppoints = vtk.vtkPoints() # Generate the polyline
ppoints.SetData(numpy_to_vtk(points, deep=True))
lines = vtk.vtkCellArray()
lines.InsertNextCell(len(points))
for i in range(len(points)):
lines.InsertCellPoint(i)
polyln = vtk.vtkPolyData()
polyln.SetPoints(ppoints)
polyln.SetLines(lines)
tuf = vtk.vtkTubeFilter()
tuf.CappingOn()
tuf.SetNumberOfSides(res)
tuf.SetInputData(polyln)
if utils.isSequence(r):
arr = numpy_to_vtk(np.ascontiguousarray(r), deep=True)
arr.SetName("TubeRadius")
polyln.GetPointData().AddArray(arr)
polyln.GetPointData().SetActiveScalars("TubeRadius")
tuf.SetVaryRadiusToVaryRadiusByAbsoluteScalar()
else:
tuf.SetRadius(r)
usingColScals = False
if utils.isSequence(c) and len(c) != 3:
usingColScals = True
cc = vtk.vtkUnsignedCharArray()
cc.SetName("TubeColors")
cc.SetNumberOfComponents(3)
cc.SetNumberOfTuples(len(c))
for i, ic in enumerate(c):
r, g, b = colors.getColor(ic)
cc.InsertTuple3(i, int(255 * r), int(255 * g), int(255 * b))
polyln.GetPointData().AddArray(cc)
c = None
tuf.Update()
polytu = tuf.GetOutput()
actor = Actor(polytu, c, alpha, computeNormals=0)
actor.phong()
if usingColScals:
actor.mapper.SetScalarModeToUsePointFieldData()
actor.mapper.ScalarVisibilityOn()
actor.mapper.SelectColorArray("TubeColors")
actor.mapper.Modified()
actor.base = np.array(points[0])
actor.top = np.array(points[-1])
settings.collectable_actors.append(actor)
return actor
def Ribbon(line1, line2, c="m", alpha=1, res=(200, 5)):
"""Connect two lines to generate the surface inbetween.
|ribbon| |ribbon.py|_
"""
if isinstance(line1, Actor):
line1 = line1.coordinates()
if isinstance(line2, Actor):
line2 = line2.coordinates()
ppoints1 = vtk.vtkPoints() # Generate the polyline1
ppoints1.SetData(numpy_to_vtk(line1, deep=True))
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(len(line1))
for i in range(len(line1)):
lines1.InsertCellPoint(i)
poly1 = vtk.vtkPolyData()
poly1.SetPoints(ppoints1)
poly1.SetLines(lines1)
ppoints2 = vtk.vtkPoints() # Generate the polyline2
ppoints2.SetData(numpy_to_vtk(line2, deep=True))
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(len(line2))
for i in range(len(line2)):
lines2.InsertCellPoint(i)
poly2 = vtk.vtkPolyData()
poly2.SetPoints(ppoints2)
poly2.SetLines(lines2)
# build the lines
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(poly1.GetNumberOfPoints())
for i in range(poly1.GetNumberOfPoints()):
lines1.InsertCellPoint(i)
polygon1 = vtk.vtkPolyData()
polygon1.SetPoints(ppoints1)
polygon1.SetLines(lines1)
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(poly2.GetNumberOfPoints())
for i in range(poly2.GetNumberOfPoints()):
lines2.InsertCellPoint(i)
polygon2 = vtk.vtkPolyData()
polygon2.SetPoints(ppoints2)
polygon2.SetLines(lines2)
mergedPolyData = vtk.vtkAppendPolyData()
mergedPolyData.AddInputData(polygon1)
mergedPolyData.AddInputData(polygon2)
mergedPolyData.Update()
rsf = vtk.vtkRuledSurfaceFilter()
rsf.CloseSurfaceOff()
rsf.SetRuledModeToResample()
rsf.SetResolution(res[0], res[1])
rsf.SetInputData(mergedPolyData.GetOutput())
rsf.Update()
actor = Actor(rsf.GetOutput(), c=c, alpha=alpha)
settings.collectable_actors.append(actor)
return actor
def FlatArrow(line1, line2, c="m", alpha=1, tipSize=1, tipWidth=1):
"""Build a 2D arrow in 3D space by joining two close lines.
|flatarrow| |flatarrow.py|_
"""
if isinstance(line1, Actor):
line1 = line1.coordinates()
if isinstance(line2, Actor):
line2 = line2.coordinates()
sm1, sm2 = np.array(line1[-1]), np.array(line2[-1])
v = (sm1-sm2)/3*tipWidth
p1 = sm1+v
p2 = sm2-v
pm1 = (sm1+sm2)/2
pm2 = (np.array(line1[-2])+np.array(line2[-2]))/2
pm12 = pm1-pm2
tip = pm12/np.linalg.norm(pm12)*np.linalg.norm(v)*3*tipSize/tipWidth + pm1
line1.append(p1)
line1.append(tip)
line2.append(p2)
line2.append(tip)
resm = max(100, len(line1))
actor = Ribbon(line1, line2, alpha=alpha, c=c, res=(resm, 1)).phong()
settings.collectable_actors.pop()
settings.collectable_actors.append(actor)
return actor
def Arrow(startPoint, endPoint, s=None, c="r", alpha=1, res=12):
"""
Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length,
otherwise it represents the fraction of the window size.
|OrientedArrow|
"""
axis = np.array(endPoint) - np.array(startPoint)
length = np.linalg.norm(axis)
if length:
axis = axis / length
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
arr = vtk.vtkArrowSource()
arr.SetShaftResolution(res)
arr.SetTipResolution(res)
if s:
sz = 0.02
arr.SetTipRadius(sz)
arr.SetShaftRadius(sz / 1.75)
arr.SetTipLength(sz * 15)
arr.Update()
t = vtk.vtkTransform()
t.RotateZ(np.rad2deg(phi))
t.RotateY(np.rad2deg(theta))
t.RotateY(-90) # put it along Z
if s:
sz = 800.0 * s
t.Scale(length, sz, sz)
else:
t.Scale(length, length, length)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(arr.GetOutput())
tf.SetTransform(t)
tf.Update()
actor = Actor(tf.GetOutput(), c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(startPoint)
actor.DragableOff()
actor.base = np.array(startPoint)
actor.top = np.array(endPoint)
settings.collectable_actors.append(actor)
return actor
def Arrows(startPoints, endPoints=None, s=None, scale=1, c="r", alpha=1, res=12):
"""
Build arrows between two lists of points `startPoints` and `endPoints`.
`startPoints` can be also passed in the form ``[[point1, point2], ...]``.
Color can be specfied as a colormap which maps the size of the arrows.
:param float s: fix aspect-ratio of the arrow and scale its cross section
:param float scale: apply a rescaling factor to the length
:param c: color or array of colors
:param str cmap: color arrows by size using this color map
:param float alpha: set transparency
:param int res: set arrow resolution
|glyphs_arrows| |glyphs_arrows.py|_
"""
startPoints = np.array(startPoints)
if endPoints is None:
strt = startPoints[:,0]
endPoints = startPoints[:,1]
startPoints = strt
arr = vtk.vtkArrowSource()
arr.SetShaftResolution(res)
arr.SetTipResolution(res)
if s:
sz = 0.02 * s
arr.SetTipRadius(sz*2)
arr.SetShaftRadius(sz)
arr.SetTipLength(sz * 10)
arr.Update()
pts = Points(startPoints, r=0.001, c=c, alpha=alpha).off()
orients = (endPoints - startPoints) * scale
arrg = Glyph(pts, arr.GetOutput(),
orientationArray=orients, scaleByVectorSize=True,
c=c, alpha=alpha).flat()
settings.collectable_actors.append(arrg)
return arrg
def Polygon(pos=(0, 0, 0), nsides=6, r=1, c="coral", alpha=1):
"""
    Build a 2D polygon with `nsides` sides of radius `r`, lying in the xy plane.
|Polygon|
"""
ps = vtk.vtkRegularPolygonSource()
ps.SetNumberOfSides(nsides)
ps.SetRadius(r)
ps.Update()
actor = Actor(ps.GetOutput(), c, alpha)
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor
def Rectangle(p1=(0, 0, 0), p2=(2, 1, 0), lw=1, c="g", alpha=1):
"""Build a rectangle in the xy plane identified by two corner points."""
p1 = np.array(p1)
p2 = np.array(p2)
pos = (p1 + p2) / 2
length = abs(p2[0] - p1[0])
height = abs(p2[1] - p1[1])
return Plane(pos, [0, 0, 1], length, height, c, alpha)
def Disc(
pos=(0, 0, 0),
r1=0.5,
r2=1,
c="coral",
alpha=1,
res=12,
resphi=None,
):
"""
Build a 2D disc of internal radius `r1` and outer radius `r2`.
|Disk|
"""
ps = vtk.vtkDiskSource()
ps.SetInnerRadius(r1)
ps.SetOuterRadius(r2)
ps.SetRadialResolution(res)
if not resphi:
resphi = 6 * res
ps.SetCircumferentialResolution(resphi)
ps.Update()
actor = Actor(ps.GetOutput(), c, alpha).flat()
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor
def Sphere(pos=(0, 0, 0), r=1, c="r", alpha=1, res=24):
"""Build a sphere at position `pos` of radius `r`.
|Sphere|
"""
ss = vtk.vtkSphereSource()
ss.SetRadius(r)
ss.SetThetaResolution(2 * res)
ss.SetPhiResolution(res)
ss.Update()
pd = ss.GetOutput()
actor = Actor(pd, c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor
def Spheres(centers, r=1, c="r", alpha=1, res=8):
"""
Build a (possibly large) set of spheres at `centers` of radius `r`.
Either `c` or `r` can be a list of RGB colors or radii.
|manyspheres| |manyspheres.py|_
"""
cisseq = False
if utils.isSequence(c):
cisseq = True
if cisseq:
if len(centers) > len(c):
colors.printc("~times Mismatch in Spheres() colors", len(centers), len(c), c=1)
raise RuntimeError()
if len(centers) != len(c):
colors.printc("~lightningWarning: mismatch in Spheres() colors", len(centers), len(c))
risseq = False
if utils.isSequence(r):
risseq = True
if risseq:
if len(centers) > len(r):
colors.printc("times Mismatch in Spheres() radius", len(centers), len(r), c=1)
raise RuntimeError()
if len(centers) != len(r):
colors.printc("~lightning Warning: mismatch in Spheres() radius", len(centers), len(r))
if cisseq and risseq:
colors.printc("~noentry Limitation: c and r cannot be both sequences.", c=1)
raise RuntimeError()
src = vtk.vtkSphereSource()
if not risseq:
src.SetRadius(r)
src.SetPhiResolution(res)
src.SetThetaResolution(2 * res)
src.Update()
psrc = vtk.vtkPointSource()
psrc.SetNumberOfPoints(len(centers))
psrc.Update()
pd = psrc.GetOutput()
vpts = pd.GetPoints()
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(src.GetOutputPort())
if cisseq:
glyph.SetColorModeToColorByScalar()
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("colors")
for i, p in enumerate(centers):
vpts.SetPoint(i, p)
cx, cy, cz = colors.getColor(c[i])
ucols.InsertNextTuple3(cx * 255, cy * 255, cz * 255)
pd.GetPointData().SetScalars(ucols)
glyph.ScalingOff()
elif risseq:
glyph.SetScaleModeToScaleByScalar()
urads = vtk.vtkFloatArray()
urads.SetName("scales")
for i, p in enumerate(centers):
vpts.SetPoint(i, p)
urads.InsertNextValue(r[i])
pd.GetPointData().SetScalars(urads)
else:
for i, p in enumerate(centers):
vpts.SetPoint(i, p)
glyph.SetInputData(pd)
glyph.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(glyph.GetOutput())
actor = Actor()
actor.SetMapper(mapper)
actor.GetProperty().SetInterpolationToPhong()
actor.GetProperty().SetOpacity(alpha)
if cisseq:
mapper.ScalarVisibilityOn()
else:
mapper.ScalarVisibilityOff()
actor.GetProperty().SetColor(colors.getColor(c))
settings.collectable_actors.append(actor)
return actor
def Earth(pos=(0, 0, 0), r=1, lw=1):
"""Build a textured actor representing the Earth.
|geodesic| |geodesic.py|_
"""
import os
tss = vtk.vtkTexturedSphereSource()
tss.SetRadius(r)
tss.SetThetaResolution(72)
tss.SetPhiResolution(36)
earthMapper = vtk.vtkPolyDataMapper()
earthMapper.SetInputConnection(tss.GetOutputPort())
earthActor = Actor(c="w")
earthActor.SetMapper(earthMapper)
atext = vtk.vtkTexture()
pnmReader = vtk.vtkPNMReader()
cdir = os.path.dirname(__file__)
if cdir == "":
cdir = "."
fn = settings.textures_path + "earth.ppm"
pnmReader.SetFileName(fn)
atext.SetInputConnection(pnmReader.GetOutputPort())
atext.InterpolateOn()
earthActor.SetTexture(atext)
if not lw:
earthActor.SetPosition(pos)
return earthActor
es = vtk.vtkEarthSource()
es.SetRadius(r / 0.995)
earth2Mapper = vtk.vtkPolyDataMapper()
earth2Mapper.SetInputConnection(es.GetOutputPort())
earth2Actor = Actor()
earth2Actor.SetMapper(earth2Mapper)
earth2Mapper.ScalarVisibilityOff()
earth2Actor.GetProperty().SetLineWidth(lw)
ass = Assembly([earthActor, earth2Actor])
ass.SetPosition(pos)
settings.collectable_actors.append(ass)
return ass
def Ellipsoid(pos=(0, 0, 0), axis1=(1, 0, 0), axis2=(0, 2, 0), axis3=(0, 0, 3),
c="c", alpha=1, res=24):
"""
Build a 3D ellipsoid centered at position `pos`.
.. note:: `axis1` and `axis2` are only used to define sizes and one azimuth angle.
|projectsphere|
"""
elliSource = vtk.vtkSphereSource()
elliSource.SetThetaResolution(res)
elliSource.SetPhiResolution(res)
elliSource.Update()
l1 = np.linalg.norm(axis1)
l2 = np.linalg.norm(axis2)
l3 = np.linalg.norm(axis3)
axis1 = np.array(axis1) / l1
axis2 = np.array(axis2) / l2
axis3 = np.array(axis3) / l3
angle = np.arcsin(np.dot(axis1, axis2))
theta = np.arccos(axis3[2])
phi = np.arctan2(axis3[1], axis3[0])
t = vtk.vtkTransform()
t.PostMultiply()
t.Scale(l1, l2, l3)
t.RotateX(np.rad2deg(angle))
t.RotateY(np.rad2deg(theta))
t.RotateZ(np.rad2deg(phi))
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(elliSource.GetOutput())
tf.SetTransform(t)
tf.Update()
pd = tf.GetOutput()
actor = Actor(pd, c=c, alpha=alpha)
actor.GetProperty().BackfaceCullingOn()
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(pos)
actor.base = -np.array(axis1) / 2 + pos
actor.top = np.array(axis1) / 2 + pos
settings.collectable_actors.append(actor)
return actor
def Grid(
pos=(0, 0, 0),
normal=(0, 0, 1),
sx=1,
sy=1,
c="g",
alpha=1,
lw=1,
resx=10,
resy=10,
):
"""Return a grid plane.
|brownian2D| |brownian2D.py|_
"""
ps = vtk.vtkPlaneSource()
ps.SetResolution(resx, resy)
ps.Update()
poly0 = ps.GetOutput()
t0 = vtk.vtkTransform()
t0.Scale(sx, sy, 1)
tf0 = vtk.vtkTransformPolyDataFilter()
tf0.SetInputData(poly0)
tf0.SetTransform(t0)
tf0.Update()
poly = tf0.GetOutput()
axis = np.array(normal) / np.linalg.norm(normal)
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
t = vtk.vtkTransform()
t.PostMultiply()
    t.RotateY(np.rad2deg(theta))
from firedrake import *
import numpy as np
import collections
from ufl.geometry import *
class SignFlip:
def __init__(self,mesh):
self.mesh = mesh
def HSIGN(self):
Vt = FunctionSpace(self.mesh, "HDiv Trace", 0)
v = TestFunction(Vt)
x = SpatialCoordinate(self.mesh)
np.set_printoptions(precision=9)
n = FacetNormal(self.mesh)
normals = Function(Vt)
# =======================;
# Extract cell midpoints
# =======================;
node_per_cell = self.mesh.coordinates.cell_node_map().arity
mesh_type = self.mesh.ufl_cell().cellname()
mesh_val = self.mesh.coordinates.vector().array()
mesh_cell_map = self.mesh.coordinates.cell_node_map().values
# File('mesh.pvd').write(self.mesh.coordinates)
# print('mesh_cell_map',mesh_cell_map)
row = np.size(mesh_cell_map)
row,col = self.mesh.cell_set.size , node_per_cell
CELL_x = np.zeros((row,col))
CELL_y = np.zeros((row,col))
for irow in range(row):
for icol in range(col):
CELL_x[irow,icol] = mesh_val[mesh_cell_map[irow,icol]*2] # x-coord
CELL_y[irow,icol] = mesh_val[mesh_cell_map[irow,icol]*2+1] # y-coord
cellx_midPt = np.average(CELL_x,axis=1)
celly_midPt = np.average(CELL_y,axis=1)
# print('cell_x',CELL_x)
# =======================;
# Extract facet midpoints
# =======================;
cell_map = Vt.cell_node_map().values
# np.savetxt('cell_map.out', cell_map,fmt='%10.7g')
# print('cell_map:\n',cell_map)
# Fplus = normals('+')*v('+')*dS + normals * v*ds -\
# (inner(as_vector([conditional(x[0]<7.5,1,2),0]),n))('+')*v('+') * dS - inner(as_vector([conditional(x[0]<7.5,1,2),0]),n)*v *ds
# # (inner(as_vector([0,conditional(x[1]<2.5,1,2)]),n))('+')*v('+') * dS - inner(as_vector([0,conditional(x[1]<2.5,1,2)]),n)*v *ds
# solve(Fplus == 0, normals)
# normals_val_x = normals.vector().array()
# row , col = np.shape(cell_map)
# NPLUS = np.zeros((row,col))
# for irow in range(row):
# for icol in range(col):
# NPLUS[irow,icol] = normals_val_x[cell_map[irow,icol]]
# print('Nplus:\n',NPLUS)
facetx_midPt = np.zeros((row,col))
facety_midPt = np.zeros((row,col))
# if element is Quadrilateral
if mesh_type == "quadrilateral":
for irow in range(row):
for icol in range(col):
if icol == 0:
facetx_midPt[irow,icol] = (CELL_x[irow,0]+CELL_x[irow,1])/2.
facety_midPt[irow,icol] = (CELL_y[irow,0]+CELL_y[irow,1])/2.
elif icol == 1:
facetx_midPt[irow,icol] = (CELL_x[irow,2]+CELL_x[irow,3])/2.
facety_midPt[irow,icol] = (CELL_y[irow,2]+CELL_y[irow,3])/2.
elif icol == 2:
facetx_midPt[irow,icol] = (CELL_x[irow,0]+CELL_x[irow,2])/2.
facety_midPt[irow,icol] = (CELL_y[irow,0]+CELL_y[irow,2])/2.
else:
facetx_midPt[irow,icol] = (CELL_x[irow,1]+CELL_x[irow,3])/2.
facety_midPt[irow,icol] = (CELL_y[irow,1]+CELL_y[irow,3])/2.
# if element is Triangular
if mesh_type == "triangle":
for irow in range(row):
for icol in range(col):
facetAvgX = 0
facetAvgY = 0
for k in [x for x in range(col) if x != icol]:
facetAvgX = facetAvgX + CELL_x[irow,k]
facetAvgY = facetAvgY + CELL_y[irow,k]
facetx_midPt[irow,icol] = facetAvgX/2.
facety_midPt[irow,icol] = facetAvgY/2.
# print('facetx_midPt:\n',facetx_midPt)
# np.savetxt('facetx_midPt.out', facetx_midPt,fmt='%10.7g')
# print('facety_midPt:\n',facety_midPt)
# np.savetxt('facety_midPt.out', facety_midPt,fmt='%10.7g')
# mark boundaries
left = mesh_val[::2].min()
right = mesh_val[::2].max()
bottom = mesh_val[1::2].min()
top = mesh_val[1::2].max()
left_facet_idx = (np.where(facetx_midPt == left))
left_facet = cell_map[left_facet_idx]
right_facet_idx = (np.where(facetx_midPt == right))
right_facet = cell_map[right_facet_idx]
bottom_facet_idx = (np.where(facety_midPt == bottom))
bottom_facet = cell_map[bottom_facet_idx]
top_facet_idx = (np.where(facety_midPt == top))
top_facet = cell_map[top_facet_idx]
# Crack boundaries extracted manually and painfully
# crack_working.mesh
# left_crack_facet = np.array([776, 759, 808, 827, 845, 866])
# right_crack_facet = np.array([630, 649, 704, 746, 789, 811])
#
# left_crack_facet = np.array([367,327,267,220,247,289])
# right_crack_facet = np.array([438,443,446,415,371,348])
def find_repeat(numbers):
seen = set()
for num in numbers:
if num in seen:
return num
seen.add(num)
Left_cell = np.array([])
Right_cell = np.array([])
Bottom_cell = np.array([])
Top_cell = np.array([])
for irow in range(row):
numbers = CELL_x[irow,:] #each cell
if find_repeat(numbers) == left:
Left_cell = np.append(Left_cell, irow )
elif find_repeat(numbers) == right:
Right_cell = np.append(Right_cell, irow )
numbers = CELL_y[irow,:] #each cell
if find_repeat(numbers) == bottom:
Bottom_cell = np.append(Bottom_cell, irow )
elif find_repeat(numbers) == top:
Top_cell = np.append(Top_cell, irow )
#==================================;
# Figure out two adjacent neighbors;
# =================================;
TwoCell = np.ones((Vt.dim(),2)) * -999
TwoCell = TwoCell.astype('int32')
# all boundary facets have only one neighbor cell, Instead of NA we place -999
for itrn in range(Vt.dim()):
neigh = np.concatenate(np.where(cell_map == itrn)[::2] )
for itrnj in range(len(neigh)):
TwoCell[itrn,itrnj] = neigh[itrnj]
# print('TwoCell',TwoCell)
# Mark boundary facets:
boundary_facet = np.concatenate(np.where(TwoCell<0)[::2])
# marking left/right/top/bottom boundaries
# old implementation of marking boundaries
# it has a bug for 'right' and 'left' mesh
# only works for 'crossed'
# identify index where first column is Left_cell and last column is -999
# left_facet= np.array([])
# for cellidx in Left_cell:
# print('cellidx',cellidx)
# fct_index = np.concatenate( np.where( (TwoCell[:,-1] == -999)&(TwoCell[:,0] == cellidx)) )
# left_facet = np.append(left_facet,fct_index)
# print('left_facet',left_facet)
# right_facet= np.array([])
# for cellidx in Right_cell:
# fct_index = np.concatenate( np.where( (TwoCell[:,-1] == -999)&(TwoCell[:,0] == cellidx)) )
# right_facet = np.append(right_facet,fct_index)
# print('right_face',right_facet)
# top_facet= np.array([])
# for cellidx in Top_cell:
# fct_index = np.concatenate( np.where( (TwoCell[:,-1] == -999)&(TwoCell[:,0] == cellidx)) )
# top_facet = np.append(top_facet,fct_index)
# print('top_facet',top_facet)
# bottom_facet= np.array([])
# for cellidx in Bottom_cell:
# fct_index = np.concatenate( np.where( (TwoCell[:,-1] == -999)&(TwoCell[:,0] == cellidx)) )
# bottom_facet = np.append(bottom_facet,fct_index)
# print('bottom_facet',bottom_facet)
#======================================;
#determine + - edges at each cell map
#======================================;
Fplus = normals('+')*v('+')*dS + normals * v*ds -\
(inner(Constant((1,0)),n) )('+')*v('+') * dS - inner(Constant((1,0)),n)*v *ds
solve(Fplus == 0, normals)
normals_val_x = normals.vector().array()
Fminus = normals('+')*v('+')*dS + normals * v*ds -\
(inner(Constant((0,1)),n) )('+')*v('+') * dS - inner(Constant((0,1)),n)*v *ds
solve(Fminus == 0, normals)
normals_val_y = normals.vector().array()
IdxVertic = np.concatenate(np.where(normals_val_x == 0) )
# IdxVertic = np.concatenate(np.where((normals_val_x >-1e-5)|(normals_val_x<1e-5) ))
# IdxVertic = np.concatenate(np.where((normals_val_x >-10)|(normals_val_x<10) ))
        row, col = np.shape(cell_map)
#!/usr/bin/env python
# coding: utf-8
"""
9 - Initiators & Deleters
=========================
So far we have provided a prior in all our examples, defining where we think our tracks will start.
This has also been for a fixed number of tracks. In practice, targets may appear and disappear
all the time. This could be because they enter/exit the sensor's field of view.
The location/state of the targets' birth may also be unknown and varying.
"""
# %%
# Simulating multiple targets
# ---------------------------
# Here we'll simulate multiple targets moving at a constant velocity. A Poisson distribution will
# be used to sample the number of new targets which are born at a particular timestep, and a simple
# draw from a uniform distribution will be used to decide if a target will be removed. Each target
# will have a random position and velocity on birth.
from datetime import datetime
from datetime import timedelta
import numpy as np
from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \
ConstantVelocity
from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState
np.random.seed(1991)
start_time = datetime.now()
truths = set() # Truths across all time
current_truths = set() # Truths alive at current time
transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.005),
ConstantVelocity(0.005)])
for k in range(20):
# Death
for truth in current_truths.copy():
if np.random.rand() <= 0.05: # Death probability
current_truths.remove(truth)
# Update truths
for truth in current_truths:
truth.append(GroundTruthState(
transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)),
timestamp=start_time + timedelta(seconds=k)))
# Birth
for _ in range(np.random.poisson(0.6)): # Birth probability
x, y = initial_position = np.random.rand(2) * [20, 20] # Range [0, 20] for x and y
x_vel, y_vel = (np.random.rand(2))*2 - 1 # Range [-1, 1] for x and y velocity
state = GroundTruthState([x, x_vel, y, y_vel], timestamp=start_time + timedelta(seconds=k))
# Add to truth set for current and for all timestamps
truth = GroundTruthPath([state])
current_truths.add(truth)
truths.add(truth)
from stonesoup.plotter import Plotter
plotter = Plotter()
plotter.ax.set_ylim(-5, 25)
plotter.plot_ground_truths(truths, [0, 2])
# %%
# Generate Detections and Clutter
# -------------------------------
# Next, generate detections with clutter just as in the previous tutorials, skipping over the truth
# paths that weren't alive at the current time step.
from scipy.stats import uniform
from stonesoup.types.detection import TrueDetection
from stonesoup.types.detection import Clutter
from stonesoup.models.measurement.linear import LinearGaussian
measurement_model = LinearGaussian(
ndim_state=4,
mapping=(0, 2),
noise_covar=np.array([[0.25, 0],
[0, 0.25]])
)
all_measurements = []
for k in range(20):
measurement_set = set()
timestamp = start_time + timedelta(seconds=k)
for truth in truths:
try:
truth_state = truth[timestamp]
except IndexError:
# This truth not alive at this time.
continue
# Generate actual detection from the state with a 10% chance that no detection is received.
        if np.random.rand() <= 0.9:
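            # --- hedged sketch of the remainder, following the usual Stone Soup tutorial
            # pattern; the clutter spread and count below are illustrative assumptions ---
            measurement = measurement_model.function(truth_state, noise=True)
            measurement_set.add(TrueDetection(state_vector=measurement,
                                              groundtruth_path=truth,
                                              timestamp=timestamp,
                                              measurement_model=measurement_model))
        # Generate clutter for this truth at this time-step, spread uniformly around it
        truth_x = truth_state.state_vector[0]
        truth_y = truth_state.state_vector[2]
        for _ in range(np.random.randint(10)):
            x = uniform.rvs(truth_x - 10, 20)
            y = uniform.rvs(truth_y - 10, 20)
            measurement_set.add(Clutter(np.array([[x], [y]]), timestamp=timestamp,
                                        measurement_model=measurement_model))
    all_measurements.append(measurement_set)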
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
import numpy as np
import torch
from utils.tools.logger import Logger as Log
try:
from extensions.ops.nms.nms_wrapper import nms
except ImportError:
print('DetHelper NMS ImportError.')
try:
from extensions.ops.nms.nms_wrapper import soft_nms
except ImportError:
print('DetHelper Soft-NMS ImportError.')
class DetHelper(object):
@staticmethod
def cls_nms(dets, labels, max_threshold=0.0, cls_keep_num=None, device_id=None, return_ind=False):
if isinstance(labels, torch.Tensor):
labels = labels.detach().cpu().numpy()
assert isinstance(labels, np.ndarray)
unique_labels = np.unique(labels)
cls_keep_list = list()
for c in unique_labels:
cls_index = np.where(labels == c)[0]
_, cls_keep = nms(dets[cls_index], iou_thr=max_threshold, device_id=device_id)
if cls_keep_num is not None:
cls_keep = cls_keep[:cls_keep_num]
cls_keep_list.append(cls_index[cls_keep.cpu().numpy()])
keep_index = np.concatenate(cls_keep_list, 0)
return keep_index if return_ind else dets[keep_index]
@staticmethod
def cls_softnms(dets, labels, max_threshold=0.0, min_score=0.001, sigma=0.5, method='linear', cls_keep_num=None):
if isinstance(labels, torch.Tensor):
labels = labels.detach().cpu().numpy()
assert isinstance(labels, np.ndarray)
        unique_labels = np.unique(labels)
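        # --- hedged sketch of the remainder, mirroring cls_nms above; the exact signature
        # and return types of the local soft_nms wrapper are assumed, not verified ---
        cls_keep_list = list()
        for c in unique_labels:
            cls_index = np.where(labels == c)[0]
            _, cls_keep = soft_nms(dets[cls_index], iou_thr=max_threshold,
                                   min_score=min_score, sigma=sigma, method=method)
            if isinstance(cls_keep, torch.Tensor):
                cls_keep = cls_keep.cpu().numpy()
            if cls_keep_num is not None:
                cls_keep = cls_keep[:cls_keep_num]
            cls_keep_list.append(cls_index[cls_keep])
        keep_index = np.concatenate(cls_keep_list, 0)
        return dets[keep_index]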
import os
import json
import enum
from typing import Any, List
from typing import Dict
import numpy as np
class LaneAssociation(enum.Enum):
LEFT = 0
CENTER = 1
RIGHT = 2
NONE = 3
class ObjectClass(enum.Enum):
CAR = 0
TRUCK = 1
MOTORBIKE = 2
NONE = 4
NUM_VEHICLES = 6
NUM_ITERATIONS = 1_000
FRAME_DURATION_MS = 50
FRAME_DURATION_S = FRAME_DURATION_MS / 1000.0
VEHICLE_FILENAME = "vehicle_data.json"
VEHICLE_FILEPATH = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "data", VEHICLE_FILENAME
)
EGO_FILENAME = "ego_data.json"
EGO_VEHICLE_FILEPATH = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "data", EGO_FILENAME
)
np.random.seed(42)
OBJECT_CLASSES = {
0: int(ObjectClass.CAR.value),
1: int(ObjectClass.CAR.value),
2: int(ObjectClass.CAR.value),
3: int(ObjectClass.MOTORBIKE.value),
4: int(ObjectClass.CAR.value),
5: int(ObjectClass.TRUCK.value),
}
OBJECT_WIDTHS = {0: 2.5, 1: 3.25, 2: 0.5}
OBJECT_HEIGHTS = {0: 5.0, 1: 15.0, 2: 2.0}
def kph_to_mps(kph: float) -> float:
return kph / 3.6
def generate_ego_vehicle_data() -> Dict[str, list]:
start_velocity_mps = round(kph_to_mps(120), 4)
start_lane = int(LaneAssociation.CENTER.value)
ego_vehicle_data: Dict[str, list] = {
"Lane": [1 for _ in range(NUM_ITERATIONS)],
"LongVelocity": [0.0 for _ in range(NUM_ITERATIONS)],
"LatVelocity": [0.0 for _ in range(NUM_ITERATIONS)],
}
ego_vehicle_data["Lane"][0] = start_lane
ego_vehicle_data["LongVelocity"][0] = start_velocity_mps
ego_vehicle_data["LatVelocity"][0] = 0.0
for i in range(1, NUM_ITERATIONS):
ego_vehicle_data["Lane"][i] = int(LaneAssociation.CENTER.value)
eps = np.random.normal(loc=0.0, scale=0.1)
new_velocity = ego_vehicle_data["LongVelocity"][i - 1] + eps
if new_velocity >= 25.0 and new_velocity <= 40.0:
ego_vehicle_data["LongVelocity"][i] = round(new_velocity, 4)
ego_vehicle_data["LatVelocity"][i] = round(eps, 4)
else:
ego_vehicle_data["LongVelocity"][i] = ego_vehicle_data[
"LongVelocity"
][i - 1]
ego_vehicle_data["LatVelocity"][i] = ego_vehicle_data[
"LatVelocity"
][i - 1]
return ego_vehicle_data
def generate_vehicle_data(id_: int, ego_velocity_mps: List[float]) -> dict:
if id_ < 2:
start_lane_associations = int(LaneAssociation.LEFT.value)
        start_velocity = round(np.random.uniform(110, 130), 4)
# Copyright 2019 The OpenRadar Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from numba import njit, jit
from . import compensation
from . import utils
def doppler_resolution(band_width, start_freq_const=77, ramp_end_time=62, idle_time_const=100, num_loops_per_frame=128,
num_tx_antennas=3):
"""Calculate the doppler resolution for the given radar configuration.
Args:
start_freq_const (float): Frequency chirp starting point.
ramp_end_time (float): Frequency chirp end point.
idle_time_const (int): Idle time between chirps.
band_width (float): Radar config bandwidth.
num_loops_per_frame (int): The number of loops in each frame.
num_tx_antennas (int): The number of transmitting antennas (tx) on the radar.
Returns:
doppler_resolution (float): The doppler resolution for the given radar configuration.
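    Example:
        >>> # hedged sketch: 4 GHz bandwidth with the remaining defaults (values illustrative)
        >>> dres = doppler_resolution(4e9)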
"""
light_speed_meter_per_sec = 299792458
center_frequency = start_freq_const * 1e9 + band_width / 2
chirp_interval = (ramp_end_time + idle_time_const) * 1e-6
doppler_resolution = light_speed_meter_per_sec / (
2 * num_loops_per_frame * num_tx_antennas * center_frequency * chirp_interval)
return doppler_resolution
def separate_tx(signal, num_tx, vx_axis=1, axis=0):
"""Separate interleaved radar data from separate TX along a certain axis to account for TDM radars.
Args:
signal (ndarray): Received signal.
num_tx (int): Number of transmit antennas.
vx_axis (int): Axis in which to accumulate the separated data.
axis (int): Axis in which the data is interleaved.
Returns:
        ndarray: Separated received data, with the chirps from each TX antenna concatenated along `vx_axis`.
"""
# Reorder the axes
reordering = np.arange(len(signal.shape))
reordering[0] = axis
reordering[axis] = 0
if not (reordering == np.arange(len(reordering))).all(): # check if has to reorder
signal = signal.transpose(reordering)
# if signal.shape[1] != num_tx * signal.shape[1]:
# pass
out = np.concatenate([signal[i::num_tx, ...] for i in range(num_tx)], axis=vx_axis)
return out.transpose(reordering)
def doppler_processing(radar_cube,
num_tx_antennas=2,
clutter_removal_enabled=False,
interleaved=True,
window_type_2d=None,
accumulate=True):
"""Perform 2D FFT on the radar_cube.
    Interleave the radar_cube, perform optional windowing and 2D FFT on the radar_cube. Optional antenna coupling
    signature removal can also be performed right before the 2D FFT. In contrast to the original TI code, CFAR and peak
    grouping are intentionally separated from the 2D FFT for ease of debugging.
Args:
radar_cube (ndarray): Output of the 1D FFT. If not interleaved beforehand, it has the shape of
(numChirpsPerFrame, numRxAntennas, numRangeBins). Otherwise, it has the shape of
(numRangeBins, numVirtualAntennas, num_doppler_bins). It is assumed that after interleaving the doppler
dimension is located at the last axis.
num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.
clutter_removal_enabled (boolean): Flag to enable naive clutter removal.
interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not
interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numRangeBins). The interleaving
process will transform it such that it becomes (numRangeBins, numVirtualAntennas, num_doppler_bins). Note
that this interleaving is only applicable to TDM radar, i.e. each tx emits the chirp sequentially.
window_type_2d (mmwave.dsp.utils.Window): Optional windowing type before doppler FFT.
accumulate (boolean): Flag to reduce the numVirtualAntennas dimension.
Returns:
        detMatrix (ndarray): (numRangeBins, num_doppler_bins) complete range-doppler information. Original datatype is
uint16_t. Note that azimuthStaticHeatMap can be extracted from zero-doppler index for
visualization.
aoa_input (ndarray): (numRangeBins, numVirtualAntennas, num_doppler_bins) ADC data reorganized by vrx instead of
physical rx.
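    Example:
        >>> # hedged sketch with a synthetic cube of 128 chirps x 4 rx x 256 range bins (values illustrative)
        >>> rc = np.random.randn(128, 4, 256) + 1j * np.random.randn(128, 4, 256)
        >>> det_matrix, aoa_input = doppler_processing(rc, num_tx_antennas=2)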
"""
if interleaved:
# radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,
        # and even are from tx2) so it becomes (num_doppler_bins, numVirtualAntennas, numADCSamples), where
# numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.
# Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.
fft2d_in = separate_tx(radar_cube, num_tx_antennas, vx_axis=1, axis=0)
else:
fft2d_in = radar_cube
# (Optional) Static Clutter Removal
if clutter_removal_enabled:
# fft2d_in = compensation.clutter_removal(fft2d_in, axis=0)
fft2d_in[1:] = compensation.clutter_removal(fft2d_in[1:], axis=0) # TODO this or above with static detection removal
# transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)
fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))
# Windowing 16x32
if window_type_2d:
fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=2)
# It is assumed that doppler is at the last axis.
# FFT 32x32
fft2d_out = np.fft.fft(fft2d_in)
aoa_input = fft2d_out
# Save zero-Doppler as azimuthStaticHeatMap, watch out for the bit shift in
# original code.
# Log_2 Absolute Value
fft2d_log_abs = np.log2(np.abs(fft2d_out))
# Accumulate
if accumulate:
return np.sum(fft2d_log_abs, axis=1), aoa_input # TODO divide by num_rx?
else:
return fft2d_log_abs, aoa_input
def doppler_estimation(radar_cube,
beam_weights,
num_tx_antennas=2,
clutter_removal_enabled=False,
interleaved=False,
window_type_2d=None):
"""Perform doppler estimation on the weighted sum of range FFT output across all virtual antennas.
In contrast to directly computing doppler FFT from the output of range FFT, this function combines it across all
the virtual receivers first using the weights generated from beamforming. Then FFT is performed and argmax is taken
across each doppler axis to return the indices of max doppler values.
Args:
radar_cube (ndarray): Output of the 1D FFT with only ranges on detected objects. If not interleaved beforehand,
it has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). Otherwise, it has the shape of
(numDetObjs, numVirtualAntennas, num_doppler_bins). It is assumed that after interleaving the doppler
dimension is located at the last axis.
beam_weights (ndarray): Weights to sum up the radar_cube across the virtual receivers. It is from the
beam-forming and has the shape of (numVirtualAntennas, numDetObjs)
num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.
clutter_removal_enabled (boolean): Flag to enable naive clutter removal.
interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not
            interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). The interleaving process
            will transform it such that it becomes (numDetObjs, numVirtualAntennas, num_doppler_bins). Note that this
            interleaving is only applicable to TDM radar, i.e. each tx emits the chirp sequentially.
window_type_2d (string): Optional windowing type before doppler FFT.
Returns:
doppler_est (ndarray): (numDetObjs) Doppler index for each detected objects. Positive index means moving away
from radar while negative index means moving towards the radar.
"""
fft2d_in = None
if not interleaved:
num_doppler_bins = radar_cube.shape[0] / num_tx_antennas
# radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,
# and even are from tx2) so it becomes (num_doppler_bins, numVirtualAntennas, numADCSamples), where
# numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.
# Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.
if num_tx_antennas == 2:
fft2d_in = np.concatenate((radar_cube[0::2, ...], radar_cube[1::2, ...]), axis=1)
elif num_tx_antennas == 3:
fft2d_in = np.concatenate((radar_cube[0::3, ...], radar_cube[1::3, ...], radar_cube[2::3, ...]), axis=1)
# transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)
fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))
else:
num_doppler_bins = radar_cube.shape[2]
# (Optional) Static Clutter Removal
if clutter_removal_enabled:
fft2d_in = compensation.clutter_removal(fft2d_in)
# Weighted sum across all virtual receivers.
fft2d_in = np.einsum('ijk,jk->ik', fft2d_in, beam_weights)
# Windowing 16x32
if window_type_2d:
fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=1)
# It is assumed that doppler is at the last axis.
# FFT 32x32
    doppler_est = np.fft.fft(fft2d_in)
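    # --- hedged completion sketch; the original continuation is not shown in this source ---
    # Pick the doppler bin with the largest magnitude for each detected object, then map
    # bins in the upper half of the spectrum to negative doppler indices.
    doppler_est = np.argmax(np.abs(doppler_est), axis=-1)
    doppler_est[doppler_est >= num_doppler_bins / 2] -= int(num_doppler_bins)
    return doppler_est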
import os, torch
import numpy as np
from numba import njit, jit
from UTILS.colorful import *
from UTILS.tensor_ops import copy_clone, my_view, add_onehot_id_at_last_dim, add_obs_container_subject
import pickle
from config import GlobalConfig
DEBUG = True
# @njit
def distance_matrix(A):
assert A.shape[-1] == 2 # assert 2D situation
    n_subject = A.shape[-2] # number of subjects (points)
A = np.repeat(np.expand_dims(A,-2), n_subject, axis=-2) # =>(64, 100, 100, 2)
At = np.swapaxes(A,-2,-3) # =>(64, 100, 100, 2)
dis = At-A # =>(64, 100, 100, 2)
dis = np.linalg.norm(dis, axis=-1)
return dis
def stack_padding(l):
import itertools
return np.column_stack((itertools.zip_longest(*l, fillvalue=0)))
def dir_to_rad_angle(delta_pos):
result = np.empty(delta_pos.shape[:-1], dtype=complex)
result.real = delta_pos[...,0]; result.imag = delta_pos[...,1]
rad_angle = np.angle(result)
return rad_angle
def reg_angle_deg(deg):
return (deg + 180)%360 -180
def reg_angle(rad):
# it's OK to show "RuntimeWarning: invalid value encountered in remainder"
return (rad + np.pi)%(2*np.pi) -np.pi
class ShellEnvWrapper(object):
def __init__(self, n_agent, n_thread, space, mcv, RL_functional,
alg_config, scenario_config):
self.n_agent = n_agent
self.n_thread = n_thread
self.space = space
self.mcv = mcv
self.RL_functional = RL_functional
self.n_cluster = alg_config.n_cluster
self.n_basic_dim = scenario_config.obs_vec_length
self.n_entity = scenario_config.num_entity
self.num_guards = scenario_config.num_guards
self.num_attackers = scenario_config.num_attackers
self.agent_uid = scenario_config.uid_dictionary['agent_uid']
self.entity_uid = scenario_config.uid_dictionary['entity_uid']
self.f_uid = scenario_config.uid_dictionary['guards_uid']
self.h_uid = scenario_config.uid_dictionary['attackers_uid']
self.dec = scenario_config.dec_dictionary
self.n_object = scenario_config.num_object
self.load_checkpoint = alg_config.load_checkpoint
self.cold_start = True
self._division_obsR_init = None
self._division_obsL_init = None
@staticmethod
def get_binary_array(n, n_bits, dtype=np.float32):
arr = np.zeros(n_bits, dtype=dtype)
pointer = 0
while True:
arr[pointer] = int(n%2==1)
n = n >> 1
pointer += 1
if n == 0: break
return arr
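    # e.g. get_binary_array(6, 4) returns array([0., 1., 1., 0.]) -- least-significant bit first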
def interact_with_env(self, State_Recall):
        act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=int) - 1 # initialize all entries to -1
obs_range = 2.0
# read internal coop graph info
obs = State_Recall['Latest-Obs']
obs = my_view(obs, [0, self.n_object, -1])
alive = obs[:,:,self.dec['alive']]
falive = alive[:, self.f_uid]
halive = alive[:, self.h_uid]
pos = obs[:,:,self.dec['pos']]
ang = obs[:,:,self.dec['ang']]
vel = obs[:,:,self.dec['vel']]
idx = obs[:,:,self.dec['id']]
dis = distance_matrix(pos)
f2h_dis = dis[:, self.f_uid, :][:, :, self.h_uid]
f2f_dis = dis[:, self.f_uid, :][:, :, self.f_uid]
agent_emb = obs[:,self.f_uid]
h_emb = obs[:,self.h_uid]
agent_emb = add_onehot_id_at_last_dim(agent_emb) # add onehot vec
n_act = 8
fpos = pos[:,self.f_uid]
fvel = vel[:,self.f_uid]
fang = ang[:,self.f_uid]
hpos = pos[:,self.h_uid]
        # target_emb: consider adding some special points; the original 8 raw actions are appended here
n_thread = pos.shape[0]
observable = [np.where(
(f2h_dis[i] < obs_range).any(axis=0) & (halive[i] == 1)
)[0] for i in range(n_thread)] # $n_thread.
target_emb = np.zeros(shape=(n_thread, 50, self.n_basic_dim), dtype=obs.dtype)
        # compatibility[mask.bool()] = -math.inf, i.e. mask=1 means padded, mask=0 means valid
        padding_mask = np.ones(shape=(n_thread, self.n_agent, 50), dtype=int)
for i in range(n_thread):
target_emb[i, :len(observable[i])] = h_emb[i, observable[i]]
            padding_mask[i, :, :len(observable[i])] = 0 # non-padded (valid) part
dummy_action_target = np.zeros(shape=(n_thread, n_act, self.n_basic_dim), dtype=obs.dtype)
for i in range(n_act):
dummy_action_target[:,i] = self.get_binary_array(n=i+15, n_bits=7, dtype=obs.dtype)
        # the first 8 entities are the raw actions; mask=1 means padded, mask=0 means valid
        action_c_padding = np.zeros(shape=(n_thread, self.n_agent, 8), dtype=int)
entity_emb = np.concatenate((dummy_action_target, target_emb), -2)
padding_mask = np.concatenate((action_c_padding, padding_mask), -1)
        # how to pad so it is attention-compatible? a headache
# action :: what action [ Discrete(2), for action type || Discrete(n_act), for action|| Discrete(n_target), for which target]
# action is agent wise
ENV_PAUSE = State_Recall['ENV-PAUSE']
all_emb = {'agent_final_emb':agent_emb[~ENV_PAUSE],
'entity_final_emb':entity_emb[~ENV_PAUSE],
'padding_mask':padding_mask[~ENV_PAUSE]}
Internal_State_Recall = {'threads_active_flag':~ENV_PAUSE, 'all_emb':all_emb, 'Test-Flag':State_Recall['Test-Flag']}
act_active, internal_recall = self.RL_functional.interact_with_env_genuine(Internal_State_Recall)
act[~ENV_PAUSE] = act_active
# print(act_active)
use_raw_action = (act < 8)
if (~use_raw_action).any():
t_act = np.where(use_raw_action, 0, act-8) # (8=$n_thread, 50=$n_agent)
observable_padded = stack_padding(observable) # (8=$n_thread, 4=$max_n_obs)
T_type_uid = np.take_along_axis(observable_padded, axis=1, indices=t_act)
target_pos = np.take_along_axis(hpos, axis=1,
indices=np.repeat(np.expand_dims(T_type_uid,-1), 2, axis=-1))
use_raw_action_=np.repeat(np.expand_dims(use_raw_action,-1), 2, axis=-1)
target_pos = np.where(use_raw_action_, np.nan, target_pos)
delta_pos = target_pos - fpos
to_target_acc = self.dir_to_action(delta_pos, fvel)
to_target_acc = to_target_acc.squeeze(-1)
rad_angle = dir_to_rad_angle(delta_pos) # agent.state.p_ang = np.pi*1/2 if agent.attacker else 0*np.pi/2
delta_angle = reg_angle(rad_angle - fang)
delta_angle = np.where(np.abs(delta_angle) < np.pi/8, np.nan, delta_angle)
delta_angle = reg_angle(delta_angle)
to_target_acc = np.where(delta_angle<0, 6, to_target_acc)
to_target_acc = np.where(delta_angle>0, 5, to_target_acc)
# assert (np.where(use_raw_action, 1, to_target_acc) != 0).all()
act = np.where(use_raw_action, act, to_target_acc)
# to_target_dir = self.dir_to_daction(delta_pos, fang)
        # all_action = self.dir_to_action(vec=delta_pos, vel=act_dec['agent_vel']) # vector pointing to the selected entity
# to_target_final = to_target_dir + int(to_target_dir==0)*to_target_acc
actions_list = []
act = np.expand_dims(act, -1)
for i in range(self.n_agent): actions_list.append(act[:,i,:])
actions_list = np.array(actions_list)
# return necessary handles to main platform
if self.cold_start: self.cold_start = False
# <2> call a empty frame to gather reward
State_Recall['_hook_'] = internal_recall['_hook_']
assert State_Recall['_hook_'] is not None
return actions_list, State_Recall # state_recall dictionary will preserve states for next action making
@staticmethod
def __random_select_init_value_(n_container, n_subject):
t_final = []; entropy = np.array([])
for _ in range(20): # max entropy in samples
            tmp = np.random.randint(low=0, high=n_container, size=(n_subject,), dtype=int); t_final.append(tmp)
entropy = np.append(entropy, sum([ -(sum(tmp==i)/n_subject)*np.log(sum(tmp==i)/n_subject) if sum(tmp==i)!=0 else -np.inf for i in range(n_container)]))
return t_final[np.argmax(entropy)]
@staticmethod
@jit(forceobj=True)
def dir_to_action(vec, vel):
def np_mat3d_normalize_each_line(mat):
            return mat / np.expand_dims(np.linalg.norm(mat, axis=2), axis=-1)
import numpy as np
def LSE(A, B, initialGamma=1000.):
coefficients_matrix = A # layer 4
rhs_matrix = B # right hand matrix. ie Ys
S = np.eye(coefficients_matrix.shape[1]) * initialGamma
x = np.zeros((coefficients_matrix.shape[1], 1)) # need to correct for multi-dim B
for i in range(coefficients_matrix.shape[0]):
a = coefficients_matrix[i, :] # row of wn * extended_x for each sample_set.
am = np.matrix(a)
        b = np.array(rhs_matrix[i])
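        # --- hedged completion sketch: the standard recursive least-squares (LSE) update
        # used for ANFIS consequent parameters; the original continuation is not shown here ---
        S = S - (S @ am.T @ am @ S) / (1.0 + float(am @ S @ am.T))
        x = x + S @ am.T @ (np.matrix(b) - am @ x)
    return x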
import numpy as np
import pandas as pd
def get_module_score_matrix(alldata, signature_mask, nbins=10, ncontrol=5):
"""generates a module score (a la Seurat's AddModuleScore, see Tirosh 2016) on a matrix, with a mask. I don't call this directly (<NAME> 3 June 2020).
Parameters
----------
alldata : matrix
    signature_mask : boolean mask over rows (genes) indicating signature membership
nbins : Number of quantile bins to use
(Default value = 10)
ncontrol : Number of genes in each matched quantile
(Default value = 5)
Returns
-------
"""
assert len(signature_mask) == alldata.shape[0]
nonsigdata = alldata[~signature_mask, :]
sigdata = alldata[signature_mask, :]
gene_quantiles = pd.qcut(alldata.mean(axis=1),
nbins,
duplicates='drop',
labels=False)
sigdata_quantiles = gene_quantiles[signature_mask]
nonsigdata_quantiles = gene_quantiles[~signature_mask]
signature = sigdata.mean(axis=0)
control_group = []
for quantile in np.unique(sigdata_quantiles):
noccurrences = np.sum(sigdata_quantiles == quantile)
# there's an edge case wherein if size is greater than the number of genes to be taken without replacement, this will generate an error. Will be an issue in the few-gene regime
control_group += list(
np.random.choice(np.where(nonsigdata_quantiles == quantile)[0],
size=ncontrol * noccurrences,
replace=False))
control_group = np.array(control_group)
control = nonsigdata[control_group].mean(axis=0)
return signature - control
def generate_masked_module_score(loom,
layername,
mask,
genelist,
ca_name,
nbins=10,
ncontrol=5,
gene_ra='gene'):
"""
Parameters
----------
loom : Name of loom object of interest.
layername : Layername on which the module score will be calculated.
mask : Mask over cells over which the score will be calculated ("None" for all cells)
genelist : list of gene names in signature
ca_name : Desired name of signature to be made into a column attribute.
nbins :
(Default value = 10)
ncontrol :
(Default value = 5)
gene_ra :
(Default value = 'gene')
Returns
-------
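    Examples
    --------
    A hypothetical call, assuming a loom file with a layer named 'log_tpm' and a gene
    row attribute 'gene' (all names below are illustrative, not from this codebase)::

        generate_masked_module_score(loom, 'log_tpm', None,
                                     ['CD3D', 'CD3E', 'CD2'], 'TCellScore')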
"""
from panopticon.analysis import get_module_score_matrix
if mask is None:
mask = np.array([True] * loom.shape[1])
matrix = loom[layername][:, mask]
sigmask = np.isin(loom.ra[gene_ra], genelist)
sig_score = get_module_score_matrix(matrix,
sigmask,
nbins=nbins,
ncontrol=ncontrol)
maskedscores = []
counter = 0
for flag in mask:
if flag:
maskedscores.append(sig_score[counter])
counter += 1
else:
maskedscores.append(np.nan)
loom.ca[ca_name] = maskedscores
def generate_nmf_and_loadings(loom,
layername,
nvargenes=2000,
n_components=100,
verbose=False):
"""
Parameters
----------
loom : LoomConnection object
layername :
nvargenes :
(Default value = 2000)
n_components :
(Default value = 100)
verbose :
(Default value = False)
Returns
-------
"""
from sklearn.decomposition import NMF
if 'GeneVar' not in loom.ra.keys():
raise Exception(
"Necessary to have already generated gene expression variances")
vargenemask = loom.ra['GeneVar'] > np.sort(
loom.ra['GeneVar'])[::-1][nvargenes]
X = loom[layername][vargenemask, :]
model = NMF(n_components=n_components,
init='random',
random_state=0,
verbose=verbose)
W = model.fit_transform(X)
H = model.components_
# record NMF basis
nmf_factors = []
counter = 0
for isvargene in vargenemask:
if isvargene:
nmf_factors.append(W[counter, :])
counter += 1
else:
nmf_factors.append(np.zeros(W.shape[1]))
nmf_factors = np.vstack(nmf_factors)
factor_sums = []
for i in range(nmf_factors.shape[1]):
loom.ra['{} NMF Component {}'.format(
layername, i + 1)] = nmf_factors[:, i] / np.sum(nmf_factors[:, i])
factor_sums.append(np.sum(nmf_factors[:, i]))
factor_sums = np.array(factor_sums)
# record NMF loadings
for i in range(H.shape[0]):
loom.ca['{} NMF Loading Component {}'.format(
layername, i + 1)] = H[i, :] * factor_sums[i]
loom.attrs['NumberNMFComponents'] = n_components
def generate_incremental_pca(loom,
layername,
batch_size=512,
n_components=50,
min_size_for_incrementalization=5000):
"""Computes a principal component analysis (PCA) over a layer of interest. Defaults to incremental PCA (using IncrementalPCA from sklearn.decomposition) but will switch to conventional PCA for LoomConnections with cell
numbers below a min_size_for_incrementalization. Will write the n_components principal components as row attributes:
- (layer) PC (PC number, 1-indexed)
The following are written as attributes:
- NumberPrincipalComponents_(layername). This is simply n_components.
    - PCAExplainedVarianceRatio_(layername). This is explained_variance_ratio_ from the PCA model.
Will also run panopticon.analysis.generate_pca_loadings.
Parameters
----------
loom : The LoomConnection instance upon which PCA will be calculated.
layername : The layer of the loom file over which the PCs will be computed.
batch_size :
(Default value = 512)
n_components :
(Default value = 50)
min_size_for_incrementalization :
(Default value = 5000)
Returns
-------
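    Examples
    --------
    A minimal usage sketch (assumes a loom file 'all_cells.loom' with a layer named
    'log_tpm'; these names are illustrative)::

        import loompy
        with loompy.connect('all_cells.loom') as loom:
            generate_incremental_pca(loom, 'log_tpm', n_components=50)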
"""
from tqdm import tqdm
from sklearn.decomposition import IncrementalPCA, PCA
from panopticon.analysis import generate_pca_loadings
batch_size_altered = False
while loom.shape[1] % batch_size < n_components:
batch_size += 1
batch_size_altered = True
if batch_size_altered:
print(
"Batch size increased to {} so that smallest batch will be greater than n_components"
.format(batch_size))
if loom.shape[1] < min_size_for_incrementalization:
print(
"Loom size below threshold for incremental PCA; running conventional PCA"
)
pca = PCA(n_components=n_components)
pca.fit(loom[layername][:, :].T)
else:
pca = IncrementalPCA(n_components=n_components)
for (ix, selection, view) in tqdm(loom.scan(axis=1,
batch_size=batch_size),
total=loom.shape[1] // batch_size):
#pca.partial_fit(view[:, :].transpose())
pca.partial_fit(view[layername][:, :].T)
for i in range(50):
loom.ra['{} PC {}'.format(layername, i + 1)] = pca.components_[i]
loom.attrs['NumberPrincipalComponents_{}'.format(layername)] = n_components
loom.attrs['PCAExplainedVarianceRatio_{}'.format(
layername)] = pca.explained_variance_ratio_
generate_pca_loadings(loom, layername)
def generate_pca_loadings(loom, layername, dosparse=False, batch_size=512):
"""
Parameters
----------
loom : LoomConnection object
layername :
dosparse :
(Default value = False)
batch_size :
(Default value = 512)
Returns
-------
"""
from tqdm import tqdm
if len([x for x in loom.ra.keys() if '{} PC'.format(layername) in x]) == 0:
raise Exception(
"It seems that {} PCs have not yet been calculated; ergo no UMAP embedding can be calculated"
.format(layername))
# n_pca_cols = np.max([
# int(x.split(' PC ')[1]) for x in loom.ra.keys()
# if '{} PC '.format(layername) in x
# ])
n_pca_cols = loom.attrs['NumberPrincipalComponents_{}'.format(layername)]
# elif pca_tpye == 'rank':
# n_pca_cols = np.max([int(x.split(' PC ')[1]) for x in loom.ra.keys() if 'rank PC' in x])
pcas = []
for col in [
'{} PC {}'.format(layername, x) for x in range(1, n_pca_cols + 1)
]:
pcas.append(loom.ra[col])
cellpca = np.vstack(pcas).T
if dosparse:
sparsedata = loom[layername].sparse().tocsr()
compresseddata = (sparsedata.transpose() @ cellpca)
else:
compresseddatalist = []
for (ix, selection, view) in tqdm(loom.scan(axis=1,
batch_size=batch_size),
total=loom.shape[1] // batch_size):
compresseddatalist.append(view[layername][:, :].T @ cellpca)
compresseddata = np.vstack(compresseddatalist)
for iloading in range(compresseddata.shape[1]):
loom.ca['{} PC {} Loading'.format(layername, iloading +
1)] = compresseddata[:, iloading]
def get_pca_loadings_matrix(loom, layername, n_components=None):
"""
Parameters
----------
loom : LoomConnection object
layername : corresponding layer from which to retrieve PCA loadings matrix
n_components :
(Default value = None)
Returns
-------
"""
pca_loadings = []
if n_components != None:
for col in [
'{} PC {} Loading'.format(layername, x)
for x in range(1, n_components + 1)
]:
pca_loadings.append(loom.ca[col])
else:
n_components = loom.attrs['NumberPrincipalComponents_{}'.format(
layername)]
for col in [
'{} PC {} Loading'.format(layername, x)
for x in range(1, n_components + 1)
]:
pca_loadings.append(loom.ca[col])
return np.vstack(pca_loadings).T
def generate_embedding(loom,
layername,
min_dist=0.0001,
n_neighbors=30,
n_epochs=1000,
metric='correlation',
random_state=None,
                       n_pca_components=None,
                       components_to_use=None,
                       mode='pca'):
"""
Parameters
----------
loom : LoomConnection object
    layername :
    min_dist :
        (Default value = 0.0001)
    n_neighbors :
        (Default value = 30)
    n_epochs :
        (Default value = 1000)
    metric :
        (Default value = 'correlation')
    random_state :
        (Default value = None)
    components_to_use :
        (Default value = None)
    mode :
        (Default value = 'pca')
n_pca_components :
(Default value = None)
Returns
-------
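    Examples
    --------
    A usage sketch; assumes generate_incremental_pca has already been run on the same
    layer so that the PC loadings exist (layer name illustrative)::

        generate_embedding(loom, 'log_tpm', mode='pca', n_pca_components=50)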
"""
import umap
if mode not in ['pca', 'nmf']:
raise Exception("Currently only two modes implemented: nmf and pca")
if mode == 'pca':
from panopticon.analysis import get_pca_loadings_matrix
compressed = get_pca_loadings_matrix(loom,
layername,
n_components=n_pca_components)
elif mode == 'nmf':
n_nmf_cols = loom.attrs['NumberNMFComponents']
print(n_nmf_cols)
nmf_loadings = []
if components_to_use != None:
for col in [
'{} NMF Loading Component {}'.format(layername, x)
for x in components_to_use
]:
nmf_loadings.append(loom.ca[col])
else:
for col in [
'{} NMF Loading Component {}'.format(layername, x)
for x in range(1, n_nmf_cols + 1)
]:
nmf_loadings.append(loom.ca[col])
print(len(nmf_loadings))
compressed = np.vstack(nmf_loadings).T
    reducer = umap.UMAP(random_state=random_state,
min_dist=min_dist,
n_neighbors=n_neighbors,
metric=metric,
verbose=True,
n_epochs=n_epochs)
embedding = reducer.fit_transform(compressed)
loom.ca['{} {} UMAP embedding 1'.format(layername,
mode.upper())] = embedding[:, 0]
loom.ca['{} {} UMAP embedding 2'.format(layername,
mode.upper())] = embedding[:, 1]
def get_subclustering(X,
score_threshold,
max_clusters=50,
min_input_size=10,
silhouette_threshold=0.2,
regularization_factor=0.01,
clusteringcachedir='clusteringcachedir/'):
"""
Parameters
----------
    X :
    score_threshold :
    max_clusters :
        (Default value = 50)
    min_input_size :
        (Default value = 10)
silhouette_threshold :
(Default value = 0.2)
regularization_factor :
(Default value = 0.01)
clusteringcachedir :
(Default value = 'clusteringcachedir/')
Returns
-------
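    Examples
    --------
    A toy sketch on synthetic data (numbers are illustrative only)::

        import numpy as np
        X = np.random.randn(200, 10)
        labels = get_subclustering(X, score_threshold=0.2, max_clusters=10)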
"""
from sklearn.metrics import silhouette_score
from sklearn.cluster import AgglomerativeClustering
from tqdm import tqdm
if X.shape[0] < min_input_size:
return np.array([0] * X.shape[0])
else:
clustering = AgglomerativeClustering(n_clusters=2,
memory=clusteringcachedir,
affinity='cosine',
compute_full_tree=True,
linkage='average')
scores = []
minnk = 2
for nk in tqdm(range(minnk, np.min([max_clusters, X.shape[0]]), 1)):
clustering.set_params(n_clusters=nk)
clustering.fit(X)
score = silhouette_score(X,
clustering.labels_,
metric='cosine',
sample_size=None)
# sample_size=np.min([5000, X.shape[0]]))
scores.append(score)
#break
print("scores", np.array(scores))
# scores = scores - np.arange(len(scores))*regularization_factor
# print("corrected scores",np.array(scores))
if np.max(scores) >= score_threshold:
clustering.set_params(n_clusters=np.argmax(scores) + minnk)
clustering.fit(X)
print(
np.argmax(scores) + minnk, "clusters, with",
list(
pd.DataFrame(clustering.labels_)[0].value_counts().values),
"cells")
return clustering.labels_
else:
print("No score exceeded score threshold")
return np.array([0] * X.shape[0])
def generate_clustering(loom,
layername,
starting_clustering_depth=0,
n_clustering_iterations=3,
max_clusters='sqrt_rule',
mode='pca',
n_components=10,
silhouette_threshold=0.1,
clusteringcachedir='clusteringcachedir/',
out_of_core_batch_size=512,
min_subclustering_size=50,
first_round_leiden=False,
leiden_nneighbors=20,
leiden_iterations=10,
incremental_pca_threshold=10000):
"""
Parameters
----------
loom : LoomConnection object
    n_clustering_iterations : The number of clustering iterations to perform; e.g. starting_clustering_depth=0 with n_clustering_iterations=3 will assign values through column attribute ClusteringIteration2
        (Default value = 3)
starting_clustering_depth : The clustering iteration on which to begin; starting_clustering_depth=0 will assign values to column attribute ClusteringIteration0
(Default value = 0)
max_clusters :
        (Default value = 'sqrt_rule')
layername :
mode :
(Default value = 'pca')
silhouette_threshold :
(Default value = 0.1)
clusteringcachedir :
(Default value = 'clusteringcachedir/')
n_components :
(Default value = 10)
out_of_core_batch_size :
(Default value = 512)
min_subclustering_size :
(Default value = 50)
first_round_leiden :
(Default value = False)
leiden_nneighbors :
(Default value = 20)
leiden_iterations :
(Default value = 10)
incremental_pca_threshold :
(Default value = 10000)
Returns
-------
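    Examples
    --------
    A usage sketch, assuming PCA loadings have already been generated for the layer
    (layer name illustrative)::

        generate_clustering(loom, 'log_tpm', starting_clustering_depth=0,
                            n_clustering_iterations=3)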
"""
if type(n_clustering_iterations
) != int or n_clustering_iterations < 1 or type(
starting_clustering_depth) != int:
raise Exception(
"final_clustering_depth and starting_clustering_depth must be natural numbers."
)
if (starting_clustering_depth > 0) and (
'ClusteringIteration{}'.format(starting_clustering_depth - 1)
not in loom.ca.keys()):
raise Exception(
"starting_clustering_depth not yet computed; please run with lower starting_clustering depth, or 0"
)
if mode not in ['pca', 'nmf']:
raise Exception("Currently only implemented for modes: pca and nmf")
from time import time
from sklearn.decomposition import IncrementalPCA
from tqdm import tqdm
from panopticon.analysis import get_subclustering
if mode == 'pca':
from sklearn.decomposition import PCA
elif mode == 'nmf':
from sklearn.decomposition import NMF
final_clustering_depth = starting_clustering_depth + n_clustering_iterations - 1
if starting_clustering_depth == 0:
if first_round_leiden:
from sklearn.neighbors import kneighbors_graph
from panopticon.analysis import get_pca_loadings_matrix
from panopticon.utilities import get_igraph_from_adjacency
import leidenalg
X = get_pca_loadings_matrix(loom, layername)
A = kneighbors_graph(X,
leiden_nneighbors,
mode='connectivity',
include_self=True,
metric='cosine')
ig = get_igraph_from_adjacency(A)
part = leidenalg.find_partition(
ig,
leidenalg.RBConfigurationVertexPartition,
n_iterations=leiden_iterations)
clustering = part.membership
else:
if mode == 'nmf':
n_nmf_cols = loom.attrs['NumberNMFComponents']
nmf_loadings = []
for col in [
'{} NMF Loading Component {}'.format(layername, x)
for x in range(1, n_nmf_cols + 1)
]:
nmf_loadings.append(loom.ca[col])
X = np.vstack(nmf_loadings).T
elif mode == 'pca':
from panopticon.analysis import get_pca_loadings_matrix
X = get_pca_loadings_matrix(loom,
layername,
n_components=n_components)
if max_clusters == 'sqrt_rule':
clustering = get_subclustering(
X,
silhouette_threshold,
max_clusters=int(np.floor(np.sqrt(X.shape[0]))),
clusteringcachedir=clusteringcachedir)
else:
clustering = get_subclustering(
X,
silhouette_threshold,
max_clusters=max_clusters,
clusteringcachedir=clusteringcachedir)
loom.ca['ClusteringIteration0'] = clustering
starting_clustering_depth = 1
for subi in range(starting_clustering_depth, final_clustering_depth + 1):
loom.ca['ClusteringIteration{}'.format(subi)] = ['U'] * len(
loom.ca['ClusteringIteration{}'.format(subi - 1)])
for cluster in set([
x for x in loom.ca['ClusteringIteration{}'.format(subi - 1)]
if x != 'U'
]): #will need to fix
mask = loom.ca['ClusteringIteration{}'.format(
subi -
1)] == cluster #first mask, check for top level clustering
#break
# mask = np.nonzero(mask) # workaround to accommodate h5py 3.* bug -- <NAME> 5 Aug 2021
print("No. cells in cluster", mask.sum())
if mode == 'nmf':
print(
"NMF operation currently requires loading full layer into memory"
)
start = time()
data_c = loom[layername][:, mask]
print("processing cluster", cluster, "; time to load: ",
time() - start, ", mask size: ", np.sum(mask))
model = NMF(n_components=np.min([50, data_c.shape[1]]),
init='random',
random_state=0)
X = model.fit_transform(data_c.T)
elif mode == 'pca':
if mask.sum() > incremental_pca_threshold:
print(
"Warning: running incremental PCA with loom.scan with masked items; this can lead to batches smaller than the number of principal components. If computation fails, try adjusting out_of_core_batch_size, or raising incremental_pca_threshold."
)
pca = IncrementalPCA(n_components=n_components)
for (ix, selection, view) in tqdm(
loom.scan(axis=1,
batch_size=out_of_core_batch_size,
items=mask,
layers=[layername]),
total=loom.shape[1] // out_of_core_batch_size):
pca.partial_fit(view[layername][:, :].T)
compresseddatalist = []
for (ix, selection, view) in tqdm(
loom.scan(axis=1,
batch_size=out_of_core_batch_size,
items=mask,
layers=[layername]),
total=loom.shape[1] // out_of_core_batch_size):
compresseddatalist.append(
view[layername][:, :].T @ pca.components_.T)
X = np.vstack(compresseddatalist)
elif mask.sum() < min_subclustering_size:
X = np.array(
[None] * mask.sum()
) # This is a hack to avoid computing PCA in cases where no clustering will be performed
else:
data_c = loom[layername][:, mask].T
model = PCA(n_components=np.min([10, data_c.shape[0]]),
random_state=0)
X = model.fit_transform(data_c)
if max_clusters == 'sqrt_rule':
nopath_clustering = get_subclustering(
X,
silhouette_threshold,
max_clusters=int(np.floor(np.sqrt(X.shape[0]))),
clusteringcachedir=clusteringcachedir,
min_input_size=min_subclustering_size)
else:
nopath_clustering = get_subclustering(
X,
silhouette_threshold,
max_clusters=max_clusters,
clusteringcachedir=clusteringcachedir,
min_input_size=min_subclustering_size)
fullpath_clustering = [
'{}-{}'.format(cluster, x) for x in nopath_clustering
]
loom.ca['ClusteringIteration{}'.format(
subi)][mask] = fullpath_clustering
loom.ca['ClusteringIteration{}'.format(subi)] = loom.ca[
'ClusteringIteration{}'.format(
subi)] #This is to force the changes to save to disk
def get_cluster_markers(loom, layername, cluster_level):
"""
Parameters
----------
loom : LoomConnection object
layername :
cluster_level :
Returns
-------
"""
from panopticon.analysis import get_cluster_differential_expression
diffex = {}
for cluster in np.unique(loom.ca[cluster_level]):
try:
diffex[cluster] = get_cluster_differential_expression(
loom,
layername,
cluster_level=cluster_level,
ident1=cluster,
verbose=True,
ident1_downsample_size=500,
ident2_downsample_size=500).query('MeanExpr1 > MeanExpr2')
except:
print("Some issue processing cluster {}".format(cluster))
return diffex
def get_cluster_embedding(loom,
layername,
cluster,
min_dist=0.01,
n_neighbors=None,
verbose=False,
mask=None,
genemask=None,
n_components_pca=50):
"""
Parameters
----------
loom : LoomConnection object
layername :
cluster :
min_dist :
(Default value = 0.01)
n_neighbors :
(Default value = None)
verbose :
(Default value = False)
mask :
(Default value = None)
genemask :
(Default value = None)
n_components_pca :
(Default value = 50)
Returns
-------
"""
from sklearn.decomposition import PCA
import umap
clustering_level = len(str(cluster).split('-')) - 1
if mask is None:
mask = loom.ca['ClusteringIteration{}'.format(
clustering_level)] == cluster
else:
print("Clustering over custom mask--ignoring cluster argument")
if n_neighbors is None:
n_neighbors = int(np.sqrt(np.sum(mask)))
if genemask is None:
data = loom[layername][:, mask]
else:
data = loom[layername][genemask, :][:, mask]
pca = PCA(n_components=np.min([n_components_pca, data.shape[1]]))
pca.fit(data[:, :].transpose())
cellpca = (data.T @ pca.components_.T)
reducer = umap.UMAP(random_state=17,
verbose=verbose,
min_dist=min_dist,
n_neighbors=n_neighbors,
metric='correlation')
# reducer.fit(cellpca, )
embedding = reducer.fit_transform(cellpca)
return embedding
def get_metafield_breakdown(loom,
cluster,
field,
complexity_cutoff=0,
mask=None):
"""
Parameters
----------
loom : LoomConnection object
cluster :
field :
complexity_cutoff :
(Default value = 0)
mask :
(Default value = None)
Returns
-------
"""
cluster_level = len(str(cluster).split('-')) - 1
if mask is None:
mask = (loom.ca['ClusteringIteration{}'.format(cluster_level)]
== cluster) & (loom.ca['nGene'] >= complexity_cutoff)
else:
print("ignoring cluster, using custom mask")
mask = (mask) & (loom.ca['nGene'] >= complexity_cutoff)
return pd.DataFrame(loom.ca[field][mask])[0].value_counts()
def get_patient_averaged_table(loom,
patient_key='patient_ID',
column_attributes=[],
n_cell_cutoff=0):
"""
Parameters
----------
loom : LoomConnection object
patient_key :
(Default value = 'patient_ID')
column_attributes :
(Default value = [])
n_cell_cutoff :
(Default value = 0)
Returns
-------
"""
unfiltered = pd.DataFrame(loom.ca[patient_key])
unfiltered.columns = [patient_key]
for column_attribute in column_attributes:
unfiltered[column_attribute] = loom.ca[column_attribute]
unfiltered.groupby(patient_key).mean()
threshold_filter = lambda x: np.sum(np.isnan(x)) > n_cell_cutoff
filtered = (
unfiltered.groupby(patient_key).apply(threshold_filter)).replace(
to_replace=False,
value=np.nan) * unfiltered.groupby(patient_key).mean()
return filtered
def generate_malignancy_score(loom,
layername,
cell_sort_key='CellSort',
patient_id_key='patient_ID',
malignant_sort_label='45neg',
cell_name_key='cellname'):
"""For calculating malignancy scores for cells based on inferred CNV. This subroutine isn't terribly future proof. <NAME> 6 June 2020.
Parameters
----------
loom : LoomConnection object
layername :
cell_sort_key :
(Default value = 'CellSort')
patient_id_key :
(Default value = 'patient_ID')
malignant_sort_label :
(Default value = '45neg')
    cell_name_key :
        (Default value = 'cellname')
Returns
-------
"""
from panopticon.wme import get_list_of_gene_windows, robust_mean_windowed_expressions
from sklearn.decomposition import PCA
from tqdm import tqdm
cnv_scores_dict = {}
cnv_quantiles_dict = {}
for patient in tqdm(
np.unique(loom.ca[patient_id_key]),
desc='Computing per-patient, per-cell malignancy scores'):
        mask = loom.ca[patient_id_key] == patient
if malignant_sort_label in loom.ca[cell_sort_key][mask]:
gene_windows = get_list_of_gene_windows(loom.ra['gene'])
single_patient_expression = loom[layername][:, mask]
mwe = robust_mean_windowed_expressions(loom.ra['gene'],
gene_windows,
single_patient_expression,
upper_cut=2)
pca = PCA(n_components=1)
pca1 = pca.fit_transform(mwe.T)[:, 0]
mask1 = loom.ca[cell_sort_key][mask] == malignant_sort_label
mask2 = loom.ca[cell_sort_key][mask] != malignant_sort_label
#if loom.ca[cell_sort_key][mask]
if pca1[mask1].mean() > pca1[mask2].mean():
scores45neg = np.sum(np.greater.outer(pca1[mask1],
pca1[mask2]),
axis=1) / np.sum(mask2)
cnv_quantiles = (np.argsort(pca1) / np.sum(mask))
elif pca1[mask1].mean() < pca1[mask2].mean():
scores45neg = np.sum(np.less.outer(pca1[mask1], pca1[mask2]),
axis=1) / np.sum(mask2)
cnv_quantiles = (np.argsort(pca1) / np.sum(mask))[::-1]
elif np.sum(
mask2
) == 0: # Case where we have no non-malignant reference cells
scores45neg = [np.nan] * np.sum(mask)
cnv_quantiles = [np.nan] * np.sum(mask)
else:
raise Exception(
"Unlikely event that CNV same for 45+/- has occurred")
else:
scores45neg = []
cnv_quantiles = [np.nan] * np.sum(mask)
counter45neg = 0
for i, (cell_name, cell_sort) in enumerate(
zip(loom.ca[cell_name_key][mask],
loom.ca[cell_sort_key][mask])):
cnv_quantiles_dict[cell_name] = cnv_quantiles[i]
if cell_sort == malignant_sort_label:
cnv_scores_dict[cell_name] = scores45neg[counter45neg]
counter45neg += 1
else:
cnv_scores_dict[cell_name] = 0
if counter45neg != len(scores45neg):
raise Exception("Some 45- cells unaccounted for")
loom.ca['MalignantCNVScore'] = [
cnv_scores_dict[x] for x in loom.ca[cell_name_key]
]
loom.ca['MalignantCNVQuantile'] = [
cnv_quantiles_dict[x] for x in loom.ca[cell_name_key]
]
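# Usage sketch (illustrative layer name; the loom file is assumed to carry 'CellSort',
# 'patient_ID' and 'cellname' column attributes, matching the defaults above):
#   generate_malignancy_score(loom, 'log2(TP10k+1)')
#   loom.ca['MalignantCNVScore']     # per-cell malignancy scores
#   loom.ca['MalignantCNVQuantile']  # per-cell CNV quantiles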
def get_cosine_self_similarity(loom, layername, cluster, self_mean=None):
"""
Parameters
----------
loom : LoomConnection object
layername :
cluster :
self_mean :
(Default value = None)
Returns
-------
"""
from sklearn.metrics.pairwise import cosine_similarity
clustering_level = len(str(cluster).split('-')) - 1
mask = loom.ca['ClusteringIteration{}'.format(clustering_level)] == cluster
if mask.sum() == 0:
raise Exception("Mask is empty")
if self_mean is None:
self_mean = loom[layername][:, mask].mean(axis=1)
return cosine_similarity(loom[layername][:, mask].T,
Y=np.array([self_mean]))
def get_dictionary_of_cluster_means(loom, layername, clustering_level):
"""
Parameters
----------
loom : LoomConnection object
layername :
clustering_level :
Returns
-------
"""
from tqdm import tqdm
mean_dict = {}
for cluster in tqdm(np.unique(loom.ca[clustering_level]),
desc='looping over clusters'):
mask = loom.ca[clustering_level] == cluster
if mask.sum() < 5000:
mean_dict[cluster] = loom[layername][:, mask].mean(axis=1)
else:
mean_dict[cluster] = loom[layername].map([np.mean],
selection=mask)[0]
return mean_dict
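# Usage sketch (illustrative layer and clustering attribute names):
#   means = get_dictionary_of_cluster_means(loom, 'log2(TP10k+1)',
#                                           'ClusteringIteration0')
#   means['0']  # mean expression vector (one entry per gene) for cluster '0'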
def get_differential_expression_dict(loom,
layername,
output=None,
downsample_size=500,
starting_iteration=0,
final_iteration=3,
min_cluster_size=50,
gene_alternate_name=None,
verbose=True):
"""Runs get_cluster_differential_expression over multiple clustering iterations (From ClusteringIteration(x) to ClusteringIteration(y), inclusive, where x = starting_iteration, and y = final_iteration), where ident1 is a cluster, and ident2 is the set of all other clusters which differ only in the terminal iteration (e.g. if there are clusters 0-0, 0-1, and 0-2, 1-0, and 1-1, differential expression will compare 0-0 with 0-1 and 0-2, 0-1 with 0-0 and 0-2, etc). Outputs a dictionary with each of these differential expression result, with key equal to ident1.
Parameters
----------
loom : LoomConnection object
layername : layer key of loom, over which differential expression will be computed
output : Optional filename whereto a .pkl object will be written with dictionary output, or an xlsx, with each key assigned to a separate sheet
(Default value = None)
downsample_size : Number of cells from each cluster to downsample to prior to running differential expression
(Default value = 500)
starting_iteration : if 0, will start with ClusteringIteration0, for example
(Default value = 0)
final_iteration : if 3, will continue to ClusteringIteration3, for example
(Default value = 3)
min_cluster_size : minimum size of clusters to consider (if one of clusters if below this threshold, will output nan instead of a differential expression dataframe for that particular key)
(Default value = 50)
gene_alternate_name :
(Default value = None)
verbose :
(Default value = True)
Returns
-------
"""
from panopticon.analysis import get_cluster_differential_expression
from panopticon.utilities import we_can_pickle_it
diffex = {}
for i in range(starting_iteration, final_iteration + 1):
for cluster in np.unique(loom.ca['ClusteringIteration{}'.format(i)]):
if verbose:
print(cluster)
diffex[cluster] = get_cluster_differential_expression(
loom,
layername,
cluster_level='ClusteringIteration{}'.format(i),
ident1=cluster,
ident1_downsample_size=downsample_size,
ident2_downsample_size=downsample_size,
min_cluster_size=min_cluster_size,
gene_alternate_name=gene_alternate_name)
if type(diffex[cluster]) != float:
diffex[cluster] = diffex[cluster].query(
'MeanExpr1 >MeanExpr2').head(500)
if verbose:
print(diffex[cluster].head(20))
if verbose:
print('')
if output is not None:
if output.endswith('.xlsx'):
try:
import xlsxwriter
except ImportError as e:
print(
"xlsxwriter not installed; returning output without writer to excel file"
)
return diffex
relkeys = [
x for x in diffex.keys()
if type(diffex[x]) == pd.core.frame.DataFrame
]
with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
for key in relkeys:
prefix = '-'.join(str(key).split('-')[0:-1])
complement = ', '.join([
x for x in np.unique(loom.ca[
'ClusteringIteration{}'.format(
len(prefix.split('-')))])
if x != key and x.startswith(prefix)
])
sheet_name = '{} up vs. {}'.format(key, complement)
if len(
sheet_name
) >= 32: # excel doesn't like it when sheets have names with more than 31 characters
complement = complement.replace(
'{}-'.format(prefix), '-')
sheet_name = '{} up vs. {}'.format(key, complement)
if len(sheet_name) >= 32:
sheet_name = '{} up'.format(key)
diffex[key].to_excel(writer,
sheet_name=sheet_name,
index=False)
writer.save()
else:
we_can_pickle_it(diffex, output)
return diffex
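# Usage sketch (illustrative layer name and output path):
#   diffex = get_differential_expression_dict(loom, 'log2(TP10k+1)',
#                                             output='diffex.xlsx',
#                                             final_iteration=2)
#   diffex['0-0'].head()  # top genes up in cluster 0-0 vs. its sibling clusters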
def scrna2tracer_mapping(scrna_cellnames, tracer_cellnames):
"""
Parameters
----------
scrna_cellnames :
tracer_cellnames :
Returns
-------
"""
# I hate everything about this--S Markson 7 September 2020
tracer2scrna_name = {}
for tracer_cellname in tracer_cellnames:
if tracer_cellname in scrna_cellnames:
tracer2scrna_name[tracer_cellname] = tracer_cellname
else:
for pattern in ['_Lym', '_nucseq', '_45pos', '_45neg']:
if tracer_cellname + pattern in scrna_cellnames:
tracer2scrna_name[
tracer_cellname] = tracer_cellname + pattern
if tracer_cellname not in tracer2scrna_name.keys(
) and tracer_cellname.startswith('M'):
#print(tracer_cellname)
pass
return tracer2scrna_name
def get_cluster_differential_expression(loom,
layername,
cluster_level=None,
ident1=None,
ident2=None,
mask1=None,
mask2=None,
verbose=False,
ident1_downsample_size=None,
ident2_downsample_size=None,
min_cluster_size=0,
gene_alternate_name=None):
"""
Parameters
----------
loom : LoomConnection object
cluster_level :
(Default value = None)
layername :
ident1 :
(Default value = None)
ident2 :
(Default value = None)
verbose :
(Default value = False)
ident1_downsample_size :
(Default value = None)
ident2_downsample_size :
(Default value = None)
mask1 :
(Default value = None)
mask2 :
(Default value = None)
min_cluster_size :
(Default value = 0)
gene_alternate_name :
(Default value = None)
Returns
-------
"""
from scipy.stats import mannwhitneyu
from statsmodels.stats.multitest import fdrcorrection
from time import time
from tqdm import tqdm
if (mask1 is not None) and (mask2 is not None):
print("ignoring ident1, ident2")
elif (mask1 is not None) or (mask2 is not None):
raise Exception(
"Either both or neither of mask1, mask2 must be specified")
else:
if cluster_level is None:
raise Exception(
"cluster_level must be specified when running with cluster identities, i.e. without specifying an explicit mask"
)
if ident1 is None:
raise Exception(
"ident1 must be specified when running with cluster identities, i.e. without specifying an explicit mask"
)
if type(ident1) != list:
ident1 = [ident1]
mask1 = np.isin(loom.ca[cluster_level], ident1)
if ident2 == None:
print(
"Automatic complement: Cells in same subcluster except lowest subcluster"
)
if cluster_level == 'ClusteringIteration0':
mask2 = ~np.isin(loom.ca[cluster_level], ident1)
clusterset = np.unique(loom.ca[cluster_level])
ident2 = list(np.setdiff1d(clusterset, ident1))
else:
cluster_level_number = int(cluster_level[-1])
prefices = [
'-'.join(x.split('-')[0:(cluster_level_number)]) +
'-' # 13 Apr 2020--check that this works
for x in ident1
]
if len(np.unique(prefices)) > 1:
raise Exception(
"Cluster Differential expression with automatic complement must use ident1 only from cells from same n-1th subcluster"
)
prefix = prefices[0]
clusterset = [
x for x in np.unique(loom.ca[cluster_level])
if x.startswith(prefix)
]
ident2 = list(np.setdiff1d(clusterset, ident1))
mask2 = np.isin(loom.ca[cluster_level], ident2)
else:
if type(ident2) != list:
ident2 = [ident2]
mask2 = np.isin(loom.ca[cluster_level], ident2)
print("Comparison of", ident1, "against", ident2)
if (np.sum(mask1) < min_cluster_size) or (np.sum(mask2) <
min_cluster_size):
return np.nan
if ident1_downsample_size:
p = np.min([ident1_downsample_size, np.sum(mask1)]) / np.sum(mask1)
mask1 *= np.random.choice([True, False],
p=[p, 1 - p],
size=mask1.shape[0])
if ident2_downsample_size:
p = np.min([ident2_downsample_size, np.sum(mask2)]) / np.sum(mask2)
mask2 *= np.random.choice([True, False],
p=[p, 1 - p],
size=mask2.shape[0])
print(np.sum(mask1), np.sum(mask2))
pvalues = []
uvalues = []
genes = []
meanexpr1 = []
meanexpr2 = []
meanexpexpr1 = []
meanexpexpr2 = []
fracexpr1 = []
fracexpr2 = []
start = time()
data1 = loom[layername][:, mask1]
if verbose:
print('First matrix extracted in', time() - start, 'seconds')
start = time()
data2 = loom[layername][:, mask2]
if verbose:
print('Second matrix extracted', time() - start, 'seconds')
for igene, gene in enumerate(
tqdm(loom.ra['gene'], desc='Computing Mann-Whitney p-values')):
genes.append(gene)
        if np.std(data1[igene, :]) + np.std(data2[igene, :])
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from PIL import Image
def get_row(array, row):
return array[row, :]
def get_col(array, col):
return array[:, col]
if __name__ == "__main__":
with open("red_rf.field", 'rb') as f:
red = np.load(f)
with open("nir_rf.field", 'rb') as f:
nir = np.load(f)
# create a list for the calculated reflectances
R_nir = []
R_red = []
# calculate for the lower right corner of the picture
nir_zeroth = np.mean(nir[950:975, 1175:1200])
nir_first = np.mean(nir[1070:1095, 1195:1220])
nir_second = np.mean(nir[1070:1095, 1260:1285])
nir_black = np.mean(nir[625:650, 1515:1540])
R_nir.append((nir_first-nir_black)/(nir_zeroth-nir_black))
R_nir.append((nir_second-nir_black)/(nir_first-nir_black))
red_zeroth = np.mean(red[950:975, 1175:1200])
red_first = np.mean(red[1070:1095, 1195:1220])
red_second = np.mean(red[1070:1095, 1260:1285])
red_black = np.mean(red[625:650, 1515:1540])
R_red.append((red_first-red_black)/(red_zeroth-red_black))
R_red.append((red_second-red_black)/(red_first-red_black))
# calculate for the upper right corner
nir_zeroth = np.mean(nir[195:220, 1150:1175])
nir_first = np.mean(nir[195:220, 1220:1245])
nir_second = np.mean(nir[130:155, 1220:1245])
nir_black = np.mean(nir[625:650, 1515:1540])
R_nir.append((nir_first-nir_black)/(nir_zeroth-nir_black))
R_nir.append((nir_second-nir_black)/(nir_first-nir_black))
red_zeroth = np.mean(red[195:220, 1150:1175])
red_first = np.mean(red[195:220, 1220:1245])
red_second = np.mean(red[130:155, 1220:1245])
red_black = np.mean(red[625:650, 1515:1540])
R_red.append((red_first-red_black)/(red_zeroth-red_black))
R_red.append((red_second-red_black)/(red_first-red_black))
# calculate for the upper left corner
nir_zeroth = np.mean(nir[198:223, 410:435])
nir_first = np.mean(nir[198:223, 275:300])
nir_second = np.mean(nir[125:150, 275:300])
nir_black = np.mean(nir[625:650, 1515:1540])
R_nir.append((nir_first-nir_black)/(nir_zeroth-nir_black))
R_nir.append((nir_second-nir_black)/(nir_first-nir_black))
red_zeroth = np.mean(red[198:223, 410:435])
red_first = np.mean(red[198:223, 275:300])
red_second = np.mean(red[125:150, 275:300])
red_black = np.mean(red[625:650, 1515:1540])
R_red.append((red_first-red_black)/(red_zeroth-red_black))
R_red.append((red_second-red_black)/(red_first-red_black))
# calculate for the lower left corner
nir_zeroth = np.mean(nir[980:1005, 345:370])
nir_first = np.mean(nir[1075:1100, 340:365])
    nir_second = np.mean(nir[1075:1100, 250:275])
#!/usr/bin/env python3
"""
Machine learning for PDF shapes
"""
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import time
import datetime
import numpy as np
import pickle
import pandas as pd
from scipy import stats
from scipy import signal
from scipy import ndimage
from scipy.stats.kde import gaussian_kde
from scipy.spatial import distance
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, BayesianRidge
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.svm import SVR
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.externals import joblib
from imblearn.over_sampling import RandomOverSampler
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
import utilities as utilities
import lrp
# ===============================================================================
#
# Some defaults variables
#
# ===============================================================================
cmap = [
"#EE2E2F",
"#008C48",
"#185AA9",
"#F47D23",
"#662C91",
"#A21D21",
"#B43894",
"#010202",
]
dashseq = [
(None, None),
[10, 5],
[10, 4, 3, 4],
[3, 3],
[10, 4, 3, 4, 3, 4],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
]
markertype = ["s", "d", "o", "p", "h"]
# ========================================================================
#
# Function definitions
#
# ========================================================================
def load_dice(fname):
"""
Load the data and get a training ready data frame
"""
dat = np.load(fname)
df = pd.DataFrame(
{
"Z": dat["Z"].flatten(),
"Z4": dat["Z4"].flatten(),
"Z8": dat["Z8"].flatten(),
"Z16": dat["Z16"].flatten(),
"Z32": dat["Z32"].flatten(),
"C": dat["C"].flatten(),
"C4": dat["C4"].flatten(),
"C8": dat["C8"].flatten(),
"C16": dat["C16"].flatten(),
"C32": dat["C32"].flatten(),
"SRC_PV": dat["SRC_PV"].flatten(),
"rhoSRC_PV": (dat["Rho"] * dat["SRC_PV"]).flatten(),
"SRC_PV4": dat["SRC_PV4"].flatten(),
"SRC_PV8": dat["SRC_PV8"].flatten(),
"SRC_PV16": dat["SRC_PV16"].flatten(),
"SRC_PV32": dat["SRC_PV32"].flatten(),
"Zvar4": dat["Zvar4"].flatten(),
"Zvar8": dat["Zvar8"].flatten(),
"Zvar16": dat["Zvar16"].flatten(),
"Zvar32": dat["Zvar32"].flatten(),
"Cvar4": dat["Cvar4"].flatten(),
"Cvar8": dat["Cvar8"].flatten(),
"Cvar16": dat["Cvar16"].flatten(),
"Cvar32": dat["Cvar32"].flatten(),
}
)
# Clip variables
df.Z = np.clip(df.Z, 0.0, 1.0)
df.Z4 = np.clip(df.Z4, 0.0, 1.0)
df.Z8 = np.clip(df.Z8, 0.0, 1.0)
df.Z16 = np.clip(df.Z16, 0.0, 1.0)
df.Z32 = np.clip(df.Z32, 0.0, 1.0)
df.C = np.clip(df.C, 0.0, None)
df.C4 = np.clip(df.C4, 0.0, None)
df.C8 = np.clip(df.C8, 0.0, None)
df.C16 = np.clip(df.C16, 0.0, None)
df.C32 = np.clip(df.C32, 0.0, None)
return dat, df
# ========================================================================
def gen_training(df, oname="training"):
"""
Generate scaled training, dev, test arrays
"""
x_vars = get_xnames()
y_vars = get_ynames(df)
return utilities.gen_training(df, x_vars, y_vars, oname)
# ========================================================================
def get_xnames():
return ["C", "Cvar", "Z", "Zvar"]
# ========================================================================
def get_ynames(df):
return [col for col in df if col.startswith("Y")]
# ========================================================================
def closest_point(point, points):
"""Find index of closest point"""
closest_index = distance.cdist([point], np.asarray(points)).argmin()
if isinstance(points, pd.DataFrame):
return points.iloc[closest_index, :]
else:
return points[closest_index, :]
# ========================================================================
def wide_to_narrow(X, Y, bins):
"""
Convert data from predicting a Y(Zbin,Cbin) as a vector to
individual predictions of Y(Zbin,Cbin) given a Zbin and Cbin label
in the input data.
"""
varname = "variable"
valname = "Y"
x_vars = get_xnames()
dev = pd.concat([X, Y], axis=1)
left = pd.melt(
dev.reset_index(),
id_vars=x_vars + ["index"],
value_vars=Y.columns,
var_name=varname,
value_name=valname,
)
right = pd.concat([bins, pd.DataFrame(Y.columns, columns=[varname])], axis=1)
narrow = pd.merge(left, right, on=[varname]).set_index(["index", varname])
narrow = narrow.reindex(X.index, level="index")
return narrow.drop(columns=[valname]), narrow[valname]
# ========================================================================
def narrow_to_wide(Xn, Yn, idx=None):
"""
Reverse of wide_to_narrow
"""
varname = "variable"
valname = "Y"
x_vars = get_xnames()
bin_names = ["Zbins", "Cbins"]
narrow = pd.concat([Xn, Yn], axis=1).drop(columns=bin_names)
wide = narrow.reset_index().pivot(
index="index", columns="variable", values=x_vars + [valname]
)
# Get X
X = wide[x_vars].stack().xs("Y0000", level=varname)
X.index.name = None
# Get Y
Y = wide[valname]
Y.columns.name = None
Y.index.name = None
# Sort according to original wide
if idx is not None:
X = X.reindex(idx)
Y = Y.reindex(idx)
return X, Y
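# Round-trip sketch: wide_to_narrow indexes each (sample, PDF-bin) pair so the bin
# labels become inputs, and narrow_to_wide inverts the transformation. `bins` is the
# dataframe with the Zbins/Cbins columns used elsewhere in this script.
#   Xn, Yn = wide_to_narrow(X, Y, bins)
#   Xw, Yw = narrow_to_wide(Xn, Yn, idx=X.index)  # recovers the original wide layout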
# ========================================================================
def fix_imbalance(df, n_clusters=10):
"""
Fix an imbalanced data set by over sampling minority classes
"""
x_vars = get_xnames()
classes = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(df[x_vars])
ros = RandomOverSampler(random_state=0)
X_resampled, classes_resampled = ros.fit_sample(df, classes)
return pd.DataFrame(X_resampled, columns=df.columns)
# ========================================================================
def deprecated_gen_conditional_means_dice(df, zbin_edges, cbin_edges, oname):
"""
Generate the conditional means for a dataframe
"""
means, _, _, nbins = stats.binned_statistic_2d(
df.Z, df.C, df.rhoSRC_PV, statistic="mean", bins=[zbin_edges, cbin_edges]
)
# Plot
ma_means = np.ma.array(means, mask=np.isnan(means))
cm = matplotlib.cm.viridis
cm.set_bad("white", 1.0)
plt.figure(0)
plt.clf()
im = plt.imshow(
ma_means.T,
extent=[
np.min(zbin_edges),
np.max(zbin_edges),
np.min(cbin_edges),
np.max(cbin_edges),
],
origin="lower",
aspect="auto",
cmap=cm,
)
plt.colorbar(im)
plt.xlabel("Mixture Fraction")
plt.ylabel("Progress Variable")
plt.title("Conditional means")
plt.tight_layout()
plt.savefig(oname + ".png", format="png", dpi=300, bbox_inches="tight")
# Fix nans
means[np.isnan(means)] = 0.0
# Save for later
np.savez_compressed(
oname + ".npz",
means=means,
zbin_edges=zbin_edges,
cbin_edges=cbin_edges,
nbins=nbins,
)
return means
# ========================================================================
def jensen_shannon_divergence(p, q):
"""
    This will be part of scipy at some point.
See https://github.com/scipy/scipy/pull/8295
We use this implementation for now: https://stackoverflow.com/questions/15880133/jensen-shannon-divergence
:param p: PDF (normalized to 1)
:type p: array
:param q: PDF (normalized to 1)
:type q: array
"""
eps = 1e-13
M = np.clip(0.5 * (p + q), eps, None)
return 0.5 * (stats.entropy(p, M) + stats.entropy(q, M))
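# Quick sanity check (illustrative): identical PDFs give ~0, fully disjoint PDFs give ln(2).
#   p = np.array([0.5, 0.5, 0.0])
#   q = np.array([0.0, 0.5, 0.5])
#   jensen_shannon_divergence(p, p)  # ~0.0
#   jensen_shannon_divergence(p, q)  # ~0.347 (natural-log base, bounded above by ln 2 ~ 0.693)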
# ========================================================================
def jensen_shannon_distance(p, q):
"""
Jensen-Shannon distance
:param p: PDF (normalized to 1)
:type p: array
:param q: PDF (normalized to 1)
:type q: array
"""
return np.sqrt(jensen_shannon_divergence(p, q))
# ========================================================================
def calculate_jsd(y, yp):
"""
Calculate the JSD metric on each PDF prediction
"""
y = np.asarray(y, dtype=np.float64)
yp = np.asarray(yp, dtype=np.float64)
return np.array(
[jensen_shannon_divergence(y[i, :], yp[i, :]) for i in range(y.shape[0])]
)
# ========================================================================
def pdf_distances(base, datadir="data"):
"""
    Compute the minimum JSD between each PDF in the base dice and the PDFs
    of all the other dice.
Only do this with PDFs in the dev set as this is an expensive operation.
:param base: baseline dice
:type base: str
:param datadir: data directory
:type datadir: str
:return: minimum distances
:rtype: dataframe
"""
others = [
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
]
try:
others.remove(base)
except ValueError:
pass
# Read the baseline PDFs
Ydev_base = pd.read_pickle(os.path.join(datadir, f"{base}_ydev.gz"))
# Compute all the distances and keep the minimum for each baseline sample
distances = {}
for k, other in enumerate(others):
# Get pairwise distance matrix
Ydev_other = pd.read_pickle(os.path.join(datadir, f"{other}_ydev.gz"))
d = distance.cdist(Ydev_base, Ydev_other, jensen_shannon_divergence)
# Find the minimum distance from other to base
idx = d.argmin(axis=0)
distances[other] = pd.DataFrame(index=Ydev_other.index)
distances[other]["r"] = d[idx, np.arange(0, Ydev_other.shape[0])]
distances[other]["idx"] = Ydev_base.index[idx]
# Save
with open(os.path.join(datadir, f"{base}_pdf_distances.pkl"), "wb") as f:
pickle.dump(distances, f, pickle.HIGHEST_PROTOCOL)
return distances
# ========================================================================
def clip_normalize(y):
"""
Clip and normalize (along axis=1)
"""
y = np.clip(y, 0, 1)
return y / np.sum(y, axis=1, keepdims=True)
# ========================================================================
def rmse_metric(true, predicted):
return np.sqrt(mean_squared_error(true, predicted))
# ========================================================================
def error_metrics(true, predicted, verbose=False):
"""
Compute some error metrics
"""
rmse = rmse_metric(true, predicted)
mae = mean_absolute_error(true, predicted)
r2 = r2_score(true, predicted)
if verbose:
print(f"RMSE: {rmse:.3f}")
print(f"MAE: {mae:.3f}")
print(f"R2: {r2:.3f}")
return rmse, mae, r2
# ========================================================================
def src_pv_normalization(
dices=[
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
],
datadir="data",
):
"""Compute the normalization constant"""
src_pv_sum = 0.0
count = 0
for dice in dices:
pdf = pd.read_pickle(os.path.join(datadir, f"{dice}_pdfs.gz"))
src_pv_sum += np.sum(pdf.SRC_PV ** 2)
count += pdf.SRC_PV.shape[0]
return np.sqrt(src_pv_sum / count)
# ========================================================================
def convolution_means(pdf, means):
"""
Perform the PDF convolution given means
means can be one for each PDF or means_dice.flatten(order='F')
:param pdf: predictions from model (model.predict(X))
:type pdf: array
:param means: conditional means
:type means: array
"""
return np.sum(means * pdf, axis=1)
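# Usage sketch: recover the filtered progress-variable source term from predicted PDFs
# and the conditional means (flattened in Fortran order to match the PDF bin layout).
#   pdfs = model.predict(Xdev)  # (n_samples, n_bins) array of predicted PDFs
#   src = convolution_means(pdfs, means.flatten(order="F"))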
# ========================================================================
def create_logdir(model_name):
"""Create a log directory for a model"""
time = datetime.datetime.now().strftime("%b%d_%H-%M-%S")
logdir = os.path.abspath(os.path.join("runs", f"{time}_{model_name}"))
if not os.path.exists(logdir):
os.makedirs(logdir)
return logdir
# ========================================================================
def lr_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a Linear Regression
"""
model_name = "LR"
logdir = create_logdir(model_name)
# Training
LR = LinearRegression().fit(Xtrain, Ytrain)
joblib.dump(LR, os.path.join(logdir, model_name + ".pkl"))
mtrain = clip_normalize(LR.predict(Xtrain))
mdev = clip_normalize(LR.predict(Xdev))
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, LR
# ========================================================================
def br_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a Bayesian ridge regression
"""
model_name = "BR"
logdir = create_logdir(model_name)
# Training
BR = MultiOutputRegressor(BayesianRidge()).fit(Xtrain, Ytrain)
joblib.dump(BR, os.path.join(logdir, model_name + ".pkl"))
mtrain = BR.predict(Xtrain)
mdev = BR.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, BR
# ========================================================================
def pr_training(Xtrain, Xdev, Ytrain, Ydev, order=6):
"""
Train using a polynomial regression
"""
model_name = f"PR{order}"
logdir = create_logdir(model_name)
# Training
PR = Pipeline(
[
("poly", PolynomialFeatures(degree=order)),
("linear", LinearRegression(fit_intercept=False)),
]
)
PR = PR.fit(Xtrain, Ytrain)
joblib.dump(PR, os.path.join(logdir, model_name + ".pkl"))
mtrain = clip_normalize(PR.predict(Xtrain))
mdev = clip_normalize(PR.predict(Xdev))
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, PR
# ========================================================================
def svr_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a support vector regression
"""
model_name = "SVR"
logdir = create_logdir(model_name)
# Training
svr = MultiOutputRegressor(SVR(kernel="rbf", epsilon=1e-3))
grid_param_svr = {
"estimator__C": [1e0, 1e1, 1e2, 1e3],
"estimator__gamma": np.logspace(-2, 2, 5),
}
SR = GridSearchCV(estimator=svr, param_grid=grid_param_svr, cv=5, n_jobs=-1).fit(
Xtrain, Ytrain
)
print("Best estimator and parameter set found on training set:")
print(SR.best_estimator_)
print(SR.best_params_)
joblib.dump(SR, os.path.join(logdir, model_name + ".pkl"))
mtrain = SR.predict(Xtrain)
mdev = SR.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, SR
# ========================================================================
def gp_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a gaussian process regression
"""
model_name = "GP"
logdir = create_logdir(model_name)
# Training
kernel = 6.2 ** 2 * Matern(
length_scale=[1, 1, 1, 1], length_scale_bounds=(1e-1, 1e4), nu=1.5
) + WhiteKernel(noise_level=2, noise_level_bounds=(1e-1, 3e0))
GP = GaussianProcessRegressor(
kernel=kernel, alpha=0, n_restarts_optimizer=3, normalize_y=True
).fit(Xtrain, Ytrain)
print("Trained GP kernel:", GP.kernel_)
joblib.dump(GP, os.path.join(logdir, model_name + ".pkl"))
mtrain = GP.predict(Xtrain)
mdev = GP.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, GP
# ========================================================================
def count_rf_parameters(model):
return np.sum([t.tree_.node_count for t in model.estimators_])
# ========================================================================
def rf_training(Xtrain, Xdev, Ytrain, Ydev, nestim=100, max_depth=30):
"""
Train using a Random Forest Regression
"""
# Setup
model_name = "RF"
logdir = create_logdir(model_name)
np.random.seed(985_721)
# Training
start = time.time()
RF = RandomForestRegressor(n_estimators=nestim, max_depth=max_depth, n_jobs=1).fit(
Xtrain, Ytrain
)
end = time.time() - start
joblib.dump(RF, os.path.join(logdir, model_name + ".pkl"))
print("Trained RandomForest")
print(" Feature importance", RF.feature_importances_)
mtrain = RF.predict(Xtrain)
mdev = RF.predict(Xdev)
# Summarize training
summarize_training(
Ytrain,
mtrain,
Ydev,
mdev,
fname=os.path.join(logdir, model_name + ".log"),
timing=end,
dofs=count_rf_parameters(RF),
)
return mtrain, mdev, RF
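# Usage sketch: train the random forest on the scaled arrays produced by gen_training
# and score the dev predictions with the JSD metric defined above.
#   mtrain, mdev, RF = rf_training(Xtrain, Xdev, Ytrain, Ydev, nestim=100, max_depth=30)
#   jsd = calculate_jsd(Ydev, mdev)
#   print(np.mean(jsd))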
# ========================================================================
def betaPDF(mean, var, centers, eps=1e-6):
"""
Calculate beta PDF
:param mean: mean
:type mean: float
:param var: variance
:type var: float
:param centers: bin centers
:type centers: array
:param eps: smallness threshold
:type eps: float
:return: pdf
:rtype: array
"""
pdf = np.zeros(centers.shape)
if var < eps:
if mean > np.max(centers):
pdf[-1] = 1.0
return pdf
else:
idx = np.argmax(centers > mean)
if (idx == 0) or (idx == len(pdf) - 1):
pdf[idx] = 1.0
return pdf
else:
pdf[idx - 1] = (centers[idx] - mean) / (centers[idx] - centers[idx - 1])
pdf[idx] = (mean - centers[idx - 1]) / (centers[idx] - centers[idx - 1])
return pdf
elif var > mean * (1.0 - mean):
pdf[0] = 1.0 - mean
pdf[-1] = mean
return pdf
else:
a = mean * (mean * (1.0 - mean) / var - 1.0)
b = a / mean - a
ni = 1024
x = np.linspace(0, 1, ni)
pdf = np.interp(centers, x, stats.beta.pdf(x, a, b))
pdf /= np.sum(pdf)
return pdf
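# Quick usage sketch: discretize a beta PDF with the given mean and variance on the
# Z bin centers; the returned array is normalized to sum to 1.
#   centers = np.linspace(0.0, 1.0, 32)
#   pdf = betaPDF(0.3, 0.01, centers)
#   pdf.sum()  # ~1.0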
# ========================================================================
class AnalyticalPDFModel:
"""Generic analytical PDF model"""
def __init__(self, zbin_edges, cbin_edges):
"""
:param zbin_edges: bin edges for Z
:type bins: array
:param cbin_edges: bin edges for C
:type bins: array
"""
self.zbin_edges = zbin_edges
self.cbin_edges = cbin_edges
self.eps = 1e-13
self.cscale = cbin_edges[-1]
self.nc = len(cbin_edges) - 1
self.nz = len(zbin_edges) - 1
self.cbin_centers = utilities.edges_to_midpoint(cbin_edges)
self.zbin_centers = utilities.edges_to_midpoint(zbin_edges)
self.cbin_widths = np.diff(cbin_edges)
self.zbin_widths = np.diff(zbin_edges)
self.seed = 9_023_457
# ========================================================================
class DD(AnalyticalPDFModel):
"""
delta(Z) - delta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
# Get indexes for the bins
self.zbin_edges[-1] += self.eps
self.cbin_edges[-1] += self.eps
idx_z = np.digitize(X.Z, self.zbin_edges)
idx_c = np.digitize(X.C, self.cbin_edges)
# Generate delta PDFs
return np.array(
[
signal.unit_impulse(
(self.nz, self.nc), (idx_z[i] - 1, idx_c[i] - 1)
).flatten(order="F")
for i in range(X.shape[0])
]
)
# ========================================================================
class BD(AnalyticalPDFModel):
"""
beta(Z) - delta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
self.cbin_edges[-1] += self.eps
idx_c = np.digitize(X.C, self.cbin_edges)
# Generate beta-delta PDFs
npdfs = X.shape[0]
pdfs = np.zeros((X.shape[0], self.nz * self.nc))
np.random.seed(self.seed)
for i in range(npdfs):
c_pdf = signal.unit_impulse(self.nc, idx_c[i] - 1)
z_pdf = betaPDF(X.Z.iloc[i], X.Zvar.iloc[i], self.zbin_centers)
pdfs[i, :] = np.outer(z_pdf, c_pdf).flatten(order="F")
return pdfs
# ========================================================================
class BB(AnalyticalPDFModel):
"""
beta(Z) - beta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
# Generate beta-delta PDFs
npdfs = X.shape[0]
pdfs = np.zeros((X.shape[0], self.nz * self.nc))
np.random.seed(self.seed)
for i in range(npdfs):
c_pdf = betaPDF(
X.C.iloc[i] / self.cscale,
X.Cvar.iloc[i] / (self.cscale ** 2),
self.cbin_centers / self.cscale,
)
z_pdf = betaPDF(X.Z.iloc[i], X.Zvar.iloc[i], self.zbin_centers)
pdfs[i, :] = np.outer(z_pdf, c_pdf).flatten(order="F")
return pdfs
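# Usage sketch for the analytical baselines: bin edges come from the binned training
# data, and X must carry the Z, Zvar, C, Cvar columns used throughout this script.
#   bb = BB(zbin_edges, cbin_edges)
#   pdfs = bb.predict(Xdev)  # (n_samples, nz * nc), bins flattened in Fortran order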
# ========================================================================
# Torch Variable handler
class VariableHandler:
def __init__(self, device=torch.device("cpu"), dtype=torch.float):
self.device = device
self.dtype = dtype
def tovar(self, input):
return Variable(torch.as_tensor(input, dtype=self.dtype, device=self.device))
# ========================================================================
# Network Architecture from infoGAN (https://arxiv.org/abs/1606.03657)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.input_height = 32
self.input_width = 64
self.input_dim = 1 + 4
self.output_dim = 1
self.conv = nn.Sequential(
nn.Conv2d(self.input_dim, 64, 4, 2, 1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 128, 4, 2, 1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
)
self.fc = nn.Sequential(
nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4), 1024),
nn.BatchNorm1d(1024),
nn.LeakyReLU(0.2),
nn.Linear(1024, self.output_dim),
nn.Sigmoid(),
)
def forward(self, input, label):
x = torch.cat([input, label], 1)
x = self.conv(x)
x = x.view(-1, 128 * (self.input_height // 4) * (self.input_width // 4))
x = self.fc(x)
return x
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
# ========================================================================
class SoftmaxImage(nn.Module):
"""Apply Softmax on an image.
Softmax2d applies on second dimension (i.e. channels), which is
not what I want. This applies along the H and W dimensions, where
(N, C, H, W) is the size of the input.
"""
def __init__(self, channels, height, width):
super(SoftmaxImage, self).__init__()
self.channels = channels
self.height = height
self.width = width
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
x = x.view(-1, self.channels, self.height * self.width)
x = self.softmax(x)
x = x.view(-1, self.channels, self.height, self.width)
return x
# ========================================================================
# Network Architecture from infoGAN (https://arxiv.org/abs/1606.03657)
class Generator(nn.Module):
def __init__(self, noise_size, vh=None):
super(Generator, self).__init__()
self.input_height = 32
self.input_width = 64
self.noise_size = noise_size
self.input_dim = noise_size + 4
self.output_dim = 1
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.fc = nn.Sequential(
nn.Linear(self.input_dim, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(),
nn.Linear(1024, 128 * (self.input_height // 4) * (self.input_width // 4)),
nn.BatchNorm1d(128 * (self.input_height // 4) * (self.input_width // 4)),
nn.ReLU(),
)
self.deconv = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, 2, 1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
SoftmaxImage(1, self.input_height, self.input_width),
)
def forward(self, input, label):
x = torch.cat([input, label], 1)
x = self.fc(x)
x = x.view(-1, 128, (self.input_height // 4), (self.input_width // 4))
x = self.deconv(x)
return x
def inference(self, x):
noise = self.vh.tovar(torch.rand(x.shape[0], self.noise_size))
return self.forward(noise, x)
def predict(self, X, batch_size=64, nestim=1):
X = np.asarray(X, dtype=np.float64)
n = X.shape[0]
meval = np.zeros((n, self.input_height * self.input_width))
for batch, i in enumerate(range(0, n, batch_size)):
slc = np.s_[i : i + batch_size, :]
xsub = self.vh.tovar(X[slc])
            # average over the nestim stochastic evaluations (axis=0 keeps one PDF per sample)
            meval[slc] = np.mean(
                [
                    self.inference(xsub).cpu().data.numpy().reshape(xsub.shape[0], -1)
                    for j in range(nestim)
                ],
                axis=0,
            )
return meval
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
# ========================================================================
def cgan_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a conditional GAN
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
dtype = torch.double
vh = VariableHandler(device=device, dtype=dtype)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# Sizes
batch_size = 64
input_height = 32
input_width = 64
nsample_lbls = 16
nsample_noise = 10
noise_size = 100
nlabels = Xtrain.shape[1]
torch.manual_seed(5_465_462)
# Construct the G and D models
D = Discriminator().to(device=device, dtype=dtype)
G = Generator(noise_size, vh).to(device=device, dtype=dtype)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
lr_D = 1e-3
lr_G = 1e-3
decay_rate = 0.98
# Loss and optimizers
criterion = nn.BCELoss().to(device=device)
D_optimizer = optim.SGD(D.parameters(), lr=lr_D, momentum=0.5, nesterov=True)
G_optimizer = optim.SGD(G.parameters(), lr=lr_G, momentum=0.5, nesterov=True)
D_scheduler = optim.lr_scheduler.StepLR(D_optimizer, step_size=1, gamma=decay_rate)
G_scheduler = optim.lr_scheduler.StepLR(G_optimizer, step_size=1, gamma=decay_rate)
# Tensorboard writer
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "CGAN"
# Validation images, labels and noise
xdev_sub = vh.tovar(Xdev[:nsample_lbls, :])
ydev_sub = vh.tovar(Ydev[:nsample_lbls, :])
valimgs = ydev_sub.view(nsample_lbls, -1, input_height, input_width)
vallbl = xdev_sub.expand(input_height, input_width, nsample_lbls, nlabels).permute(
2, 3, 0, 1
)
grid = vutils.make_grid(valimgs, nrow=nsample_lbls, normalize=True, scale_each=True)
writer.add_image("True PDF", grid, 0)
fixed_noise = vh.tovar(
torch.rand(nsample_noise, noise_size)
.to(device=device)
.repeat(1, nsample_lbls)
.reshape(-1, noise_size)
)
fixed_labels = xdev_sub.repeat(nsample_noise, 1)
# Graphs in Tensorboard
xdummy = vh.tovar(torch.rand(1, 1, input_height, input_width))
ldummy = vh.tovar(torch.rand(1, nlabels, input_height, input_width))
writer.add_graph(D, (xdummy, ldummy), verbose=False)
writer.add_graph(G, (fixed_noise, fixed_labels), verbose=False)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
D.train()
for epoch in range(nepochs):
G.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_x = vh.tovar(Xtrain[indices, :])
batch_y = vh.tovar(Ytrain[indices, :])
# Reshape these for the D network
actual_batch_size = batch_x.shape[0]
labels = batch_x.expand(
input_height, input_width, actual_batch_size, nlabels
).permute(2, 3, 0, 1)
imgs = batch_y.view(actual_batch_size, -1, input_height, input_width)
noise = vh.tovar(torch.rand((actual_batch_size, noise_size)))
# Real and fake labels
real_label = vh.tovar(torch.ones(actual_batch_size, 1))
fake_label = vh.tovar(torch.zeros(actual_batch_size, 1))
# update the D network
D_optimizer.zero_grad()
D_real = D(imgs, labels)
D_real_loss = criterion(D_real, real_label)
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
D_fake_loss = criterion(D_fake, fake_label)
D_loss = D_real_loss + D_fake_loss
writer.add_scalar("D_real_loss", D_real_loss.item(), step)
writer.add_scalar("D_fake_loss", D_fake_loss.item(), step)
writer.add_scalar("D_loss", D_loss.item(), step)
D_loss.backward()
D_optimizer.step()
# update G network
G_optimizer.zero_grad()
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
G_loss = criterion(D_fake, real_label)
writer.add_scalar("G_loss", G_loss.item(), step)
G_loss.backward()
G_optimizer.step()
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], D_loss: {4:.4e}, G_loss: {5:.4e}".format(
epoch + 1,
nepochs,
batch + 1,
nbatches,
D_loss.item(),
G_loss.item(),
)
)
# Adaptive time step
G_scheduler.step()
D_scheduler.step()
for param_group in D_optimizer.param_groups:
print("Current learning rate for discriminator:", param_group["lr"])
for param_group in G_optimizer.param_groups:
print(" for generator:", param_group["lr"])
# Visualize results in Tensorboard
G.eval()
samples = G(fixed_noise, fixed_labels)
grid = vutils.make_grid(
samples, nrow=nsample_lbls, normalize=True, scale_each=True
)
writer.add_image("Generator", grid, step)
# Save the models
torch.save(G.state_dict(), os.path.join(logdir, model_name + "_G.pkl"))
torch.save(D.state_dict(), os.path.join(logdir, model_name + "_D.pkl"))
writer.close()
# Stuff we need to do to get plots...
G.eval()
mtrain = G.predict(Xtrain)
mdev = G.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, G
# ========================================================================
# Conditional variational autoencoder
# CVAE paper: Learning Structured Output Representation using Deep Conditional Generative Models
# https://papers.nips.cc/paper/5775-learning-structured-output-representation-using-deep-conditional-generative-models
# code adapted from https://github.com/timbmg/VAE-CVAE-MNIST/blob/master/models.py
class CVAE(nn.Module):
def __init__(
self, encoder_layer_sizes, latent_size, decoder_layer_sizes, nlabels=0, vh=None
):
super(CVAE, self).__init__()
self.latent_size = latent_size
self.decoder_layer_sizes = decoder_layer_sizes
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.encoder = Encoder(encoder_layer_sizes, latent_size, nlabels).to(
device=vh.device, dtype=vh.dtype
)
self.decoder = Decoder(decoder_layer_sizes, latent_size, nlabels).to(
device=vh.device, dtype=vh.dtype
)
def forward(self, x, c):
batch_size = x.size(0)
means, log_var = self.encoder(x, c)
std = torch.exp(0.5 * log_var)
eps = self.vh.tovar(torch.randn([batch_size, self.latent_size]))
z = eps * std + means
recon_x = self.decoder(z, c)
return recon_x, means, log_var, z
def inference(self, c):
z = self.vh.tovar(torch.randn(c.shape[0], self.latent_size))
recon_x = self.decoder(z, c)
return recon_x
def predict(self, X, batch_size=64, nestim=1):
X = np.asarray(X, dtype=np.float64)
n = X.shape[0]
meval = np.zeros((n, self.decoder_layer_sizes[-1]))
for batch, i in enumerate(range(0, n, batch_size)):
slc = np.s_[i : i + batch_size, :]
c = self.vh.tovar(X[slc])
meval[slc] = np.mean(
[self.inference(c).cpu().data.numpy() for j in range(nestim)], axis=0
)
return meval
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
class Encoder(nn.Module):
def __init__(self, layer_sizes, latent_size, nlabels):
super(Encoder, self).__init__()
self.MLP = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
self.MLP.add_module(name="L%i" % (i), module=nn.Linear(in_size, out_size))
self.MLP.add_module(name="A%i" % (i), module=nn.ReLU())
self.linear_means = nn.Linear(layer_sizes[-1], latent_size)
self.linear_log_var = nn.Linear(layer_sizes[-1], latent_size)
def forward(self, x, c):
x = torch.cat((x, c), dim=-1)
x = self.MLP(x)
means = self.linear_means(x)
log_vars = self.linear_log_var(x)
return means, log_vars
class Decoder(nn.Module):
def __init__(self, layer_sizes, latent_size, nlabels):
super(Decoder, self).__init__()
self.MLP = nn.Sequential()
input_size = latent_size + nlabels
for i, (in_size, out_size) in enumerate(
zip([input_size] + layer_sizes[:-1], layer_sizes)
):
self.MLP.add_module(name="L%i" % (i), module=nn.Linear(in_size, out_size))
if i + 1 < len(layer_sizes):
self.MLP.add_module(name="A%i" % (i), module=nn.ReLU())
else:
self.MLP.add_module(name="softmax", module=nn.Softmax(dim=1))
def forward(self, z, c):
z = torch.cat((z, c), dim=-1)
x = self.MLP(z)
return x
def loss_fn(recon_x, x, mean, log_var):
BCE = nn.functional.binary_cross_entropy(recon_x, x, reduction="sum")
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
return BCE + KLD
def cvae_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a conditional VAE
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
vh = VariableHandler(device=device, dtype=torch.double)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# Sizes
nlabels = Xtrain.shape[1]
input_size = Ytrain.shape[1]
batch_size = 64
encoder_layer_sizes = [input_size + nlabels, 512, 256]
latent_size = 10
decoder_layer_sizes = [256, 512, input_size]
torch.manual_seed(5_465_462)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
lr = 1e-3
# CVAE model
cvae = CVAE(
encoder_layer_sizes=encoder_layer_sizes,
latent_size=latent_size,
decoder_layer_sizes=decoder_layer_sizes,
nlabels=nlabels,
vh=vh,
).to(device=device)
# Optimizer
optimizer = optim.Adam(cvae.parameters(), lr=lr)
# Tensorboard writer
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "CVAE"
# Graphs in Tensorboard
xdummy = vh.tovar(torch.rand(1, input_size))
ldummy = vh.tovar(torch.rand(1, nlabels))
writer.add_graph(cvae, (xdummy, ldummy), verbose=False)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
start = time.time()
for epoch in range(nepochs):
cvae.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_c = vh.tovar(Xtrain[indices, :])
batch_x = vh.tovar(Ytrain[indices, :])
# Forward model
recon_x, mean, log_var, z = cvae(batch_x, batch_c)
# Loss
loss = loss_fn(recon_x, batch_x, mean, log_var)
writer.add_scalar("loss", loss, step)
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], Loss: {4:.4e}".format(
epoch + 1, nepochs, batch + 1, nbatches, loss
)
)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Save the models
torch.save(cvae.state_dict(), os.path.join(logdir, model_name + ".pkl"))
end = time.time() - start
writer.close()
cvae.eval()
mtrain = cvae.predict(Xtrain)
mdev = cvae.predict(Xdev)
# Summarize training
summarize_training(
Ytrain,
mtrain,
Ydev,
mdev,
fname=os.path.join(logdir, model_name + ".log"),
timing=end,
dofs=count_parameters(cvae),
)
return mtrain, mdev, cvae
# ========================================================================
# Fully connected NN
class Net(nn.Module):
def __init__(self, input_size, layer_sizes, vh=None):
super(Net, self).__init__()
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.input_size = input_size
self.layer_sizes = layer_sizes
self.MLP = nn.Sequential()
for i, (in_size, out_size) in enumerate(
zip([input_size] + layer_sizes[:-1], layer_sizes)
):
if i + 1 < len(layer_sizes):
self.MLP.add_module(
name="L%i" % (i), module=nn.Linear(in_size, out_size)
)
self.MLP.add_module(name="A%i" % (i), module=nn.LeakyReLU())
self.MLP.add_module(name="B%i" % (i), module=nn.BatchNorm1d(out_size))
else:
self.MLP.add_module(
name="L%i" % (i), module=nn.Linear(in_size, out_size)
)
self.MLP.add_module(name="softmax", module=nn.Softmax(dim=1))
def forward(self, x):
return self.MLP(x)
def predict(self, X, batch_size=64):
        X = np.asarray(X, dtype=np.float64)
import simpy
import sys
sys.path
import random
import numpy as np
import torch
from tabulate import tabulate
import sequencing
import routing
class machine:
def __init__(self, env, index, *args, **kwargs):
# initialize the environment of simulation
self.env = env
self.m_idx = index
# each machine will have an independent storage for each type of job information
# initialize all job-related information storage as empty lists
self.queue = []
self.sequence_list = [] # sequence of all queuing jobs
self.pt_list = [] # processing time
self.remaining_pt_list = [] # average processing time
self.due_list = [] # due for each job
self.arrival_time_list = [] # time that job join the queue
self.waited_time = [] # time that job stayed in the queue
self.slack_upon_arrival = [] # slack record of queuing jobs
self.no_jobs_record = []
# the time that agent do current and next decision
self.decision_point = 0
self.release_time = 0
# track the utilization
self.cumulative_run_time = 0
self.global_exp_tard_rate = 0
# Initialize the possible events during production
self.sufficient_stock = self.env.event()
# working condition in shut down and breakdowns
self.working_event = self.env.event()
# this is the time that machine needs to recover from breakdown
# initial value is 0, later will be changed by "breakdown_creation" module
self.restart_time = 0
self.count = 0
self.count2 = 0
        # Initialize the events' states
        # if the initial queue is not empty, stock is sufficient from the start
        if len(self.queue):
            self.sufficient_stock.succeed()
# no shutdown, no breakdown at beginning
self.working_event.succeed()
# print out the information of initial jobs
self.print_info = True
self.routing_global_reward = False
# initialize the data for learning and recordiing
self.breakdown_record = []
# use exponential moving average to measure slack and tardiness
self.EMA_slack_change = 0
self.EMA_realized_tardiness = 0
self.EMA_alpha = 0.1
# set the sequencing rule before start of simulation
if 'rule' in kwargs:
order = "self.job_sequencing = sequencing." + kwargs['rule']
try:
exec(order)
print("machine {} uses {} sequencing rule".format(self.m_idx, kwargs['rule']))
except:
print("Rule assigned to machine {} is invalid !".format(self.m_idx))
raise Exception
else:
# default sequencing rule is FIFO
self.job_sequencing = sequencing.FIFO
# record extra data for learning, initially not activated, can be activated by brains
self.sequencing_learning_event = self.env.event()
self.routing_learning_event = self.env.event()
'''
    1. Below are the functions that perform the simulation,
    including production, starvation and breakdown
'''
# this function should be called after __init__ to avoid deadlock
# after the creation of all machines, initial jobs and work centers
# pass the list of work centers to all machines so the shopfloor is established
# the initial jobs are allocated through job_creation module
def initialization(self, machine_list, workcenter_list, job_creator, assigned_wc):
# knowing other machines, workcenters, and the job creator
# so the machine agent can manipulate other agents'variables
self.m_list = machine_list
self.m_no = len(self.m_list)
self.wc_list = workcenter_list
self.wc = assigned_wc
self.wc_idx = assigned_wc.wc_idx
self.no_ops = len(self.wc_list)
self.job_creator = job_creator
# initial information
if self.print_info:
print('machine {} belongs to work center {}'.format(self.m_idx,assigned_wc.wc_idx))
print('Initial %s jobs at machine %s are:'%(len(self.queue), self.m_idx))
job_info = [[self.queue[i],self.sequence_list[i], self.pt_list[i], self.slack_upon_arrival[i], self.due_list[i]] for i in range(len(self.queue))]
print(tabulate(job_info, headers=['idx.','sqc.','proc.t.','slack','due']))
print('************************************')
self.state_update_all()
self.update_global_info_progression()
self.env.process(self.production())
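    # Usage sketch (hypothetical shop-floor setup; the job creator and work centers come
    # from the companion modules imported above, and the names here are illustrative):
    #   env = simpy.Environment()
    #   machines = [machine(env, i, rule='FIFO') for i in range(m_no)]
    #   for m, wc in zip(machines, assigned_wcs):
    #       m.initialization(machines, workcenters, job_creator, wc)
    #   env.run(until=1000)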
# The main function, simulates the production
def production(self):
        # first check the initial queue/stock level; if none, starvation begins
if not len(self.queue):
# triggered the starvation
yield self.env.process(self.starvation())
# update information of queuing jobs at the end of initial phase
self.state_update_all()
        # the loop that will run till the end of simulation
while True:
            # record the time of the sequencing decision (select a job to process), used as the index of production record in job creator
self.decision_point = self.env.now
self.no_jobs_record.append(len(self.queue))
# if we have more than one queuing jobs, sequencing is required
if len(self.queue)-1:
# determine the next job to be processed
# the returned value is selected job's self.position in queue
self.position = self.job_sequencing(self.sequencing_data_generation())
self.job_idx = self.queue[self.position]
self.before_operation()
self.count += 1
if len(self.queue)-2:
self.count2 += 1
#print("Sequencing: Machine %s choose job %s at time %s"%(self.m_idx,self.job_idx,self.env.now))
# otherwise simply select the first(only) one
else:
self.position = 0
self.job_idx = self.queue[self.position]
#print("One queue: Machine %s process job %s at time %s"%(self.m_idx,self.job_idx,self.env.now))
# retrive the information of job
pt = self.pt_list[self.position][self.m_idx] # processing time of the selected job
wait = self.env.now - self.arrival_time_list[self.position] # time that job waited before being selected
# after determined the next job to be processed, update a bunch of data
self.update_global_info_progression()
self.update_global_info_anticipation(pt)
self.record_production(pt, wait) # record these information
# The production process (yield the processing time of operation)
yield self.env.timeout(pt)
self.cumulative_run_time += pt
#print("completion: Job %s leave machine %s at time %s"%(self.queue[self.position],self.m_idx,self.env.now))
# transfer job to next workcenter or delete it, and update information
self.after_operation()
# check if routing learning mode is on, if yes, call the function of WORKCENTER, NOT ITSELF!!!
# examine whether the scheduled shutdown is triggered
if not self.working_event.triggered:
yield self.env.process(self.breakdown())
# after restart, update information of queuing jobs
self.state_update_all()
# check the queue/stock level, if none, starvation begines
if not len(self.queue):
# triggered the starvation
yield self.env.process(self.starvation())
# after replenishement, update information of queuing jobs
self.state_update_all()
def starvation(self):
#print('STARVATION *BEGIN*: machine %s at time %s' %(self.m_idx, self.env.now))
# set the self.sufficient_stock event to untriggered
self.sufficient_stock = self.env.event()
# proceed only if the sufficient_stock event is triggered by new job arrival
yield self.sufficient_stock
# examine whether the scheduled shutdown is triggered
if not self.working_event.triggered:
yield self.env.process(self.breakdown())
#print('STARVATION *END*: machine %s at time: %s'%(self.m_idx, self.env.now))
def breakdown(self):
print('********', self.m_idx, "breakdown at time", self.env.now, '********')
start = self.env.now
# simply update the available time of that machines
self.available_time = self.restart_time + self.cumulative_pt
# suspend the production here, untill the working_event is triggered
yield self.working_event
self.breakdown_record.append([(start, self.env.now-start), self.m_idx])
        print('********', self.m_idx, 'breakdown ended, restart production at time', self.env.now, '********')
'''
    2. Below are the functions called before and after each operation
    to maintain records, and to transfer the finished job to the next work center or out of the system
'''
# update lots information that will be used for calculating the rewards
def before_operation(self):
# number of jobs that to be sequenced, and their ttd and slack
self.waiting_jobs = len(self.queue)
time_till_due = np.array(self.due_list) - self.env.now
self.before_op_ttd = time_till_due
self.before_op_ttd_chosen = self.before_op_ttd[self.position]
self.before_op_ttd_loser = np.delete(self.before_op_ttd, self.position)
tardy_jobs = len(time_till_due[time_till_due<0])
#self.before_op_realized_tard_rate =tardy_jobs/len(self.queue)
#print('before realized tard rate: ', self.before_op_realized_tard_rate)
initial_slack = self.slack_upon_arrival.copy()
self.before_op_remaining_pt = self.remaining_job_pt + self.current_pt
self.before_op_remaining_pt_chosen = self.before_op_remaining_pt[self.position]
self.before_op_remaining_pt_loser = np.delete(self.before_op_remaining_pt, self.position)
current_slack = time_till_due - self.before_op_remaining_pt
exp_tardy_jobs = len(current_slack[current_slack<0])
# get information of all jobs before operation
self.before_op_exp_tard = current_slack[current_slack<0]
self.before_op_sum_exp_tard = self.before_op_exp_tard.sum()
self.before_op_slack = current_slack
self.before_op_sum_slack = self.before_op_slack.sum()
# calculate the critical level of all queuing jobs
self.critical_level = 1 - current_slack / 100
self.critical_level_chosen = self.critical_level[self.position]
#print(current_slack, self.critical_level,self.critical_level_chosen)
# get the information of the selected job
self.pt_chosen = self.current_pt[self.position]
self.initial_slack_chosen = initial_slack[self.position]
self.before_op_slack_chosen = current_slack[self.position]
self.before_op_exp_tard_chosen = min(0,self.before_op_slack_chosen)
self.before_op_winq_chosen = self.winq[self.position]
# get the information of jobs that haven't been selected (loser)
self.before_op_slack_loser = np.delete(current_slack, self.position) # those haven't been selected
self.critical_level_loser = np.delete(self.critical_level, self.position)
self.before_op_sum_exp_tard_loser = self.before_op_slack_loser[self.before_op_slack_loser<0].sum()
self.before_op_sum_slack_loser = self.before_op_slack_loser.sum()
self.before_op_winq_loser = np.delete(self.winq, self.position)
#print('before',self.m_idx,self.env.now,slack,slack_loser,self.before_op_exp_tard,self.current_pt,self.position)
#self.before_op_avg_slack = slack.sum()/len(self.queue)
#self.before_op_expected_tard_rate = exp_tardy_jobs/len(self.queue)
#print('before expected tard rate: ', self.before_op_expected_tard_rate)
# transfer unfinished job to next workcenter, or delete finished job from record
# and update the data of queuing jobs, EMA_tardiness etc.
def after_operation(self):
# check if this is the last operation of job
# if the sequence is not empty, any value > 0 is True
if len(self.sequence_list[self.position]):
#print('OPERATION: Job %s output from machine %s at time %s'%(self.queue[self.position], self.m_idx, self.env.now))
next_wc = self.sequence_list[self.position][0]
# add the job to next work center's queue
self.wc_list[next_wc].queue.append(self.queue.pop(self.position))
# add the information of this job to next work center's storage
self.wc_list[next_wc].sequence_list.append(np.delete(self.sequence_list.pop(self.position),0))
self.wc_list[next_wc].pt_list.append(self.pt_list.pop(self.position))
# get the expected processing time of remaining processes
remaining_ptl = self.remaining_pt_list.pop(self.position)
self.wc_list[next_wc].remaining_pt_list.append(remaining_ptl)
# get old and current_slack time of the job, meanwhile add due to next wc's storage
current_slack = self.due_list[self.position] - self.env.now - np.sum(remaining_ptl.max(axis=1))
self.wc_list[next_wc].due_list.append(self.due_list.pop(self.position))
estimated_slack_time = self.slack_upon_arrival.pop(self.position)
del self.arrival_time_list[self.position]
# calculate slack gain/loss
self.slack_change = current_slack - estimated_slack_time
self.critical_level_R = 1 - estimated_slack_time / 100
# record the slack change
self.record_slack_tardiness()
# calculate the EMA_slack_change
self.EMA_slack_change += self.EMA_alpha * (self.slack_change - self.EMA_slack_change)
# and activate the dispatching of next work center
try:
self.wc_list[next_wc].routing_event.succeed()
except:
pass
# after transfered the job, update information of queuing jobs
self.state_update_all()
# clear some global information
self.update_global_info_after_operation()
# check if sequencing learning mode is on, and queue is not 0
if self.routing_learning_event.triggered:
try:
self.wc.build_routing_experience(self.job_idx,self.slack_change, self.critical_level_R)
except:
pass
if self.sequencing_learning_event.triggered:
self.complete_experience()
# if this is the last process, then simply delete job information
else:
#print('**FINISHED: Job %s from machine %s at time %s'%(self.queue[self.position], self.m_idx, self.env.now))
# calculate tardiness of job, and update EMA_realized_tardiness
self.tardiness = np.max([0, self.env.now - self.due_list[self.position]])
#print("realized tardiness is:", tardiness)
self.EMA_realized_tardiness += self.EMA_alpha * (self.tardiness - self.EMA_realized_tardiness)
#print(self.m_idx,self.EMA_realized_tardiness)
# delete this job from queue
del self.queue[self.position]
# delete the information of this job
del self.sequence_list[self.position]
del self.pt_list[self.position]
del self.remaining_pt_list[self.position]
# get old and current_slack time of the job
current_slack = self.due_list[self.position] - self.env.now # there's no more operations for this job
del self.due_list[self.position]
estimated_slack_time = self.slack_upon_arrival.pop(self.position)
del self.arrival_time_list[self.position]
# kick the job out of system
self.job_creator.record_job_departure()
#print(self.job_creator.in_system_job_no)
# calculate slack gain/loss
self.slack_change = current_slack - estimated_slack_time
self.critical_level_R = 1 - estimated_slack_time / 100
#print(current_slack, estimated_slack_time, self.critical_level_R)
# record the slack change
self.record_slack_tardiness(self.tardiness)
#print("estimated_slack_time: %s / current_slack: %s"%(estimated_slack_time, current_slack))
# calculate the EMA_slack_change
self.EMA_slack_change += self.EMA_alpha * (self.slack_change - self.EMA_slack_change)
# after transfered the job, update information of queuing jobs
self.state_update_all()
# clear some global information
self.update_global_info_after_operation()
# check if sequencing learning mode is on, and queue is not 0
# if yes, since the job is finished and tardiness is realized, construct complete experience
if self.routing_learning_event.triggered:
try:
self.wc.build_routing_experience(self.job_idx,self.slack_change, self.critical_level_R)
except:
pass
if self.sequencing_learning_event.triggered:
self.complete_experience()
if self.routing_global_reward:
self.add_global_reward_RA()
'''
    3. downwards are functions that relate to information update and exchange,
    especially the information that will be used by other agents on the shop floor
'''
def record_production(self, pt, wait):
# add the details of operation to job_creator's repository
self.job_creator.production_record[self.job_idx][0].append((self.env.now,pt))
self.job_creator.production_record[self.job_idx][1].append(self.m_idx)
self.job_creator.production_record[self.job_idx][2].append(wait)
def record_slack_tardiness(self, *args):
self.job_creator.production_record[self.job_idx][4].append(self.slack_change)
if len(args):
self.job_creator.production_record[self.job_idx].append((self.env.now,args[0]))
# call this function after the completion of operation
def state_update_all(self):
# processing time of current process of each queuing job
self.current_pt = np.array([x[self.m_idx] for x in self.pt_list])
        # cumulative processing time of all queuing jobs on this machine
self.cumulative_pt = self.current_pt.sum()
# the time the machine will be available (become idle or breakdown ends)
self.available_time = self.env.now + self.cumulative_pt
# expected cumulative processing time (worst possible) of all unfinished processes for each queuing job
self.remaining_job_pt = np.array([sum(x.mean(axis=1)) for x in self.remaining_pt_list])
self.remaining_no_op = np.array([len(x) for x in self.remaining_pt_list])
self.next_pt = np.array([x[0].mean() if len(x) else 0 for x in self.remaining_pt_list])
# the completion rate of all queuing jobs
self.completion_rate = np.array([(self.no_ops-len(x)-1)/self.no_ops for x in self.remaining_pt_list])
# number of queuing jobs
self.que_size = len(self.queue)
# time till due and slack time of jobs
self.time_till_due = np.array(self.due_list) - self.env.now
self.slack = self.time_till_due - self.current_pt - self.remaining_job_pt
# time that job spent in the queue
self.waited_time = self.env.now - np.array(self.arrival_time_list)
# WINQ
self.winq = np.array([self.wc_list[x[0]].average_workcontent if len(x) else 0 for x in self.sequence_list])
self.avlm = np.array([self.wc_list[x[0]].average_waiting if len(x) else 0 for x in self.sequence_list])
#print(self.sequence_list, self.winq)
    # available time is a bit tricky: jobs may arrive while an operation is ongoing,
    # or when the machine is already in starvation (available time is earlier than now),
    # hence we can't simply let available time = now + cumulative_pt
def state_update_after_job_arrival(self, increased_available_time):
self.current_pt = np.array([x[self.m_idx] for x in self.pt_list])
self.cumulative_pt = self.current_pt.sum()
# add the new job's pt to current time / current available time
self.available_time = max(self.available_time, self.env.now) + increased_available_time
self.que_size = len(self.queue)
    # update the information of progression, realized and expected tardiness to JOB_CREATOR !!!
def update_global_info_progression(self):
        # realized: 0 if already tardy; exp: 0 if slack time is negative
realized = self.time_till_due.clip(0,1)
exp = self.slack.clip(0,1)
# update the machine's corresponding record in job creator, and several rates
self.job_creator.comp_rate_list[self.m_idx] = self.completion_rate
self.job_creator.comp_rate = np.concatenate(self.job_creator.comp_rate_list).mean()
self.job_creator.realized_tard_list[self.m_idx] = realized
self.job_creator.realized_tard_rate = 1 - np.concatenate(self.job_creator.realized_tard_list).mean()
self.job_creator.exp_tard_list[self.m_idx] = exp
self.job_creator.exp_tard_rate = 1 - np.concatenate(self.job_creator.exp_tard_list).mean()
self.job_creator.available_time_list[self.m_idx] = self.available_time
# update the information of the job that being processed to JOB_CREATOR !!!
def update_global_info_anticipation(self,pt):
current_j_idx = self.queue[self.position]
self.job_creator.current_j_idx_list[self.m_idx] = current_j_idx
next_wc = self.sequence_list[self.position][0] if len(self.sequence_list[self.position]) else -1 # next workcenter of the job
self.job_creator.next_wc_list[self.m_idx] = next_wc # update the next wc info (hold by job creator)
self.release_time = self.env.now + pt
self.job_creator.release_time_list[self.m_idx] = self.release_time # update the time of completion of current operation
job_rempt = self.remaining_job_pt[self.position].sum() - pt
self.job_creator.arriving_job_rempt_list[self.m_idx] = job_rempt # update the remaining pt of job under processing
job_slack = self.slack[self.position]
self.job_creator.arriving_job_slack_list[self.m_idx] = job_slack # update the slack time of processing job (hold by job creator)
    # must call this after each operation, otherwise the record persists and leads to errors
def update_global_info_after_operation(self):
self.job_creator.next_wc_list[self.m_idx] = -1 # after each operation, clear the record in job creator
# give out the information related to routing decision
def routing_data_generation(self):
        # note that we subtract current time from available_time
        # because the state_update_all function may be called at a different time
self.routing_data = [self.cumulative_pt, max(0,self.available_time-self.env.now), self.que_size, self.cumulative_run_time]
return self.routing_data
    # give out the information related to sequencing decision
def sequencing_data_generation(self):
self.sequencing_data = \
[self.current_pt, self.remaining_job_pt, np.array(self.due_list), self.env.now, self.completion_rate, \
self.time_till_due, self.slack, self.winq, self.avlm, self.next_pt, self.remaining_no_op, self.waited_time, \
self.wc_idx, self.queue, self.m_idx]
#print(self.sequencing_data)
return self.sequencing_data
'''
4. downwards are functions related to the calculation of reward and construction of state
    they are only called if the sequencing learning mode is activated
the options of reward function are listed at bottom
'''
# this function is called only if self.sequencing_learning_event is triggered
# when this function is called upon the completion of an operation
    # it adds the received data to the corresponding record in the job creator's incomplete_rep_memo
def complete_experience(self):
# it's possible that not all machines keep memory for learning
        # machines that need to keep memory don't keep records for all jobs,
# only when they have to choose from several queuing jobs
try:
# check whether corresponding experience exists, if not, ends at this line
self.job_creator.incomplete_rep_memo[self.m_idx][self.decision_point]
#print('PARAMETERS',self.m_idx,self.decision_point,self.env.now)
#print('BEFORE\n',self.job_creator.incomplete_rep_memo[self.m_idx][self.decision_point])
# if yes, get the global state
local_data = self.sequencing_data_generation()
s_t = self.build_state(local_data)
#print(self.m_idx,s_t)
            r_t = self.reward_function() # the reward function can be changed by specifying it before the training
#print(self.env.now, r_t)
self.job_creator.sqc_reward_record.append([self.env.now, r_t])
self.job_creator.incomplete_rep_memo[self.m_idx][self.decision_point] += [s_t, r_t]
#print(self.job_creator.incomplete_rep_memo[self.m_idx])
#print(self.job_creator.incomplete_rep_memo[self.m_idx][self.decision_point])
complete_exp = self.job_creator.incomplete_rep_memo[self.m_idx].pop(self.decision_point)
# and add it to rep_memo
self.job_creator.rep_memo[self.m_idx].append(complete_exp)
#print(self.job_creator.rep_memo[self.m_idx])
#print('AFTER\n',self.job_creator.incomplete_rep_memo[self.m_idx][self.decision_point])
#print(self.m_idx,self.env.now,'state: ',s_t,'reward: ',r_t)
except:
pass
# testing reward function, check if the agent learns, this function encourages using SPT
def get_reward0(self):
if self.pt_chosen <= self.current_pt[:self.waiting_jobs-1].mean():
r_t = 1
else:
r_t = 0
r_t = torch.tensor(r_t, dtype=torch.float)
return r_t
# those functions are called only if self.sequencing_learning_event is triggered
    # this function is called only upon the completion of all operations of a job
# it calculates the reward for all machines that job went through
# hence a complete experience is constructed and ready for learning
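    # (note: the get_reward variants below share a similar template; they differ
    #  mainly in the critical-level denominator, whether the WINQ term is included,
    #  and the final scaling before clipping to [-1, 1])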
def get_reward1(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 50)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward2(self): # trial
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 110)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward3(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 64)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward4(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 20)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/40).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward5(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 20)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position] # just for chosen one
critical_level_loser = np.delete(critical_level, self.position).mean() # average value
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])*critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean()*critical_level_loser\
- self.before_op_winq_chosen*critical_level_chosen) * 0.1
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward6(self):
slack = self.before_op_slack
#print(self.before_op_ttd, self.before_op_remaining_pt, critical_ratio, self.position, self.pt_chosen, self.current_pt)
critical_level = 1 - slack / (np.absolute(slack) + 200)
print(critical_level)
# get critical level for jobs
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position)
# calculate adjusted avoided slack consumption for the chosen job
avoided_slack_consumption_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
avoided_slack_consumption_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
slack_consumption_loser = (self.pt_chosen * critical_level_loser).mean()
# calculate the reward
print(critical_level, self.current_pt[:self.waiting_jobs-1], self.pt_chosen, self.position)
rwd = ((avoided_slack_consumption_chosen - slack_consumption_loser)/20).clip(-1,1)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward7(self):
slack = self.before_op_slack
#print(self.before_op_ttd, self.before_op_remaining_pt, critical_ratio, self.position, self.pt_chosen, self.current_pt)
critical_level = 1 - slack / (np.absolute(slack) + 25)
print(critical_level)
# get critical level for jobs
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position)
# calculate adjusted avoided slack consumption for the chosen job
avoided_slack_consumption_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
avoided_slack_consumption_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
slack_consumption_loser = (self.pt_chosen * critical_level_loser).mean()
# calculate the reward
print(critical_level, self.current_pt[:self.waiting_jobs-1], self.pt_chosen, self.position)
rwd = ((avoided_slack_consumption_chosen - slack_consumption_loser)/20).clip(-1,1)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward8(self):
slack = self.before_op_slack
#print(self.before_op_ttd, self.before_op_remaining_pt, critical_ratio, self.position, self.pt_chosen, self.current_pt)
critical_level = 1 - slack / (np.absolute(slack) + 64)
print(critical_level)
# get critical level for jobs
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position)
# calculate adjusted avoided slack consumption for the chosen job
avoided_slack_consumption_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
avoided_slack_consumption_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
slack_consumption_loser = (self.pt_chosen * critical_level_loser).mean()
# calculate the reward
print(critical_level, self.current_pt[:self.waiting_jobs-1], self.pt_chosen, self.position)
rwd = ((avoided_slack_consumption_chosen - slack_consumption_loser)/20).clip(-1,1)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward9(self): # adjust the slack consumption by critical ratio, for low hetero case
slack = self.before_op_slack
#print(self.before_op_ttd, self.before_op_remaining_pt, critical_ratio, self.position, self.pt_chosen, self.current_pt)
critical_level = 1 - slack / (np.absolute(slack) + 50)
print(critical_level)
# get critical level for jobs
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position)
# calculate adjusted avoided slack consumption for the chosen job
avoided_slack_consumption_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
avoided_slack_consumption_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
slack_consumption_loser = (self.pt_chosen * critical_level_loser).mean()
# calculate the reward
print(critical_level, self.current_pt[:self.waiting_jobs-1], self.pt_chosen, self.position)
rwd = ((avoided_slack_consumption_chosen - slack_consumption_loser)/20).clip(-1,1)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
    def get_reward10(self): # adjust the slack consumption by critical ratio, and clip the critical ratio of non-tardy jobs
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 50)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward11(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 50)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward12(self): # trial
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 100)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward13(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 64)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
earned_slack_chosen *= critical_level_chosen
# calculate the AVERAGE adjusted slack consumption for jobs that not been chosen
consumed_slack_loser = self.pt_chosen*critical_level_loser.mean()
# slack reward
rwd_slack = earned_slack_chosen - consumed_slack_loser
# WINQ reward
rwd_winq = (self.before_op_winq_loser.mean() - self.before_op_winq_chosen) * 0.2
# calculate the reward
#print(rwd_slack, rwd_winq)
rwd = ((rwd_slack + rwd_winq)/20).clip(-1,1)
# optional printout
#print(self.env.now,'slack and pt:', slack, critical_level, self.position, self.pt_chosen, self.current_pt[:self.waiting_jobs-1])
#print(self.env.now,'winq and reward:',self.before_op_winq_chosen, self.before_op_winq_loser, earned_slack_chosen, consumed_slack_loser)
#print(self.env.now,'reward:',rwd)
r_t = torch.tensor(rwd , dtype=torch.float)
return r_t
def get_reward14(self):
slack = self.before_op_slack
critical_level = 1 - slack / (np.absolute(slack) + 20)
# get critical level for jobs, chosen and loser, respectively
critical_level_chosen = critical_level[self.position]
critical_level_loser = np.delete(critical_level, self.position) # could be a vector or scalar
# calculate adjusted earned slack for the chosen job
        earned_slack_chosen = np.mean(self.current_pt[:self.waiting_jobs-1])
#!/usr/bin/env python3
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# imports
import gpxpy
import numpy as np
from datetime import datetime, tzinfo
from scipy.interpolate import interp1d, splprep, splev
from typing import Dict, List, Union
# types
GPXData = Dict[str, Union[List[float], tzinfo]]
# globals
EARTH_RADIUS = 6371e3 # meter
EPS = 1e-6 # second
# functions
def gpx_interpolate(gpx_data: GPXData, res: float = 1.0, num: int = 0, deg: int = 1) -> GPXData:
"""
Returns gpx_data interpolated with a spatial resolution res using a spline of degree deg
if num > 0, gpx_data is interpolated to num points and res is ignored
"""
if not type(deg) is int:
raise TypeError('deg must be int')
if not 1 <= deg <= 5:
raise ValueError('deg must be in [1-5]')
if not len(gpx_data['lat']) > deg:
raise ValueError('number of data points must be > deg')
# interpolate spatial data
_gpx_data = gpx_remove_duplicates(gpx_data)
_gpx_dist = gpx_calculate_distance(_gpx_data, use_ele = True)
x = [_gpx_data[i] for i in ('lat', 'lon', 'ele') if _gpx_data[i]]
tck, _ = splprep(x, u = np.cumsum(_gpx_dist), k = deg, s = 0)
num = num if num else 1+int(np.sum(_gpx_dist)/res+0.5)
u_interp = np.linspace(0, np.sum(_gpx_dist), num)
x_interp = splev(u_interp, tck)
# interpolate time data linearly to preserve monotonicity
if _gpx_data['tstamp']:
f = interp1d(np.cumsum(_gpx_dist), _gpx_data['tstamp'], fill_value = 'extrapolate')
tstamp_interp = f(u_interp)
gpx_data_interp = {'lat':list(x_interp[0]),
'lon':list(x_interp[1]),
'ele':list(x_interp[2]) if gpx_data['ele'] else None,
'tstamp':list(tstamp_interp) if gpx_data['tstamp'] else None,
'tzinfo':gpx_data['tzinfo']}
return gpx_data_interp
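# Hedged usage sketch (not part of the original module; the coordinate, elevation
# and timestamp values below are invented purely for illustration):
def _gpx_interpolate_example() -> GPXData:
    example_data = {'lat': [50.0000, 50.0005, 50.0010],
                    'lon': [8.0000, 8.0005, 8.0010],
                    'ele': [100.0, 101.0, 102.0],
                    'tstamp': [0.0, 10.0, 20.0],
                    'tzinfo': None}
    # a linear spline (deg = 1) is enough for three points; res is in meters
    return gpx_interpolate(example_data, res=1.0, deg=1)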
def gpx_calculate_distance(gpx_data: GPXData, use_ele: bool = True) -> List[float]:
"""
Returns the distance between GPX trackpoints
if use_ele is True and gpx_data['ele'] is not None, the elevation data is used to compute the distance
"""
gpx_dist = np.zeros(len(gpx_data['lat']))
for i in range(len(gpx_dist)-1):
lat1 = np.radians(gpx_data['lat'][i])
lon1 = np.radians(gpx_data['lon'][i])
lat2 = np.radians(gpx_data['lat'][i+1])
lon2 = np.radians(gpx_data['lon'][i+1])
delta_lat = lat2-lat1
delta_lon = lon2-lon1
        c = 2.0*np.arcsin(np.sqrt(np.sin(delta_lat/2.0)**2+np.cos(lat1)*np.cos(lat2)*np.sin(delta_lon/2.0)**2))
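        # `c` is the haversine central angle between the two trackpoints;
        # the great-circle distance of the segment in meters is c*EARTH_RADIUS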
#!/usr/bin/env python
# python 3 compatibility
from __future__ import print_function
import os.path
import sys
import shutil
import time
# stdlib imports
import abc
import textwrap
import glob
import os
import tempfile
# hack the path so that I can debug these functions if I need to
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?
mapiodir = os.path.abspath(os.path.join(homedir, ".."))
# put this at the front of the system path, ignoring any installed mapio stuff
sys.path.insert(0, mapiodir)
# third party imports
from mapio.gridbase import Grid
from mapio.grid2d import Grid2D
from mapio.gdal import GDALGrid, get_affine
from mapio.dataset import DataSetException
from mapio.geodict import GeoDict
import numpy as np
from scipy import interpolate
import shapely
from affine import Affine
from rasterio import features
from rasterio.warp import reproject, Resampling, calculate_default_transform
from rasterio.crs import CRS
import rasterio
from shapely.geometry import MultiPoint, Polygon, mapping
import pyproj
def test_subdivide():
print("Testing subdivide method - aligned grids...")
data = np.arange(0, 4).reshape((2, 2))
geodict = GeoDict(
{
"xmin": 0.0,
"xmax": 1.0,
"ymin": 0.0,
"ymax": 1.0,
"dx": 1.0,
"dy": 1.0,
"ny": 2,
"nx": 2,
}
)
hostgrid = Grid2D(data, geodict)
finedict = GeoDict(
{
"xmin": 0.0 - (1.0 / 3.0),
"xmax": 1.0 + (1.0 / 3.0),
"ymin": 0.0 - (1.0 / 3.0),
"ymax": 1.0 + (1.0 / 3.0),
"dx": 1.0 / 3.0,
"dy": 1.0 / 3.0,
"ny": 6,
"nx": 6,
}
)
finegrid = hostgrid.subdivide(finedict)
output = np.array(
[
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
[2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
[2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
]
)
np.testing.assert_almost_equal(finegrid.getData(), output)
print("Passed subdivide method test - aligned grids.")
print("Testing subdivide method - non-aligned grids...")
data = np.arange(0, 9).reshape((3, 3))
geodict = GeoDict(
{
"xmin": 0.0,
"xmax": 10.0,
"ymin": 0.0,
"ymax": 10.0,
"dx": 5.0,
"dy": 5.0,
"ny": 3,
"nx": 3,
}
)
hostgrid = Grid2D(data, geodict)
finedict = GeoDict(
{
"xmin": -2.5,
"xmax": 11.5,
"ymin": -1.5,
"ymax": 10.5,
"dx": 2.0,
"dy": 2.0,
"nx": 8,
"ny": 7,
}
)
N = np.nan
print("Testing subdivide with min parameter...")
finegrid = hostgrid.subdivide(finedict, cellFill="min")
output = np.array(
[
[N, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0],
[N, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0],
[N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0],
[N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0],
[N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0],
[N, 6.0, 6.0, 7.0, 7.0, 7.0, 8.0, 8.0],
[N, 6.0, 6.0, 7.0, 7.0, 7.0, 8.0, 8.0],
]
)
np.testing.assert_almost_equal(finegrid.getData(), output)
print("Passed subdivide with min parameter...")
print("Testing subdivide with max parameter...")
finegrid = hostgrid.subdivide(finedict, cellFill="max")
output = np.array(
[
[N, 0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 2.0],
[N, 0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 2.0],
[N, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 5.0],
[N, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 5.0],
[N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0],
[N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0],
[N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0],
]
)
np.testing.assert_almost_equal(finegrid.getData(), output)
print("Passed subdivide with max parameter...")
print("Testing subdivide with mean parameter...")
finegrid = hostgrid.subdivide(finedict, cellFill="mean")
output = np.array(
[
[N, 0.0, 0.0, 1.0, 1.0, 1.5, 2.0, 2.0],
[N, 0.0, 0.0, 1.0, 1.0, 1.5, 2.0, 2.0],
[N, 3.0, 3.0, 4.0, 4.0, 4.5, 5.0, 5.0],
[N, 3.0, 3.0, 4.0, 4.0, 4.5, 5.0, 5.0],
[N, 4.5, 4.5, 5.5, 5.5, 6.0, 6.5, 6.5],
[N, 6.0, 6.0, 7.0, 7.0, 7.5, 8.0, 8.0],
[N, 6.0, 6.0, 7.0, 7.0, 7.5, 8.0, 8.0],
]
)
np.testing.assert_almost_equal(finegrid.getData(), output)
print("Passed subdivide with mean parameter...")
print("Passed subdivide method test - non-aligned grids.")
def test_basics():
geodict = GeoDict(
{
"xmin": 0.5,
"xmax": 3.5,
"ymin": 0.5,
"ymax": 3.5,
"dx": 1.0,
"dy": 1.0,
"ny": 4,
"nx": 4,
}
)
data = np.arange(0, 16).reshape(4, 4).astype(np.float32)
grid = Grid2D(data, geodict)
print(
"Testing basic Grid2D functionality (retrieving data, lat/lon to pixel coordinates, etc..."
)
np.testing.assert_almost_equal(grid.getData(), data)
assert grid.getGeoDict() == geodict
assert grid.getBounds() == (geodict.xmin, geodict.xmax, geodict.ymin, geodict.ymax)
lat, lon = grid.getLatLon(0, 0)
assert lat == 3.5 and lon == 0.5
row, col = grid.getRowCol(lat, lon)
assert row == 0 and col == 0
value = grid.getValue(lat, lon)
assert value == 0
frow, fcol = grid.getRowCol(1.0, 3.0, returnFloat=True)
assert frow == 2.5 and fcol == 2.5
irow, icol = grid.getRowCol(1.0, 3.0, returnFloat=False)
assert irow == 2 and icol == 2
# test getting values in and outside of the grid bounds
lat = np.array([0.0, 0.5, 2.5, 4.0])
lon = np.array([0.0, 0.5, 2.5, 4.0])
default = np.nan
    output = np.array([np.nan, 12, 6, np.nan])
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _interleave(lists, cycle_length, block_length):
"""Reference implementation of interleave used for testing.
Args:
lists: a list of lists to interleave
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
Yields:
Elements of `lists` interleaved in the order determined by `cycle_length`
and `block_length`.
"""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
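# Hedged usage sketch (not part of the original test file): with cycle_length=2
# and block_length=1, the reference implementation interleaves [1, 1] and
# [2, 2, 2] one element at a time, yielding [1, 2, 1, 2, 2].
def _interleave_example():
  return list(_interleave([[1, 1], [2, 2, 2]], cycle_length=2, block_length=1))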
def _make_coordinated_sloppy_dataset(input_values, cycle_length, block_length,
num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
input_values: the values to generate lists to interleave from
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
num_parallel_calls: the degree of interleave parallelism
Returns:
A dataset iterator (represented as `get_next` op) and events that can be
used to control the order of output elements.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in input_values}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
2).interleave(interleave_fn, cycle_length, block_length,
num_parallel_calls).with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
return get_next, coordination_events
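# Hedged usage note (an assumption, not taken from the original tests): a test
# using this helper would evaluate `get_next` in a session and call
# `coordination_events[value].set()` to unblock the map function for `value`,
# so the order in which elements become available can be controlled explicitly.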
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)]
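# Hedged usage sketch (not part of the original test file):
# _repeat([2, 3], 1) -> [[2, 2], [3, 3, 3]]
def _repeat_example():
  return _repeat([2, 3], 1)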
class InterleaveDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", [4, 5, 6], 1, 1, [
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6
]),
("2", [4, 5, 6], 2, 1, [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6,
5, 6, 5, 6, 5, 6, 6
]),
("3", [4, 5, 6], 2, 3, [
4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5, 5, 5, 6,
6, 6, 5, 5, 6, 6, 6
]),
("4", [4, 5, 6], 7, 2, [
4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6,
6, 5, 6, 6, 5, 6, 6
]),
("5", [4, 0, 6], 2, 1,
[4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6]),
)
def testPythonImplementation(self, input_values, cycle_length, block_length,
expected_elements):
input_lists = _repeat(input_values, 2)
for expected, produced in zip(
expected_elements, _interleave(input_lists, cycle_length,
block_length)):
self.assertEqual(expected, produced)
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, None),
("2", np.int64([4, 5, 6]), 1, 3, 1),
("3", np.int64([4, 5, 6]), 2, 1, None),
("4", np.int64([4, 5, 6]), 2, 1, 1),
("5", np.int64([4, 5, 6]), 2, 1, 2),
("6", np.int64([4, 5, 6]), 2, 3, None),
("7", np.int64([4, 5, 6]), 2, 3, 1),
("8", np.int64([4, 5, 6]), 2, 3, 2),
("9", np.int64([4, 5, 6]), 7, 2, None),
("10", np.int64([4, 5, 6]), 7, 2, 1),
("11", np.int64([4, 5, 6]), 7, 2, 3),
("12", np.int64([4, 5, 6]), 7, 2, 5),
("13", np.int64([4, 5, 6]), 7, 2, 7),
("14", np.int64([]), 2, 3, None),
("15", np.int64([0, 0, 0]), 2, 3, None),
("16", np.int64([4, 0, 6]), 2, 3, None),
("17", np.int64([4, 0, 6]), 2, 3, 1),
("18", np.int64([4, 0, 6]), 2, 3, 2),
)
def testInterleaveDataset(self, input_values, cycle_length, block_length,
num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
for expected_element in _interleave(
_repeat(input_values, count), cycle_length, block_length):
self.assertEqual(expected_element, sess.run(get_next))
for _ in range(2):
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, None),
("2", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, 1),
("3", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, None),
("4", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 1),
("5", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 2),
("6", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, None),
("7", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 1),
("8", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 2),
("9", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, None),
("10", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 1),
("11", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 3),
("12", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 5),
("13", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 7),
)
def testInterleaveDatasetError(self, input_values, cycle_length, block_length,
num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).interleave(
dataset_ops.Dataset.from_tensors, cycle_length, block_length,
num_parallel_calls)
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
for value in input_values:
if np.isnan(value):
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
else:
self.assertEqual(value, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testInterleaveSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
iterator = (
dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1).make_one_shot_iterator())
get_next = iterator.get_next()
with self.cached_session() as sess:
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 2, 1, 1),
("2", np.int64([4, 5, 6]), 2, 1, 2),
("3", np.int64([4, 5, 6]), 2, 3, 1),
("4", np.int64([4, 5, 6]), 2, 3, 2),
("5", np.int64([4, 5, 6]), 3, 2, 1),
("6", np.int64([4, 5, 6]), 3, 2, 2),
("7", np.int64([4, 5, 6]), 3, 2, 3),
("8", np.int64([4, 0, 6]), 2, 3, 1),
("9", | np.int64([4, 0, 6]) | numpy.int64 |
import os
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
FLAGS = tf.app.flags.FLAGS
def save_image(data, data_format, e):
"""Saves a picture showing the current progress of the model"""
X_G, X_real = data
Xg = X_G[:8]
Xr = X_real[:8]
if data_format == "NHWC":
        X = np.concatenate((Xg, Xr), axis=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import os
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import src.utils.log as log
from src.data_formats.misc import DatasetMisc
from src.data_formats.actions import define_actions
import src.data_loading.load_human36 as h36
from src.data_loading.human36_dataset import Human36M
from src.model import LinearModel, weight_init
from src.human_36.train_human36 import train_human
from src.human_36.test_human36 import test_human
import src.utils.viz as viz
def main_human(opt, save_op=True, return_proc=False):
start_epoch = 0
err_test_best = 100000
glob_step = 0
lr_now = opt.lr
# save options
if save_op:
log.save_options(opt, opt.ckpt)
print("\n==================Actions=================")
actions = define_actions(opt.action)
print(">>> actions to use: {}".format(len(actions)))
pprint(actions, indent=4)
print("==========================================\n")
print("\n==================Data=================")
print(">>> loading data")
# load structure for miscellaneous info
misc = DatasetMisc(opt.dataset_type)
# load the data from the h5 annotations
data_dict_train, cameras_dict_train, data_dict_test, cameras_dict_test, \
stat_2d, stat_3d = h36.load_human36(misc, opt, actions)
# relevant options for creating the train and test data loader
tol_mm = opt.tolerance_mm
num_pairs = opt.num_pairs
amt_train_data = opt.amt_train_data
amt_test_data = opt.amt_test_data
train_rel_labels_noise_prob = opt.rel_labels_noise_prob
test_rel_labels_noise_prob = 0. #NOTE: hard coded to 0 for the test set
in_dropout_p = opt.in_dropout_p
if opt.is_train:
print("\n>>> creating Train dataset")
# create dataset of type Human36M
train_h36 = \
Human36M( misc, cameras_dict_train, data_dict_train, stat_2d, stat_3d,
tol_mm, num_pairs, amt_train_data, train_rel_labels_noise_prob,
in_dropout_p, is_train=True )
# create data loader
train_loader = DataLoader(dataset=train_h36, batch_size=opt.train_batch,
shuffle=True, num_workers=opt.job)
print(" - number of batches: {}".format(len(train_loader)))
if opt.is_test:
print("\n>>> creating Test dataset")
# create dataset of type Human36M
test_h36 = \
Human36M( misc, cameras_dict_test, data_dict_test, stat_2d, stat_3d,
tol_mm, num_pairs, amt_test_data, test_rel_labels_noise_prob,
in_dropout_p, is_train=False )
# create data loader
test_loader = DataLoader(dataset=test_h36, batch_size=opt.test_batch,
shuffle=False, num_workers=0, drop_last=False)
print(" - number of batches: {}".format(len(test_loader)))
print("==========================================\n")
print("\n==================Model=================")
print(">>> creating model")
num_2d_coords = misc.NUM_KEYPOINTS_2D * 2
num_3d_coords = misc.NUM_KEYPOINTS_3D * 3
model = LinearModel(num_2d_coords, num_3d_coords,
linear_size=opt.linear_size,
num_stage=opt.num_stage,
p_dropout=opt.dropout,
predict_scale=opt.predict_scale,
scale_range=opt.scale_range,
unnorm_op=opt.unnorm_op,
unnorm_init=opt.unnorm_init)
model = model.cuda()
model.apply(weight_init)
print(" - total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1e6))
print("==========================================\n")
############################################################################
# define losses and optimizers
mse_loss = nn.MSELoss(size_average=True).cuda()
# mse_loss = nn.MSELoss(size_average=True)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
cudnn.benchmark = True
############################################################################
# load ckpt
if opt.load:
print(">>> loading ckpt from '{}'".format(opt.load))
ckpt = torch.load(opt.load)
stat_3d = ckpt['stat_3d']
stat_2d = ckpt['stat_2d']
err_best = ckpt['err']
lr_now = ckpt['lr']
glob_step = ckpt['step']
model.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optimizer'])
if not opt.resume:
print(">>> ckpt loaded (epoch: {} | err: {})".format(start_epoch, err_best))
if opt.resume:
assert opt.load != ''
start_epoch = ckpt['epoch']
logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
print(">>> ckpt loaded (epoch: {} | err: {})".format(start_epoch, err_best))
else:
logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
logger.set_names(['it', 'lr', 'l_train', 'l_test', 'e_test', 'e_test_s', 'e_test_p'])
############################################################################
## TRAINING LOOP
overall_train_losses = [[0],[0],[0],[0]]
loss_lbls = ['sup_loss','rel_loss', 'rep_loss', 'cam_loss']
for epoch in range(start_epoch, opt.epochs):
print('\n==========================')
print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
########################################################################
## TRAIN
avg_loss_train = -1
if opt.is_train:
print('\n - Training')
glob_step, lr_now, avg_loss_train, losses_train = \
train_human(
train_loader=train_loader, misc=misc,
stat_2d=stat_2d, stat_3d=stat_3d,
standardize_input_data=opt.standardize_input_data,
standardize_output_data=opt.standardize_output_data,
use_rel_loss=opt.use_rel_loss,
subtract_2d_root=opt.subtract_2d_root,
keep_root=opt.keep_root,
optimizer=optimizer, model=model, mse_loss=mse_loss,
reprojection=opt.reprojection,
use_full_intrinsics=opt.use_full_intrinsics,
predict_scale=opt.predict_scale, limb_type=opt.limb_type,
glob_step=glob_step, lr_init=opt.lr, lr_now=lr_now,
lr_decay=opt.lr_decay, gamma=opt.lr_gamma,
max_norm=opt.max_norm,
distance_multiplier=opt.distance_multiplier,
loss_weights=opt.loss_weights)
for li,l in enumerate(overall_train_losses): l.extend(losses_train[li])
viz.plot_losses(overall_train_losses, loss_lbls,
opt.ckpt + '/train_losses.jpg', 'Training Set', 'iterations', 'losses')
########################################################################
## TEST
loss_test = err_test = err_test_scale = err_test_proc = -1
if opt.is_test and (glob_step) % opt.test_step == 0:
print('\n - Testing')
loss_test, target_poses, out_poses, proc_poses, scaled_poses = \
test_human(
test_loader=test_loader, misc=misc,
stat_2d=stat_2d, stat_3d=stat_3d,
standardize_input_data=opt.standardize_input_data,
standardize_output_data=opt.standardize_output_data,
subtract_2d_root=opt.subtract_2d_root, keep_root=opt.keep_root,
model=model, mse_loss=mse_loss, use_rel_loss=opt.use_rel_loss,
save_ims=opt.save_ims, epoch=epoch, op_dir=opt.ckpt_ims)
target_poses = np.vstack(target_poses)
out_poses = np.vstack(out_poses)
scaled_poses = np.vstack(scaled_poses)
proc_poses = np.vstack(proc_poses)
####################################################################
## compute error in mm for both protocols (with and without procrustes)
sqerr = (out_poses - target_poses) ** 2
sqerr_proc = (proc_poses - target_poses) ** 2
sqerr_scaled = (scaled_poses - target_poses) ** 2
all_err = np.sqrt(sqerr[:, 0::3] + sqerr[:,1::3] + sqerr[:,2::3])
all_err_scaled = np.sqrt(sqerr_scaled[:, 0::3] + sqerr_scaled[:,1::3] + sqerr_scaled[:,2::3])
all_err_proc = np.sqrt(sqerr_proc[:, 0::3] + sqerr_proc[:,1::3] + sqerr_proc[:,2::3])
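            # (each entry of all_err is a per-joint Euclidean distance in mm, as noted
            #  above; averaging over joints and frames below gives the MPJPE-style error)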
err_test = np.mean(all_err)
err_test_scale = np.mean(all_err_scaled)
err_test_proc = np.mean(all_err_proc)
print ("> 3d error {}".format(round(err_test, 3)))
print ("> 3d error (scaled) {}".format(round(err_test_scale, 3)))
print ("> 3d error (procrustes) {}".format(round(err_test_proc, 3)))
print("-"*25)
# compute the errors per action
a_test = data_dict_test['A'][test_h36.inds]
            vals, counts = np.unique(a_test, return_counts=True)
# Regularization - Deep Learning models have so much flexibility and capacity that overfitting can be a serious problem,
# if the training dataset is not big enough.
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
# %matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
train_X, train_Y, test_X, test_Y, dplot = load_2D_dataset()
dplot.show()
""" L2 Regularization -- functions: "compute_cost_with_regularization()" and "backward_propagation_with_regularization()
Dropout -- functions: "forward_propagation_with_dropout()" and "backward_propagation_with_dropout() """
def model(X, Y, learning_rate=0.3, num_iterations=30000, print_cost=True, lambd=0., keep_prob=1):
"""
Implements a three-layer neural network: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
learning_rate -- learning rate of the optimization
num_iterations -- number of iterations of the optimization loop
print_cost -- If True, print the cost every 10000 iterations
lambd -- regularization hyperparameter, scalar
keep_prob -- probability of keeping a neuron active during drop-out, scalar
Returns:
parameters -- parameters learned by the model. They can be used to predict.
"""
grads = {}
    costs = []  # to keep track of the cost
m = X.shape[1] # number of examples
layer_dims = [X.shape[0], 20, 3, 1]
# Initialize parameters dictionary
parameters = initialize_parameters(layer_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward Propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
if keep_prob == 1:
a3, cache = forward_propagation(X, parameters)
elif keep_prob < 1:
a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
# Cost function
if lambd == 0.0:
cost = compute_cost(a3, Y)
else:
cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
# Backward Propagation
assert (lambd == 0 or keep_prob == 1) # it is possible to use L2 regularization and dropout,
# but this assignment will only explore one at a time
if lambd == 0 and keep_prob == 1:
grads = backward_propagation(X, Y, cache)
elif lambd != 0:
grads = backward_propagation_with_regularization(X, Y, cache, lambd)
elif keep_prob < 1:
grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the Loss every 10000 iterations
if print_cost and i % 10000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
if print_cost and i % 1000 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (x1,000)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# Let's train the model without any regularization, and observe the accuracy on the train/test sets.
parameters = model(train_X, train_Y)
print("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# plot the decision boundary of your model
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75, 0.40])
axes.set_ylim([-0.75, 0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The non-regularized model is obviously overfitting the training set. It is fitting the noisy points!
# Lets now look at two techniques to reduce overfitting.
# L2 Regularization -- The standard way to avoid overfitting is called L2 regularization.
# It consists of appropriately modifying your cost function. use np.sum(np.square(Wl))
def compute_cost_with_regularization(A3, Y, parameters, lambd):
"""
Implement the cost function with L2 regularization.
Arguments:
A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
parameters -- python dictionary containing parameters of the model
Returns:
cost -- value of the regularized loss function
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # cross-entropy part of the cost
L2_regularization_cost = lambd / 2 / m * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
cost = cross_entropy_cost + L2_regularization_cost
return cost
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd=0.1)))
# Of course, because you changed the cost, you have to change backward propagation as well!
# All the gradients have to be computed with respect to this new cost.
# Exercise: Implement the changes needed in backward propagation to take into account regularization.
# The changes only concern dW1, dW2 and dW3.
def backward_propagation_with_regularization(X, Y, cache, lambd):
"""
Implements the backward propagation of our baseline model to which we added an L2 regularization
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward propagation
lambd -- regularization hyperparameter, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation
variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1. / m * np.dot(dZ3, A2.T) + lambd * W3 / m
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T) + lambd * W2 / m
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T) + lambd * W1 / m
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()
grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd=0.7)
print("dW1 = " + str(grads["dW1"]))
print("dW2 = " + str(grads["dW2"]))
print("dW3 = " + str(grads["dW3"]))
# Let's now run the model with L2 regularization $(lambda = 0.7). The model() function will call:
# compute_cost_with_regularization instead of compute_cost
# backward_propagation_with_regularization instead of backward_propagation
parameters = model(train_X, train_Y, lambd=0.7)
print("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# Test set accuracy increased to 93%. Not overfitting the training data anymore.
# Let's plot the decision boundary.
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75, 0.40])
axes.set_ylim([-0.75, 0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
""" Observations:
The value of lambda is a hyperparameter that you can tune using a dev set.
L2 regularization makes your decision boundary smoother. If lambda is too large,
it is also possible to "oversmooth", resulting in a model with high bias.
What is L2-regularization actually doing?:
L2-regularization relies on the assumption that a model with small weights is simpler than
a model with large weights. Thus, by penalizing the square values of the weights in the cost function
you drive all the weights to smaller values. It becomes too costly for the cost to have large weights!
This leads to a smoother model in which the output changes more slowly as the input changes.
The cost computation:
A regularization term is added to the cost
The backpropagation function:
There are extra terms in the gradients with respect to weight matrices
Weights end up smaller ("weight decay"):
Weights are pushed to smaller values."""
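# A hedged illustration, not part of the original assignment: the extra gradient term
# lambd * W / m turns plain gradient descent into "weight decay". With learning rate lr,
#     W <- W - lr * (dW_cross_entropy + (lambd / m) * W)
# is the same as first shrinking W by the factor (1 - lr * lambd / m) and then taking
# the usual cross-entropy step. The demo values below are made up.
lr_demo, lambd_demo, m_demo = 0.3, 0.7, 211
W_demo = np.array([[1.5, -2.0], [0.4, 0.9]])
dW_ce_demo = np.array([[0.1, -0.2], [0.05, 0.0]])
step_combined = W_demo - lr_demo * (dW_ce_demo + lambd_demo / m_demo * W_demo)
step_decay_first = (1 - lr_demo * lambd_demo / m_demo) * W_demo - lr_demo * dW_ce_demo
assert np.allclose(step_combined, step_decay_first)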
# Dropout -- Widely used regularization technique specific to deep learning.
# It randomly shuts down some neurons in each iteration
# At each iteration, you train a different model that uses only a subset of your neurons.
# With dropout, your neurons thus become less sensitive to the activation of one other specific neuron,
# because that other neuron might be shut down at any time.
# You are using a 3 layer neural network, and will add dropout to the first and second hidden layers.
# We will not apply dropout to the input layer or output layer.
# Instructions: You would like to shut down some neurons in the first and second layers.
# To do that, you are going to carry out 4 Steps:
#
# In lecture, we dicussed creating a variable d[1] with the same shape as a[1] using np.random.rand()
# to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation,
# so create a random matrix D[1] = [d[1](1) d[1](2) ... d[1](m)] of the same dimension as A[1].
# Set each entry of D[1] to be 0 with probability (1-keep_prob) or 1 with probability (keep_prob),
# by thresholding values in D[1] appropriately. Hint: to set all the entries of a
# matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: X = (X < 0.5).
# Note that 0 and 1 are respectively equivalent to False and True.
# Set A[1] to A[1] * D[1]. (You are shutting down some neurons). You can think of D[1] as a mask,
# so that when it is multiplied with another matrix, it shuts down some of the values.
# Divide A[1] by keep_prob. By doing this you are assuring that the result of the cost will still have
# the same expected value as without drop-out. (This technique is also called inverted dropout.)
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
"""
Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.
Arguments:
X -- input dataset, of shape (2, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (20, 2)
b1 -- bias vector of shape (20, 1)
W2 -- weight matrix of shape (3, 20)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
keep_prob -- probability of keeping a neuron active during drop-out, scalar
Returns:
A3 -- last activation value, output of the forward propagation, of shape (1, 1)
cache -- tuple, information stored for computing the backward propagation
"""
np.random.seed(1)
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)
D1 = (D1 < keep_prob) # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
A1 *= D1 # Step 3: shut down some neurons of A1
A1 /= keep_prob # Step 4: scale the value of neurons that haven't been shut down
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)
D2 = (D2 < keep_prob) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
A2 *= D2 # Step 3: shut down some neurons of A2
A2 /= keep_prob # Step 4: scale the value of neurons that haven't been shut down
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob=0.7)
print("A3 = " + str(A3))
# Backward propagation with dropout
# Add dropout to the first and second hidden layers, using the masks D[1] and D[2] stored in the cache.
# Instruction: Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:
#
# You had previously shut down some neurons during forward propagation, by applying a mask D[1] to A1.
# In backpropagation, you will have to shut down the same neurons, by reapplying the same mask D[1] to dA1.
# During forward propagation, you had divided A1 by keep_prob. In backpropagation, you'll therefore have to divide
# dA1 by keep_prob again (the calculus interpretation is that if A[1] is scaled by keep_prob,
# then its derivative dA[1] is also scaled by the same keep_prob).
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
"""
Implements the backward propagation of our baseline model to which we added dropout.
Arguments:
X -- input dataset, of shape (2, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation_with_dropout()
keep_prob -- probability of keeping a neuron active during drop-out, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation
variables.
"""
m = X.shape[1]
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dA2 *= D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation
dA2 /= keep_prob # Step 2: Scale the value of neurons that haven't been shut down
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T)
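    # A hedged completion, since the original snippet stops here: the remaining
    # gradients mirror backward_propagation_with_regularization, with the mask D1
    # applied to dA1 (Step 1) and the rescaling by keep_prob (Step 2).
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
    dA1 = np.dot(W2.T, dZ2)
    dA1 *= D1         # Step 1: shut down the same neurons as during the forward pass
    dA1 /= keep_prob  # Step 2: scale the value of neurons that haven't been shut down
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
                 "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients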
# coding: utf-8
"""Base class of atomic structure based property dataset.
If you want to add new property to extend HDNNP, inherits this base
class.
"""
from abc import (ABC, abstractmethod)
import numpy as np
from tqdm import tqdm
from hdnnpy.utils import (MPI, pprint, recv_chunk, send_chunk)
class PropertyDatasetBase(ABC):
"""Base class of atomic structure based property dataset."""
PROPERTIES = []
"""list [str]: Names of properties for each derivative order."""
COEFFICIENTS = []
"""list [float]: Coefficient values of each properties."""
UNITS = []
"""list [str]: Units of properties for each derivative order."""
name = None
"""str: Name of this property class."""
n_property = None
"""int: Number of dimensions of 0th property."""
def __init__(self, order, structures):
"""
Common instance variables for property datasets are initialized.
Args:
order (int): Derivative order of property to calculate.
structures (list [AtomicStructure]):
Properties are calculated for these atomic structures.
"""
self._order = order
self._properties = self.PROPERTIES[: order+1]
self._elemental_composition = structures[0].get_chemical_symbols()
self._elements = sorted(set(self._elemental_composition))
self._length = len(structures)
self._slices = [slice(i[0], i[-1]+1)
for i in np.array_split(range(self._length), MPI.size)]
self._structures = structures[self._slices[MPI.rank]]
self._tag = structures[0].info['tag']
self._coefficients = self.COEFFICIENTS[: order+1]
self._units = self.UNITS[: order+1]
self._dataset = []
def __getitem__(self, item):
"""Return property data this instance has.
If ``item`` is string, it returns corresponding property.
Available keys can be obtained by ``properties`` attribute.
Otherwise, it returns a list of property sliced by ``item``.
"""
if isinstance(item, str):
try:
index = self._properties.index(item)
except ValueError:
raise KeyError(item) from None
return self._dataset[index]
else:
return [data[item] for data in self._dataset]
def __len__(self):
"""Number of atomic structures given at initialization."""
return self._length
@property
def coefficients(self):
"""list [float]: Coefficient values this instance have."""
return self._coefficients
@property
def elemental_composition(self):
"""list [str]: Elemental composition of atomic structures given
at initialization."""
return self._elemental_composition
@property
def elements(self):
"""list [str]: Elements of atomic structures given at
initialization."""
return self._elements
@property
def has_data(self):
"""bool: True if success to load or make dataset,
False otherwise."""
return len(self._dataset) == self._order + 1
@property
def order(self):
"""int: Derivative order of property to calculate."""
return self._order
@property
def properties(self):
"""list [str]: Names of properties this instance have."""
return self._properties
@property
def tag(self):
"""str: Unique tag of atomic structures given at
initialization.
Usually, it is a form like ``<any prefix> <chemical formula>``.
(ex. ``CrystalGa2N2``)
"""
return self._tag
@property
def units(self):
"""list [str]: Units of properties this instance have."""
return self._units
def clear(self):
"""Clear up instance variables to initial state."""
self._dataset.clear()
def load(self, file_path, verbose=True, remake=False):
"""Load dataset from .npz format file.
Only root MPI process load dataset.
It validates following compatibility between loaded dataset and
atomic structures given at initialization.
* length of data
* elemental composition
* elements
* tag
It also validates that loaded dataset satisfies requirements.
* order
Args:
file_path (~pathlib.Path): File path to load dataset.
verbose (bool, optional): Print log to stdout.
remake (bool, optional): If loaded dataset is lacking in
any property, recalculate dataset from scratch and
overwrite it to ``file_path``. Otherwise, it raises
ValueError.
Raises:
AssertionError: If loaded dataset is incompatible with
atomic structures given at initialization.
ValueError: If loaded dataset is lacking in any property and
``remake=False``.
"""
# validate compatibility between my structures and loaded dataset
ndarray = np.load(file_path)
assert list(ndarray['elemental_composition']) \
== self._elemental_composition
assert list(ndarray['elements']) == self._elements
assert ndarray['tag'].item() == self._tag
assert len(ndarray[self._properties[0]]) == len(self)
# validate lacking properties
lacking_properties = set(self._properties) - set(ndarray)
if lacking_properties:
if verbose:
lacking = ('\n'+' '*20).join(sorted(lacking_properties))
pprint(f'''
Following properties are lacked in {file_path}.
{lacking}
''')
if remake:
if verbose:
pprint('Start to recalculate dataset from scratch.')
self.make(verbose=verbose)
self.save(file_path, verbose=verbose)
return
else:
raise ValueError('Please recalculate dataset from scratch.')
# load dataset as much as needed
if MPI.rank == 0:
for i in range(self._order + 1):
self._dataset.append(ndarray[self._properties[i]])
if verbose:
pprint(f'Successfully loaded & made needed {self.name} dataset'
f' from {file_path}')
def make(self, verbose=True):
"""Calculate & retain property dataset
| It calculates property dataset by data-parallel using MPI
communication.
| The calculated dataset is retained in only root MPI process.
Each property values are divided by ``COEFFICIENTS`` which is
unique to each property dataset class.
Args:
verbose (bool, optional): Print log to stdout.
"""
dataset = []
for structure in tqdm(self._structures,
ascii=True, desc=f'Process #{MPI.rank}',
leave=False, position=MPI.rank):
dataset.append(self.calculate_properties(structure))
for data_list, coefficient in zip(zip(*dataset), self._coefficients):
shape = data_list[0].shape
send_data = np.stack(data_list) / coefficient
del data_list
if MPI.rank == 0:
recv_data = np.empty((self._length, *shape), dtype=np.float32)
recv_data[self._slices[0]] = send_data
del send_data
for i in range(1, MPI.size):
recv_data[self._slices[i]] = recv_chunk(source=i)
self._dataset.append(recv_data)
else:
send_chunk(send_data, dest=0)
del send_data
if verbose:
pprint(f'Calculated {self.name} dataset.')
def save(self, file_path, verbose=True):
"""Save dataset to .npz format file.
Only root MPI process save dataset.
Args:
file_path (~pathlib.Path): File path to save dataset.
verbose (bool, optional): Print log to stdout.
Raises:
RuntimeError: If this instance do not have any data.
"""
if not MPI.comm.bcast(self.has_data, root=0):
raise RuntimeError('''
Cannot save dataset, since this dataset does not have any data.
''')
if MPI.rank == 0:
data = {property_: data for property_, data
in zip(self._properties, self._dataset)}
info = {
'elemental_composition': self._elemental_composition,
'elements': self._elements,
'tag': self._tag,
}
            np.savez(file_path, **data, **info)
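        # A hedged completion, since the snippet above ends at np.savez: log the save,
        # then declare the abstract hook that make() relies on via
        # self.calculate_properties(structure). Subclasses must implement it.
        if verbose:
            pprint(f'Saved {self.name} dataset to {file_path}.')

    @abstractmethod
    def calculate_properties(self, structure):
        """Calculate 0th to ``order``-th property arrays for one structure.

        This stub is a sketch; the concrete return format is defined by each
        property dataset subclass.
        """
        raise NotImplementedError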
#!/usr/bin/env python
# coding: utf-8
"""
File: scrutiny_plot.py
Author: <NAME> <<EMAIL>>
Description: This script generate a data popularity plot based on dbs and
phedex data on hdfs, Based on https://github.com/dmwm/CMSPopularity
"""
import os
import sys
import argparse
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
input_file_name,
regexp_extract,
concat,
col,
when,
lit,
last,
max as _max,
min as _min,
datediff,
countDistinct,
avg,
unix_timestamp,
from_unixtime,
)
from pyspark.sql.types import (
StringType,
StructField,
StructType,
LongType,
IntegerType,
DoubleType,
)
from pyspark.sql import Window
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
import seaborn as sns
class OptionParser:
def __init__(self):
"User based option parser"
desc = """
This app create a data popularity plot (scrutiny plot)
based on phedex and dbs hdfs data.
"""
self.parser = argparse.ArgumentParser("Scrutiny Plot", usage=desc)
self.parser.add_argument(
"end_date",
help="Date in yyyyMMdd format.",
nargs="?",
type=str,
default=datetime.strftime(datetime.now() - relativedelta(days=1), "%Y%m%d"),
)
self.parser.add_argument(
"--outputFolder", help="Output folder path", default="./output"
)
self.parser.add_argument(
"--outputFormat", help="Output format (png or pdf)", default="pdf"
)
self.parser.add_argument(
"--allTiers",
action="store_true",
help="By default the plot only takes into account T1 and T2 sites.",
default="False",
)
def fill_nulls(df):
"""
This script tries to fill the gid column, replacing the -1 values.
1. if gid is -1 is replaced with None.
2. if all columns in the row are null, then drop the row.
3. If the gid is none then replace it with the last gid
for that dataset in the same site.
"""
df_na = df.na.replace(-1, None, ["gid"]).na.drop(how="all")
ff = df_na.withColumn(
"gid",
when(col("gid").isNotNull(), col("gid")).otherwise(
last("gid", True).over(
Window.partitionBy("site", "dataset")
.orderBy("date")
.rowsBetween(-sys.maxsize, 0)
)
),
)
return ff
def merge_phedex(start_date, end_date, spark_session, base="hdfs:///cms/phedex"):
"""
Merge the phedex datasets for the given timerange to generate a dataframe with:
site,dateset,min_date,max_date,min_rdate,max_rdate,min_size,max_size,days
"""
_start = datetime.strptime(start_date, "%Y%m%d")
_end = datetime.strptime(end_date, "%Y%m%d")
_n_days = (_end - _start).days + 1
_dates = [
datetime.strftime(_start + timedelta(days=d), "%Y/%m/%d")
for d in range(0, _n_days)
]
_phedex_paths = ["{}/{}/part*".format(base, d) for d in _dates]
sc = spark_session.sparkContext
FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
URI = sc._gateway.jvm.java.net.URI
Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
fs = FileSystem.get(URI("hdfs:///"), sc._jsc.hadoopConfiguration())
l = [url for url in _phedex_paths if fs.globStatus(Path(url))]
schema = StructType(
[
StructField("date", StringType()),
StructField("site", StringType()),
StructField("dataset", StringType()),
StructField("size", LongType()),
StructField("rdate", StringType()),
StructField("gid", IntegerType()),
]
)
_agg_fields = ["date", "size"]
_agg_func = [_min, _max]
# if some column will be used as date, we can add
# .option('dateFormat','yyyyMMdd')
_df = (
spark_session.read.option("basePath", base)
.option("mode", "FAILFAST")
.option("nullValue", "null")
.option("emptyValue", "null")
.csv(l, schema=schema)
)
_df = fill_nulls(_df)
_grouped = (
_df.groupby("site", "dataset", "rdate", "gid").agg(
avg("size"),
countDistinct("date"),
*[fn(c) for c in _agg_fields for fn in _agg_func]
)
).withColumnRenamed("count(DISTINCT date)", "days")
_grouped = _grouped.selectExpr(
*[
"`{}` as {}".format(c, c.replace("(", "_").replace(")", ""))
if "(" in c
else c
for c in _grouped.columns
]
)
_grouped = _grouped.withColumn("avg_size", col("avg_size").astype(LongType()))
return _grouped
# ## The weight of the dataset in the given period is a weigthed average of the size of the replicas in that period
#
def weigthed_size(min_date, max_date, begin, end):
"""
A vectorized approach to calcule the weigth of a size in a given period.
@param x spark dataframe
@param begin first day of the period (as lit column).
@param end last day of the period (as lit column).
"""
_start = when(min_date >= begin, min_date).otherwise(begin)
_end = when((max_date < end), max_date).otherwise(end)
delta = datediff(
from_unixtime(unix_timestamp(_end, "yyyyMMdd")),
from_unixtime(unix_timestamp(_start, "yyyyMMdd")),
) + lit(1)
delta = when((max_date < begin) | (min_date > end), lit(0)).otherwise(delta)
period = datediff(
from_unixtime(unix_timestamp(end, "yyyyMMdd")),
from_unixtime(unix_timestamp(begin, "yyyyMMdd")),
) + lit(1)
x = delta.cast(DoubleType()) / period.cast(DoubleType())
return x
def generate_scrutiny_plot(
end_date,
output_format="pdf",
output_folder="./output",
eventsInputFolder="hdfs:///cms/dbs_events",
basePath_dbs="hdfs:///cms/dbs_condor/dataset",
onlyT1_T2=True,
):
"""
param onlyT1_T2: Take into account only replicas in T1 and T2 sites.
"""
start_date_d = datetime.strptime(end_date, "%Y%m%d") - relativedelta(
months=12, days=-1
)
start_date = datetime.strftime(start_date_d, "%Y%m%d")
midterm = datetime.strftime(start_date_d + relativedelta(months=6), "%Y%m%d")
lastQuarter = datetime.strftime(start_date_d + relativedelta(months=9), "%Y%m%d")
dbsInput = "{}/{{{},{}}}/*/*/part-*".format(
basePath_dbs, start_date[:4], end_date[:4]
)
sc = SparkContext(appName="scrutinyPlotReplicated")
spark = SparkSession.builder.config(conf=sc._conf).getOrCreate()
phedex_df = merge_phedex(start_date, end_date, spark)
if onlyT1_T2:
phedex_df = phedex_df.filter(
col("site").startswith("T1_") | col("site").startswith("T2_")
)
# ## Calculate the effective average size of each dataset in the given periods
# size of each dataset in each of the time periods
phedex_df = phedex_df.withColumn(
"weight_6Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(midterm), lit(end_date)
),
)
phedex_df = phedex_df.withColumn(
"weighted_size_6Month", col("avg_size") * col("weight_6Month")
)
phedex_df = phedex_df.withColumn(
"weight_3Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(lastQuarter), lit(end_date)
),
)
phedex_df = phedex_df.withColumn(
"weighted_size_3Month", col("avg_size") * col("weight_3Month")
)
phedex_df = phedex_df.withColumn(
"weight_12Month",
weigthed_size(
phedex_df.min_date, phedex_df.max_date, lit(start_date), lit(end_date)
),
)
    phedex_df = phedex_df.withColumn(
        "weighted_size_12Month", col("avg_size") * col("weight_12Month")
    )
phedex_df = phedex_df.withColumn("min_date", col("rdate"))
_df_dsSzDur = (
phedex_df.groupby("dataset")
.agg(
{
"min_date": "min",
"max_date": "max",
"weighted_size_3Month": "sum",
"weighted_size_6Month": "sum",
"weighted_size_12Month": "sum",
}
)
.toPandas()
)
del phedex_df
# # Read dbs_condor dataset
#
# This dataset, stored in hdfs, will be the base to determine the use of the datasets.
dbs_df = (
spark.read.option("basePath", basePath_dbs)
.csv(dbsInput, header=True)
.select("dataset", "sum_evts")
.withColumn("filename", input_file_name())
)
# ## Filter the dataset
#
# We are only interested on records with datasets. There should be no records with dataset and without events (but currently there are).
# Are there records with dataset but without events (empty sum_evts in the original files)?
# - By default, spark takes empty string as null.
# - In the current version there are rendered as the "null" string instead of null value (this will change on another versions).
dbs_df = dbs_df.filter('dataset != "null" AND sum_evts !="null" AND sum_evts != ""')
zero = dbs_df.filter('sum_evts = "0.0"')
dbs_df = dbs_df.subtract(zero)
dbs_df = dbs_df.withColumn("events", dbs_df.sum_evts.cast("double") * 1000)
dbs_df = dbs_df.withColumn(
"days",
concat(
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 1),
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 2),
regexp_extract(dbs_df.filename, ".*/([0-9]{4})/([0-9]{2})/([0-9]{2})", 3),
),
)
dbs_df = dbs_df.filter("days between {} AND {}".format(start_date, end_date))
# # Use of each dataset per day
_df_agg = (
dbs_df.groupBy("dataset", "days").sum("events").alias("sum_events").toPandas()
)
_plain = _df_agg.rename(columns={"days": "day", "sum(events)": "sum_events"})
del dbs_df
del _df_agg
_plain[_plain.sum_events == 0].head()
_events_hadoop = spark.read.option("basePath", eventsInputFolder).csv(
"{}/part*.csv".format(eventsInputFolder), header=True
)
_events = _events_hadoop.select("dataset", "nevents")
df_dsSzDur = pd.merge(_df_dsSzDur, _events.toPandas(), on="dataset")
df_dsSzDur = df_dsSzDur.rename(
columns={
"sum(weighted_size_12Month)": "size12month",
"sum(weighted_size_3Month)": "size3month",
"sum(weighted_size_6Month)": "size6month",
"max(max_date)": "end",
"min(min_date)": "begin",
"nevents": "nEvents",
}
)
# ## Join the datasets
#
# A inner join to keep only the used datasets.
_merged = pd.merge(df_dsSzDur, _plain, on="dataset", sort=True)
# Rate of the events used over the number of events in the file
_merged["rate"] = _merged.sum_events / _merged.nEvents.astype(float)
# ## Create the desired datasets.
#
# The datasets sixMnts, threeMnts and twelveMnts contains only data for datasets that where used at least once in the given period.
_merged.day = _merged.day.astype("str")
full = _merged
sixMnts = full[full.day >= midterm][["dataset", "size6month", "day", "rate"]]
threeMnts = full[(full.day >= lastQuarter)][
["dataset", "size3month", "day", "rate"]
]
twelveMnts = full[["dataset", "size12month", "day", "rate"]][
np.logical_not(np.isnan(full.rate))
]
# ## Sum the event usage rate
#
sum_3mth = threeMnts.groupby(["dataset", "size3month"]).agg({"rate": "sum"})
sum_6mth = sixMnts.groupby(["dataset", "size6month"]).agg({"rate": "sum"})
sum_12mth = twelveMnts.groupby(["dataset", "size12month"]).agg({"rate": "sum"})
types = {"3 months": sum_3mth, "6 months": sum_6mth, "full year": sum_12mth}
cols = {
"3 months": "size3month",
"6 months": "size6month",
"full year": "size12month",
}
bdates = {"3 months": lastQuarter, "6 months": midterm, "full year": start_date}
gp = None
for _type in list(types.keys()):
_sum = types[_type].reset_index()
# positive values <1 belong to the first bin (one accesss).
_sum.rate = np.where(np.logical_and(_sum.rate < 1, _sum.rate > 0), 1, _sum.rate)
# if there are 0 or negative values they should be in another bin (-currently there are none-).
_sum.rate = np.where(_sum.rate <= 0, -1, _sum.rate)
        _sum["rtceil"] = np.round(_sum.rate).astype(int)
_sum.rtceil = np.where(_sum.rtceil > 14, 15, _sum.rtceil)
_sum.rtceil = _sum.rtceil.astype(str)
_sum.rtceil = _sum.rtceil.map(lambda x: x.rjust(2, "0"))
# Group higher values
_sum.rtceil.values[_sum.rtceil == "15"] = "15+"
#
_sum.rtceil.values[_sum.rtceil == "-1"] = "-Evts"
# agregate per bin
_gp = _sum.groupby("rtceil").agg({cols[_type]: ["sum", "count"]})
_gp.columns = _gp.columns.droplevel(0)
_gp = _gp.reset_index()
_gp.columns = ["bin", "f_size", "count_ds"]
# Unused data:
# Unused data is data that exists (had a size>0) in the given period but is not in the sum dataframe
# (it was not used in the given period).
_unused = df_dsSzDur[
np.logical_and(
np.logical_not(df_dsSzDur.dataset.isin(_sum.dataset)),
df_dsSzDur[cols[_type]] > 0,
)
]
# old
# Unused old data is unused data that existed before the period begins
_unused_old = _unused.loc[_unused.begin.astype(str) < bdates[_type]]
# new
# Unused new data is unused data created on this period
_unused_new = _unused.loc[_unused.begin.astype(str) >= bdates[_type]]
_gp = _gp.append(
{
"bin": "0-old",
"f_size": np.sum(_unused_old[cols[_type]]),
"count_ds": np.unique(_unused_old["dataset"]).size,
},
ignore_index=True,
)
        _gp = _gp.append(
            {
                "bin": "00-new",
                "f_size": np.sum(_unused_new[cols[_type]]),
                "count_ds": np.unique(_unused_new["dataset"]).size,
            },
            ignore_index=True,
        )
import numpy
import torch
from torch import autograd
PYTORCH_TYPES = (autograd.Variable, torch.Tensor)
def variance(outputs_expectation_of_products, outputs_expectations):
output_product_of_expectations = (
outputs_expectations * outputs_expectations)
return outputs_expectation_of_products - output_product_of_expectations
def covariance(outputs_expectation_of_products, outputs_expectations):
output_product_of_expectations = (
outer(outputs_expectations, outputs_expectations))
return outputs_expectation_of_products - output_product_of_expectations
def outer(vector, other_vector):
if isinstance(vector, PYTORCH_TYPES):
return torch.ger(vector, other_vector)
else:
return numpy.outer(vector, other_vector)
def centered_covariance(outputs):
if isinstance(outputs, PYTORCH_TYPES):
return torch.mm(outputs.t(), outputs)
else:
return numpy.dot(outputs.T, outputs)
def eig(tensor):
if isinstance(tensor, PYTORCH_TYPES):
u, V = torch.eig(tensor, True)
u = u[:, 0] # remove the imaginary part
else:
u, V = numpy.linalg.eigh(tensor)
return u, V
def svd(tensor, transpose=False):
"""
NOTE
----
By default numpy returns transposed V (like in svd equations) while
torch returns non transposed V. This helper function standardize them
returning transposed V if transpose=True and non transposed ones
otherwise.
"""
if isinstance(tensor, PYTORCH_TYPES):
U, S, V = torch.svd(tensor)
if transpose:
V = V.t()
return U, S, V
else:
        U, S, V = numpy.linalg.svd(tensor)
        if not transpose:
            # numpy returns V already transposed (V^T); undo that to match torch
            V = V.T
        return U, S, V
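# A hedged usage check, not from the original module: with a square input both code
# paths should reconstruct the matrix, since A = U * diag(S) * V^T.
if __name__ == "__main__":
    A = numpy.random.rand(3, 3)
    U, S, Vt = svd(A, transpose=True)                  # numpy path, V returned transposed
    assert numpy.allclose(A, U @ numpy.diag(S) @ Vt)
    U, S, V = svd(torch.tensor(A), transpose=False)    # torch path, V not transposed
    assert torch.allclose(torch.tensor(A), U @ torch.diag(S) @ V.t())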
#!/usr/bin/env python
"""
compare polarization integrals from C++ implementation with reference implementation
"""
import numpy as np
import numpy.linalg as la
from scipy import special
# slow python implementation
from polarization_ints_reference import d_func as d_func_reference
from polarization_ints_reference import h_func as h_func_reference
from polarization_ints_reference import polarization_integral as polarization_integral_reference
# fast C++ implementation
from polarization_integrals import PolarizationIntegral
#
from polarization_integrals import _polarization
def d_func(x, pmin, pmax, w0):
"""
evaluates the integrals (eqns. (23) and (29) in Ref. [CPP] with eta=0)
/x p-1/2
d(a,x) = d(p+1/2,x) = exp(-w0) | dw w exp(w)
/0
for a = p+1/2 with p an integer.
The prefactor exp(-w0) allows to avoid overflows in the exponential.
The function values are generated iteratively for all integers p in the
range p = pmin,pmin+1,..., 0 ,1,2,...,pmax
1) by upward iteration for p=0,1,2,...,pmax (a=1/2,3/2,...,pmax+1/2)
Starting value (p=0)
d[0] = d(1/2,x) = 2 exp(x-w0) dawson(sqrt(x))
Iteration (p -> p+1)
p+1/2
d[p+1] = x exp(x-w0) - (p+1/2) d[p]
2) and by downward iteration for p=0,-1,-2,...,pmin
Iteration (-p -> -(p+1))
-(p+1/2)
d[-(p+1)] = - ( x exp(x-w0) - d[-p] ) / (p+1/2)
Parameters
----------
x : float >= 0
upper limit of integration
pmin, pmax : int
defines range pmin <= p <= pmax
w0 : float
To avoid overflow in the exponential function, exp(-w0) * d(p+1/2,x) is calculated.
Returns
-------
d : array of size (|pmin|+pmax+1,)
exp(-w0) * d(p+1/2,x) in the order p = 0,1,...,pmax,pmin,pmin+1,...,-2,-1
"""
assert pmin <= 0 and pmax >= 0
# output array
d = np.zeros(-pmin+pmax+1)
# constants during iteration
expx = np.exp(x-w0)
sqrtx = np.sqrt(x)
dwsn = special.dawsn(sqrtx)
# initialization p=0
d[0] = 2*expx * dwsn
# 1) upward iteration starting from p=0
xp = sqrtx * expx
for p in range(0,pmax):
d[p+1] = xp - (p+0.5)*d[p]
# x^(p+1/2) * exp(x-w0)
xp *= x
# 2) downward iteration starting from p=0
ixp = 1/sqrtx * expx
for p in range(0,pmin,-1):
d[p-1] = -(ixp - d[p])/(-p+0.5)
# x^(-(p+1/2)) * exp(x-w0)
ixp /= x
return d
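def _check_d_func_against_quadrature(x=2.5, pmax=4, w0=1.0):
    """A hedged self-check, not part of the original test file: compare d_func
    against direct numerical quadrature of exp(-w0) * int_0^x w**(p-1/2) exp(w) dw
    for p = 0..pmax. Negative p are skipped because the literal integrand is not
    integrable at w = 0; those values come from the downward recursion instead.
    """
    from scipy import integrate
    d = d_func(x, -pmax, pmax, w0)
    for p in range(0, pmax + 1):
        ref, _ = integrate.quad(lambda w, p=p: w ** (p - 0.5) * np.exp(w - w0), 0.0, x)
        assert np.isclose(d[p], ref, rtol=1e-6), (p, d[p], ref)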
def d_func_zero_limit(x, pmin, pmax, w0):
"""
The function \tilde{d} also computes d(p+1/2,x), however without the factor x^{p+1/2}:
~ p+1/2
d(p+1/2,x) = x d(p+1/2,x) for all integers p
This ensures that \tilde{d} has a finite value in the limit x -> 0.
"""
assert pmin <= 0 and pmax >= 0
# output array
dt = np.zeros(-pmin+pmax+1)
# constants during iterations
expx = np.exp(x-w0)
# 1) For p >= 0, \tilde{d} is calculated from the Taylor expansion around x=0.
#
# ~ inf x^k
# d (x) = sum ------------
# p k=0 k! (p+k+1/2)
#
# The Taylor expansion is truncated at k_max = 20
kmax = 20
# y = x^k / k! * exp(-w0)
    y = np.exp(-w0)
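    # A hedged sketch, since the original snippet stops here: accumulate the truncated
    # Taylor series for p >= 0. The p < 0 branch of the full implementation is not
    # reproduced in this sketch.
    for k in range(kmax + 1):
        for p in range(0, pmax + 1):
            dt[p] += y / (p + k + 0.5)
        y *= x / (k + 1)   # advance x^k / k! * exp(-w0) to the next order
    if pmin < 0:
        raise NotImplementedError("p < 0 is handled separately in the full implementation")
    return dt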
import numpy as np
import pandas as pd
import pytest
import os
from inspect import currentframe, getframeinfo
from ..flarelc import FlareLightCurve
from ..altai import find_iterative_median
from ..lcio import from_path
from .. import PACKAGEDIR
from . import test_ids, test_paths, pathkepler, pathAltaiPony, pathk2TPF
def mock_flc(origin='TPF', detrended=False, ampl=1., dur=1):
"""
Mocks a FlareLightCurve with a sinusoid variation and a single positive outlier.
Parameter
-----------
origin : 'TPF' or str
Mocks a specific origin, such as 'KLC', 'FLC' etc.
detrended : False or bool
If False, a sinusoid signal is added to the mock light curve.
Return
-------
FlareLightCurve
"""
n = 1000
time = np.arange(0, n/48, 1./48.)
pixel_time = np.outer(time,np.full((3,3), 1)).reshape((1000,3,3))
    np.random.seed(13854)
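    # A hedged sketch of how the mock flux could be built, since the original snippet
    # stops at the seed: a flat light curve with Gaussian noise, an optional sinusoidal
    # trend, and one positive outlier of relative amplitude `ampl` lasting `dur`
    # cadences. How these arrays are wrapped into a FlareLightCurve depends on the
    # package's constructor and is not shown here.
    flux = 500. + np.random.normal(0., 5., n)
    if not detrended:
        flux = flux + 50. * np.sin(2. * np.pi * time / 3.)
    flare_start = 333
    flux[flare_start:flare_start + dur] += ampl * 500.
    flux_err = np.full(n, 5.)  # per-cadence uncertainty, kept for completeness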
# -*- coding: utf-8 -*-
"""CMA-ES (evolution strategy), the main sub-module of `cma` providing
in particular `CMAOptions`, `CMAEvolutionStrategy`, and `fmin`
"""
# TODO (mainly done): remove separable CMA within the code (keep as sampler only)
# TODO (low): implement a (deep enough) copy-constructor for class
# CMAEvolutionStrategy to repeat the same step in different
# configurations for online-adaptation of meta parameters
# TODO (complex): reconsider geno-pheno transformation. Can it be a
# separate module that operates inbetween optimizer and objective?
# Can we still propagate a repair of solutions to the optimizer?
# A repair-only internal geno-pheno transformation is less
# problematic, given the repair is idempotent. In any case, consider
# passing a repair function in the interface instead.
# How about gradients (should be fine)?
# Must be *thoroughly* explored before to switch, in particular the
# order of application of repair and other transformations, as the
# internal repair can only operate on the internal representation.
# TODO: split tell into a variable transformation part and the "pure"
# functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: self.opts['mindx'] is checked without sigma_vec, which is a little
# inconcise. Cheap solution: project sigma_vec on smallest eigenvector?
# TODO: class _CMAStopDict implementation looks way too complicated,
# design generically from scratch?
# TODO: separate display and logging options, those CMAEvolutionStrategy
# instances don't use themselves (probably all?)
# TODO: check scitools.easyviz and how big the adaptation would be
# TODO: separate initialize==reset_state from __init__
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: eigh(): thorough testing would not hurt
# TODO: (partly done) apply style guide
# WON'T FIX ANYTIME SOON (done within fmin): implement bipop in a separate
# algorithm as meta portfolio algorithm of IPOP and a local restart
# option to be implemented
# in fmin (e.g. option restart_mode in [IPOP, local])
# DONE: extend function unitdoctest, or use unittest?
# DONE: copy_always optional parameter does not make much sense,
# as one can always copy the input argument first. Similar,
# copy_if_changed should be keep_arg_unchanged or just copy
# DONE: expand/generalize to dynamically read "signals" from a file
# see import ConfigParser, DictFromTagsInString,
# function read_properties, or myproperties.py (to be called after
# tell()), signals_filename, if given, is parsed in stop()
# DONE: switch to np.loadtxt
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun,
# callback=None
# maxfev, diag (A sequency of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
# 'infot -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag},
# <allvecs>)
#
# changes:
# 20/04/xx: no negative weights for injected solutions
# 16/10/xx: versatile options are read from signals_filename
# RecombinationWeights refined and work without numpy
# new options: recombination_weights, timeout,
# integer_variable with basic integer handling
# step size parameters removed from CMAEvolutionStrategy class
# ComposedFunction class implements function composition
# 16/10/02: copy_always parameter is gone everywhere, use
# np.array(., copy=True)
# 16/xx/xx: revised doctests with doctest: +ELLIPSIS option, test call(s)
# moved all test related to test.py, is quite clean now
# "python -m cma.test" is how it works now
# 16/xx/xx: cleaning up, all kind of larger changes.
# 16/xx/xx: single file cma.py broken into pieces such that cma has now
# become a package.
# 15/02/xx: (v1.2) sampling from the distribution sampling refactorized
# in class Sampler which also does the (natural gradient)
# update. New AdaptiveDecoding class for sigma_vec.
# 15/01/26: bug fix in multiplyC with sep/diagonal option
# 15/01/20: larger condition numbers for C realized by using tf_pheno
# of GenoPheno attribute gp.
# 15/01/19: injection method, first implementation, short injections
# and long injections with good fitness need to be addressed yet
# 15/01/xx: _prepare_injection_directions to simplify/centralize injected
# solutions from mirroring and TPA
# 14/12/26: bug fix in correlation_matrix computation if np.diag is a view
# 14/12/06: meta_parameters now only as annotations in ## comments
# 14/12/03: unified use of base class constructor call, now always
# super(ThisClass, self).__init__(args_for_base_class_constructor)
# 14/11/29: termination via "stop now" in file cmaes_signals.par
# 14/11/28: bug fix initialization of C took place before setting the
# seed. Now in some dimensions (e.g. 10) results are (still) not
# determistic due to np.linalg.eigh, in some dimensions (<9, 12)
# they seem to be deterministic.
# 14/11/23: bipop option integration, contributed by <NAME>
# 14/09/30: initial_elitism option added to fmin
# 14/08/1x: developing fitness wrappers in FFWrappers class
# 14/08/xx: return value of OOOptimizer.optimize changed to self.
# CMAOptions now need to uniquely match an *initial substring*
# only (via method corrected_key).
# Bug fix in CMAEvolutionStrategy.stop: termination conditions
# are now recomputed iff check and self.countiter > 0.
# Doc corrected that self.gp.geno _is_ applied to x0
# Vaste reorganization/modularization/improvements of plotting
# 14/08/01: bug fix to guaranty pos. def. in active CMA
# 14/06/04: gradient of f can now be used with fmin and/or ask
# 14/05/11: global rcParams['font.size'] not permanently changed anymore,
# a little nicer annotations for the plots
# 14/05/07: added method result_pretty to pretty print optimization result
# 14/05/06: associated show() everywhere with ion() which should solve the
# blocked terminal problem
# 14/05/05: all instances of "unicode" removed (was incompatible to 3.x)
# 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the
# comments before the code starts
# 14/05/xx: change the order of kwargs of OOOptimizer.optimize,
# remove prepare method in AdaptSigma classes, various changes/cleaning
# 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly
# bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1])
# bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied
# 14/02/27: bug fixed when BoundPenalty was combined with fixed variables.
# 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase,
# to make testing different adaptation rules (much) easier
# 12/12/14: separated CMAOptions and arguments to fmin
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
# timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
# sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
# is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is deprecated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotype (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactorizing
# 10/03/09: upper bound np.exp(min(1,...)) for step-size control
from __future__ import (absolute_import, division, print_function,
) # unicode_literals, with_statement)
# from builtins import ...
from .utilities.python3for2 import range # redefine range in Python 2
import sys
import os
import time # not really essential
import warnings # catch numpy warnings
import ast # for literal_eval
try:
import collections # not available in Python 2.5
except ImportError:
pass
import math
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import inf, array
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed build-in
# import logging
# logging.basicConfig(level=logging.INFO) # only works before logging is used
# logging.info('message') # prints INFO:root:message on red background
# logger = logging.getLogger(__name__) # should not be done during import
# logger.info('message') # prints INFO:cma...:message on red background
# see https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
from . import interfaces
from . import transformations
from . import optimization_tools as ot
from . import sampler
from .constraints_handler import BoundNone, BoundPenalty, BoundTransform, AugmentedLagrangian
from .recombination_weights import RecombinationWeights
from .logger import CMADataLogger # , disp, plot
from .utilities.utils import BlancClass as _BlancClass
from .utilities.utils import rglen #, global_verbosity
from .utilities.utils import pprint
from .utilities.utils import seval as eval
from .utilities.utils import SolutionDict as _SolutionDict
from .utilities.math import Mh
from .sigma_adaptation import *
from . import restricted_gaussian_sampler as _rgs
_where = np.nonzero # to make pypy work, this is how where is used here anyway
del division, print_function, absolute_import #, unicode_literals, with_statement
class InjectionWarning(UserWarning):
"""Injected solutions are not passed to tell as expected"""
# use_archives uses collections
use_archives = sys.version_info[0] >= 3 or sys.version_info[1] >= 6
# use_archives = False # on False some unit tests fail
"""speed up for very large population size. `use_archives` prevents the
need for an inverse gp-transformation, relies on collections module,
not sure what happens if set to ``False``. """
class MetaParameters(object):
"""collection of many meta parameters.
Meta parameters are either annotated constants or refer to
options from `CMAOptions` or are arguments to `fmin` or to the
`NoiseHandler` class constructor.
`MetaParameters` take only effect if the source code is modified by
a meta parameter weaver module searching for ## meta_parameters....
and modifying the next line.
Details
-------
This code contains a single class instance `meta_parameters`
Some interfaces rely on parameters being either `int` or
`float` only. More sophisticated choices are implemented via
``choice_value = {1: 'this', 2: 'or that'}[int_param_value]`` here.
CAVEAT
------
`meta_parameters` should not be used to determine default
arguments, because these are assigned only once and for all during
module import.
"""
def __init__(self):
"""assign settings to be used"""
self.sigma0 = None ## [~0.01, ~10] # no default available
# learning rates and back-ward time horizons
self.CMA_cmean = 1.0 ## [~0.1, ~10] #
self.c1_multiplier = 1.0 ## [~1e-4, ~20] l
self.cmu_multiplier = 2.0 ## [~1e-4, ~30] l # zero means off
self.CMA_active = 1.0 ## [~1e-4, ~10] l # 0 means off, was CMA_activefac
self.cc_multiplier = 1.0 ## [~0.01, ~20] l
self.cs_multiplier = 1.0 ## [~0.01, ~10] l # learning rate for cs
self.CSA_dampfac = 1.0 ## [~0.01, ~10]
self.CMA_dampsvec_fac = None ## [~0.01, ~100] # def=np.Inf or 0.5, not clear whether this is a log parameter
self.CMA_dampsvec_fade = 0.1 ## [0, ~2]
# exponents for learning rates
self.c1_exponent = 2.0 ## [~1.25, 2]
self.cmu_exponent = 2.0 ## [~1.25, 2]
self.cact_exponent = 1.5 ## [~1.25, 2]
self.cc_exponent = 1.0 ## [~0.25, ~1.25]
self.cs_exponent = 1.0 ## [~0.25, ~1.75] # upper bound depends on CSA_clip_length_value
# selection related parameters
self.lambda_exponent = 0.0 ## [0, ~2.5] # usually <= 2, used by adding N**lambda_exponent to popsize-1
self.CMA_elitist = 0 ## [0, 2] i # a choice variable
self.CMA_mirrors = 0.0 ## [0, 0.5) # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
# sampling strategies
# self.CMA_sample_on_sphere_surface = 0 ## [0, 1] i # boolean
self.mean_shift_line_samples = 0 ## [0, 1] i # boolean
self.pc_line_samples = 0 ## [0, 1] i # boolean
        # step-size adaptation related parameters
self.CSA_damp_mueff_exponent = 0.5 ## [~0.25, ~1.5] # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
self.CSA_disregard_length = 0 ## [0, 1] i
self.CSA_squared = 0 ## [0, 1] i
self.CSA_clip_length_value = None ## [0, ~20] # None reflects inf
# noise handling
self.noise_reeval_multiplier = 1.0 ## [0.2, 4] # usually 2 offspring are reevaluated
self.noise_choose_reeval = 1 ## [1, 3] i # which ones to reevaluate
self.noise_theta = 0.5 ## [~0.05, ~0.9]
self.noise_alphasigma = 2.0 ## [0, 10]
self.noise_alphaevals = 2.0 ## [0, 10]
self.noise_alphaevalsdown_exponent = -0.25 ## [-1.5, 0]
self.noise_aggregate = None ## [1, 2] i # None and 0 == default or user option choice, 1 == median, 2 == mean
# TODO: more noise handling options (maxreevals...)
# restarts
self.restarts = 0 ## [0, ~30] # but depends on popsize inc
self.restart_from_best = 0 ## [0, 1] i # bool
self.incpopsize = 2.0 ## [~1, ~5]
# termination conditions (for restarts)
self.maxiter_multiplier = 1.0 ## [~0.01, ~100] l
self.mindx = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any direction, cave interference with tol*',
self.minstd = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any coordinate direction, cave interference with tol*',
self.maxstd = None ## [~1, ~1e9] l #v maximal std in any coordinate direction, default is inf',
self.tolfacupx = 1e3 ## [~10, ~1e9] l #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
self.tolupsigma = 1e20 ## [~100, ~1e99] l #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements',
self.tolx = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in x-changes',
self.tolfun = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value, quite useful',
self.tolfunrel = 0 ## [1e-17, ~1e-2] l #v termination criterion: relative tolerance in function value',
self.tolfunhist = 1e-12 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value history',
self.tolstagnation_multiplier = 1.0 ## [0.01, ~100] # ': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
# abandoned:
# self.noise_change_sigma_exponent = 1.0 ## [0, 2]
# self.noise_epsilon = 1e-7 ## [0, ~1e-2] l #
# self.maxfevals = None ## [1, ~1e11] l # is not a performance parameter
# self.lambda_log_multiplier = 3 ## [0, ~10]
# self.lambda_multiplier = 0 ## (0, ~10]
meta_parameters = MetaParameters()
def is_feasible(x, f):
"""default to check feasibility of f-values.
Used for rejection sampling in method `ask_and_eval`.
:See also: CMAOptions, ``CMAOptions('feas')``.
"""
return f is not None and not np.isnan(f)
if use_archives:
class _CMASolutionDict(_SolutionDict):
def __init__(self, *args, **kwargs):
# _SolutionDict.__init__(self, *args, **kwargs)
super(_CMASolutionDict, self).__init__(*args, **kwargs)
self.last_solution_index = 0
# TODO: insert takes 30% of the overall CPU time, mostly in def key()
# with about 15% of the overall CPU time
def insert(self, key, geno=None, iteration=None, fitness=None,
value=None, cma_norm=None):
"""insert an entry with key ``key`` and value
``value if value is not None else {'geno':key}`` and
``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs.
"""
# archive returned solutions, first clean up archive
if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1:
self.truncate(300, iteration - 3)
elif value is not None and value.get('iteration'):
iteration = value['iteration']
if (iteration % 10) < 1:
self.truncate(300, iteration - 3)
self.last_solution_index += 1
if value is not None:
try:
iteration = value['iteration']
except:
pass
if iteration is not None:
if iteration > self.last_iteration:
self.last_solution_index = 0
self.last_iteration = iteration
else:
iteration = self.last_iteration + 0.5 # a hack to get a somewhat reasonable value
if value is not None:
self[key] = value
else:
self[key] = {'pheno': key}
if geno is not None:
self[key]['geno'] = geno
if iteration is not None:
self[key]['iteration'] = iteration
if fitness is not None:
self[key]['fitness'] = fitness
if cma_norm is not None:
self[key]['cma_norm'] = cma_norm
return self[key]
else: # if not use_archives:
class _CMASolutionDict(dict):
"""a hack to get most code examples running"""
def insert(self, *args, **kwargs):
pass
def get(self, key):
return None
def __getitem__(self, key):
return None
def __setitem__(self, key, value):
pass
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
_debugging = False # not in use
_new_injections = True
_assertions_quadratic = True # issue warnings
_assertions_cubic = True
_depreciated = True
def cma_default_options_( # to get keyword completion back
# the follow string arguments are evaluated if they do not contain "filename"
AdaptSigma='True # or False or any CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA, CMAAdaptSigmaCSA',
CMA_active='True # negative update, conducted after the original update',
# CMA_activefac='1 # learning rate multiplier for active update',
CMA_cmean='1 # learning rate for the mean value',
CMA_const_trace='False # normalize trace, 1, True, "arithm", "geom", "aeig", "geig" are valid',
CMA_diagonal='0*100*N/popsize**0.5 # nb of iterations with diagonal covariance matrix, True for always', # TODO 4/ccov_separable?
CMA_eigenmethod='np.linalg.eigh # or cma.utilities.math.eig or pygsl.eigen.eigenvectors',
CMA_elitist='False #v or "initial" or True, elitism likely impairs global search performance',
CMA_injections_threshold_keep_len='1 #v keep length if Mahalanobis length is below the given relative threshold',
CMA_mirrors='popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
CMA_mirrormethod='2 # 0=unconditional, 1=selective, 2=selective with delay',
CMA_mu='None # parents selection parameter, default is popsize // 2',
CMA_on='1 # multiplier for all covariance matrix updates',
# CMA_sample_on_sphere_surface='False #v replaced with option randn=cma.utilities.math.randhss, all mutation vectors have the same length, currently (with new_sampling) not in effect',
CMA_sampler='None # a class or instance that implements the interface of `cma.interfaces.StatisticalModelSamplerWithZeroMeanBaseClass`',
CMA_sampler_options='{} # options passed to `CMA_sampler` class init as keyword arguments',
CMA_rankmu='1.0 # multiplier for rank-mu update learning rate of covariance matrix',
CMA_rankone='1.0 # multiplier for rank-one update learning rate of covariance matrix',
CMA_recombination_weights='None # a list, see class RecombinationWeights, overwrites CMA_mu and popsize options',
CMA_dampsvec_fac='np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update',
CMA_dampsvec_fade='0.1 # tentative fading out parameter for sigma vector update',
CMA_teststds='None # factors for non-isotropic initial distr. of C, mainly for test purpose, see CMA_stds for production',
CMA_stds='None # multipliers for sigma0 in each coordinate, not represented in C, makes scaling_of_variables obsolete',
# CMA_AII='False # not yet tested',
CSA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere',
CSA_damp_mueff_exponent='0.5 # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
CSA_disregard_length='False #v True is untested, also changes respective parameters',
CSA_clip_length_value='None #v poorly tested, [0, 0] means const length N**0.5, [-1, 1] allows a variation of +- N/(N+2), etc.',
CSA_squared='False #v use squared length for sigma-adaptation ',
BoundaryHandler='BoundTransform # or BoundPenalty, unused when ``bounds in (None, [None, None])``',
bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector',
# , eval_parallel2='not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True} # distributes function calls to processes processes'
# 'callback='None # function or list of functions called as callback(self) at the end of the iteration (end of tell)', # only necessary in fmin and optimize
conditioncov_alleviate='[1e8, 1e12] # when to alleviate the condition in the coordinates and in main axes',
eval_final_mean='True # evaluate the final mean, which is a favorite return candidate',
fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized',
ftarget='-inf #v target function value, minimization',
integer_variables='[] # index list, invokes basic integer handling: prevent std dev to become too small in the given variables',
is_feasible='is_feasible #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)',
maxfevals='inf #v maximum number of function evaluations',
maxiter='100 + 150 * (N+3)**2 // popsize**0.5 #v maximum number of iterations',
mean_shift_line_samples='False #v sample two new solutions collinear to previous mean shift',
mindx='0 #v minimal std in any arbitrary direction, cave interference with tol*',
minstd='0 #v minimal std (scalar or vector) in any coordinate direction, cave interference with tol*',
maxstd='inf #v maximal std in any coordinate direction',
pc_line_samples='False #v one line sample along the evolution path pc',
popsize='4+int(3*np.log(N)) # population size, AKA lambda, number of new solution per iteration',
randn='np.random.randn #v randn(lam, N) must return an np.array of shape (lam, N), see also cma.utilities.math.randhss',
scaling_of_variables='''None # deprecated, rather use fitness_transformations.ScaleCoordinates instead (or possibly CMA_stds).
Scale for each variable in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by
scaling_of_variables and sigma is unchanged, default is `np.ones(N)`''',
seed='time # random number seed for `numpy.random`; `None` and `0` equate to `time`, `np.nan` means "do nothing", see also option "randn"',
signals_filename='cma_signals.in # read versatile options from this file (use `None` or `""` for no file) which contains a single options dict, e.g. ``{"timeout": 0}`` to stop, string-values are evaluated, e.g. "np.inf" is valid',
termination_callback='[] #v a function or list of functions returning True for termination, called in `stop` with `self` as argument, could be abused for side effects',
timeout='inf #v stop if timeout seconds are exceeded, the string "2.5 * 60**2" evaluates to 2 hours and 30 minutes',
tolconditioncov='1e14 #v stop if the condition of the covariance matrix is above `tolconditioncov`',
tolfacupx='1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(eigenvals(C)**0.5) indicates "creeping behavior" with usually minor improvements',
tolflatfitness='1 #v iterations tolerated with flat fitness before termination',
tolfun='1e-11 #v termination criterion: tolerance in function value, quite useful',
tolfunhist='1e-12 #v termination criterion: tolerance in function value history',
tolfunrel='0 #v termination criterion: relative tolerance in function value: Delta f current < tolfunrel * (median0 - median_min)',
tolstagnation='int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
tolx='1e-11 #v termination criterion: tolerance in x-changes',
transformation='''None # deprecated, use cma.fitness_transformations.FitnessTransformation instead.
[t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno),
t1 is the (optional) back transformation, see class GenoPheno''',
typical_x='None # used with scaling_of_variables',
updatecovwait='None #v number of iterations without distribution update, name is subject to future changes', # TODO: rename: iterwaitupdatedistribution?
verbose='3 #v verbosity e.g. of initial/final message, -1 is very quiet, -9 maximally quiet, may not be fully implemented',
verb_append='0 # initial evaluation counter, if append, do not overwrite output files',
verb_disp='100 #v verbosity: display console output every verb_disp iteration',
verb_filenameprefix=CMADataLogger.default_prefix + ' # output path and filenames prefix',
verb_log='1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions',
verb_log_expensive='N * (N <= 50) # allow to execute eigendecomposition for logging every verb_log_expensive iteration, 0 or False for never',
verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration',
verb_time='True #v output timings on console',
vv='{} #? versatile set or dictionary for hacking purposes, value found in self.opts["vv"]'
):
"""use this function to get keyword completion for `CMAOptions`.
``cma.CMAOptions('substr')`` provides even substring search.
returns default options as a `dict` (not a `cma.CMAOptions` `dict`).
"""
return dict(locals()) # is defined before and used by CMAOptions, so it can't return CMAOptions
cma_default_options = cma_default_options_() # will later be reassigned as CMAOptions(dict)
cma_versatile_options = tuple(sorted(k for (k, v) in cma_default_options.items()
if v.find(' #v ') > 0))
cma_allowed_options_keys = dict([s.lower(), s] for s in cma_default_options)
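# Illustrative sketch (not part of the API): each default above is a string whose
# value part precedes the first '#' and whose description follows it; the marker
# ' #v ' flags a versatile option. For example:
#
#     val_str = cma_default_options['tolfun']        # '1e-11 #v termination criterion: ...'
#     value = eval(val_str.split('#')[0].strip())    # -> 1e-11
#     is_versatile = val_str.find(' #v ') > 0        # -> True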
class CMAOptions(dict):
"""a dictionary with the available options and their default values
for class `CMAEvolutionStrategy`.
``CMAOptions()`` returns a `dict` with all available options and their
default values with a comment string.
``CMAOptions('verb')`` returns a subset of recognized options that
contain 'verb' in their keyword name or (default) value or
description.
``CMAOptions(opts)`` returns the subset of recognized options in
``dict(opts)``.
Option values can be "written" in a string and, when passed to `fmin`
or `CMAEvolutionStrategy`, are evaluated using "N" and "popsize" as
known values for dimension and population size (sample size, number
of new solutions per iteration). All default option values are given
as such a string.
Details
-------
`CMAOptions` entries starting with ``tol`` are termination
"tolerances".
For `tolstagnation`, the medians over the first and the second half
of at least `tolstagnation` iterations are compared, both for the
per-iteration best and for the per-iteration median function value.
Example
-------
::
import cma
cma.CMAOptions('tol')
is a shortcut for ``cma.CMAOptions().match('tol')`` that returns all
options that contain 'tol' in their name or description.
To set an option::
import cma
opts = cma.CMAOptions()
opts.set('tolfun', 1e-12)
opts['tolx'] = 1e-11
todo: this class is overly complex and should be re-written, possibly
with reduced functionality.
:See also: `fmin` (), `CMAEvolutionStrategy`, `_CMAParameters`
"""
# @classmethod # self is the class, not the instance
# @property
# def default(self):
# """returns all options with defaults"""
# return fmin([],[])
@staticmethod
def defaults():
"""return a dictionary with default option values and description"""
return cma_default_options
# return dict((str(k), str(v)) for k, v in cma_default_options_().items())
# getting rid of the u of u"name" by str(u"name")
# return dict(cma_default_options)
@staticmethod
def versatile_options():
"""return list of options that can be changed at any time (not
only be initialized).
Consider that this list might not be entirely up
to date.
The string ' #v ' in the default value indicates a versatile
option that can be changed any time, however a string will not
necessarily be evaluated again.
"""
return cma_versatile_options
# return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
def check(self, options=None):
"""check for ambiguous keys and move attributes into dict"""
self.check_values(options)
self.check_attributes(options)
self.check_values(options)
return self
def check_values(self, options=None):
corrected_key = CMAOptions().corrected_key # caveat: infinite recursion
validated_keys = []
original_keys = []
if options is None:
options = self
for key in options:
correct_key = corrected_key(key)
if correct_key is None:
raise ValueError("""%s is not a valid option.\n"""
'Valid options are %s' %
(key, str(list(cma_default_options))))
if correct_key in validated_keys:
if key == correct_key:
key = original_keys[validated_keys.index(key)]
raise ValueError("%s was not a unique key for %s option"
% (key, correct_key))
validated_keys.append(correct_key)
original_keys.append(key)
return options
def check_attributes(self, opts=None):
"""check for attributes and moves them into the dictionary"""
if opts is None:
opts = self
if 11 < 3:
if hasattr(opts, '__dict__'):
for key in opts.__dict__:
if key not in self._attributes:
raise ValueError("""
Assign options with ``opts['%s']``
instead of ``opts.%s``
""" % (opts.__dict__.keys()[0],
opts.__dict__.keys()[0]))
return self
else:
# the problem with merge is that ``opts['ftarget'] = new_value``
# would be overwritten by the old ``opts.ftarget``.
# The solution here is to empty opts.__dict__ after the merge
if hasattr(opts, '__dict__'):
for key in list(opts.__dict__):
if key in self._attributes:
continue
utils.print_warning(
"""
An option attribute has been merged into the dictionary,
thereby possibly overwriting the dictionary value, and the
attribute has been removed. Assign options with
``opts['%s'] = value`` # dictionary assignment
or use
``opts.set('%s', value) # here isinstance(opts, CMAOptions)
instead of
``opts.%s = value`` # attribute assignment
""" % (key, key, key), 'check', 'CMAOptions')
opts[key] = opts.__dict__[key] # getattr(opts, key)
delattr(opts, key) # is that kosher?
# delattr is necessary to prevent that the attribute
# overwrites the dict entry later again
return opts
def __init__(self, s=None, **kwargs):
"""return an `CMAOptions` instance.
Return default options if ``s is None and not kwargs``,
or all options whose name or description contains `s`, if
`s` is a (search) string (case is disregarded in the match),
or with entries from dictionary `s` as options,
or with kwargs as options if ``s is None``,
in any of the latter cases not complemented with default options
or settings.
Returns: see above.
Details: as several options start with ``'s'``, ``s=value`` is
not valid as an option setting.
"""
# if not CMAOptions.defaults: # this is different from self.defaults!!!
# CMAOptions.defaults = fmin([],[])
if s is None and not kwargs:
super(CMAOptions, self).__init__(CMAOptions.defaults()) # dict.__init__(self, CMAOptions.defaults()) should be the same
# self = CMAOptions.defaults()
s = 'nocheck'
elif utils.is_str(s) and not s.startswith('unchecked'):
super(CMAOptions, self).__init__(CMAOptions().match(s))
# we could return here
s = 'nocheck'
elif isinstance(s, dict):
if kwargs:
raise ValueError('Dictionary argument must be the only argument')
super(CMAOptions, self).__init__(s)
elif kwargs and (s is None or s.startswith('unchecked')):
super(CMAOptions, self).__init__(kwargs)
else:
raise ValueError('The first argument must be a string or a dict or a keyword argument or `None`')
if not utils.is_str(s) or not s.startswith(('unchecked', 'nocheck')):
# was main offender
self.check() # caveat: infinite recursion
for key in list(self.keys()):
correct_key = self.corrected_key(key)
if correct_key not in CMAOptions.defaults():
utils.print_warning('invalid key ``' + str(key) +
'`` removed', '__init__', 'CMAOptions')
self.pop(key)
elif key != correct_key:
self[correct_key] = self.pop(key)
# self.evaluated = False # would become an option entry
self._lock_setting = False
self._attributes = self.__dict__.copy() # are not valid keys
self._attributes['_attributes'] = len(self._attributes)
def init(self, dict_or_str, val=None, warn=True):
"""initialize one or several options.
Arguments
---------
`dict_or_str`
a dictionary if ``val is None``, otherwise a key.
If `val` is provided `dict_or_str` must be a valid key.
`val`
value for key
Details
-------
Only known keys are accepted. Known keys are in `CMAOptions.defaults()`
"""
# dic = dict_or_key if val is None else {dict_or_key:val}
self.check(dict_or_str)
dic = dict_or_str
if val is not None:
dic = {dict_or_str:val}
for key, val in dic.items():
key = self.corrected_key(key)
if key not in CMAOptions.defaults():
# TODO: find a better solution?
if warn:
print('Warning in cma.CMAOptions.init(): key ' +
str(key) + ' ignored')
else:
self[key] = val
return self
def set(self, dic, val=None, force=False):
"""assign versatile options.
Method `CMAOptions.versatile_options` () gives the versatile
options, use `init()` to set the others.
Arguments
---------
`dic`
either a dictionary or a key. In the latter
case, `val` must be provided
`val`
value for `key`, approximate match is sufficient
`force`
force setting of non-versatile options, use with caution
This method will be most probably used with the ``opts`` attribute of
a `CMAEvolutionStrategy` instance.
"""
if val is not None: # dic is a key in this case
dic = {dic:val} # compose a dictionary
for key_original, val in list(dict(dic).items()):
key = self.corrected_key(key_original)
if (not self._lock_setting or
key in CMAOptions.versatile_options() or
force):
self[key] = val
else:
utils.print_warning('key ' + str(key_original) +
' ignored (not recognized as versatile)',
'set', 'CMAOptions')
return self # to allow o = CMAOptions(o).set(new)
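# Hedged usage sketch (assumes the public `cma` namespace): before `evalall`
# locks the instance, any known key can be set; afterwards only versatile
# options pass without ``force=True``.
#
#     opts = cma.CMAOptions()
#     opts.set('verb_disp', 200)     # 'verb_disp' carries the ' #v ' marker
#     opts.set({'tolfun': 1e-9})     # a dict argument works as well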
def complement(self):
"""add all missing options with their default values"""
# add meta-parameters, given options have priority
self.check()
for key in CMAOptions.defaults():
if key not in self:
self[key] = CMAOptions.defaults()[key]
return self
@property
def settable(self):
"""return the subset of those options that are settable at any
time.
Settable options are in `versatile_options` (), but the
list might be incomplete.
"""
return CMAOptions(dict(i for i in list(self.items())
if i[0] in CMAOptions.versatile_options()))
def __call__(self, key, default=None, loc=None):
"""evaluate and return the value of option `key` on the fly, or
return those options whose name or description contains `key`,
case disregarded.
Details
-------
Keys that contain `filename` are not evaluated.
For ``loc==None``, `self` is used as environment
but this does not define ``N``.
:See: `eval()`, `evalall()`
"""
try:
val = self[key]
except:
return self.match(key)
if loc is None:
loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing
try:
if utils.is_str(val):
val = val.split('#')[0].strip() # remove comments
if key.find('filename') < 0:
# and key.find('mindx') < 0:
val = eval(val, globals(), loc)
# invoke default
# TODO: val in ... fails with array type, because it is applied element wise!
# elif val in (None,(),[],{}) and default is not None:
elif val is None and default is not None:
val = eval(str(default), globals(), loc)
except:
pass # slightly optimistic: the previous is bug-free
return val
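# Minimal sketch of on-the-fly evaluation (results depend on the defaults above):
#
#     opts = cma.CMAOptions()
#     opts('popsize', loc={'N': 5})    # evaluates '4+int(3*np.log(N))' -> 8
#     opts('verb_filenameprefix')      # keys containing 'filename' are not passed through eval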
def corrected_key(self, key):
"""return the matching valid key, if ``key.lower()`` is a unique
starting sequence to identify the valid key, ``else None``
"""
matching_keys = []
key = key.lower() # this was somewhat slow, so it is speed optimized now
if key in cma_allowed_options_keys:
return cma_allowed_options_keys[key]
for allowed_key in cma_allowed_options_keys:
if allowed_key.startswith(key):
if len(matching_keys) > 0:
return None
matching_keys.append(allowed_key)
return matching_keys[0] if len(matching_keys) == 1 else None
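# Behaviour sketch (assuming the defaults above): exact keys are returned
# directly, a unique lower-case prefix is completed, an ambiguous prefix gives None:
#
#     CMAOptions().corrected_key('maxfevals')  # -> 'maxfevals'
#     CMAOptions().corrected_key('maxf')       # -> 'maxfevals' (unique prefix)
#     CMAOptions().corrected_key('tol')        # -> None (several options start with 'tol')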
def eval(self, key, default=None, loc=None, correct_key=True):
"""Evaluates and sets the specified option value in
environment `loc`. Many options need ``N`` to be defined in
`loc`, some need `popsize`.
Details
-------
Keys that contain 'filename' are not evaluated.
If `loc` is None, the self-dict is used as environment
:See: `evalall()`, `__call__`
"""
# TODO: try: loc['dim'] = loc['N'] etc
if correct_key:
# in_key = key # for debugging only
key = self.corrected_key(key)
self[key] = self(key, default, loc)
return self[key]
def evalall(self, loc=None, defaults=None):
"""Evaluates all option values in environment `loc`.
:See: `eval()`
"""
self.check()
if defaults is None:
defaults = cma_default_options_()
# TODO: this needs rather the parameter N instead of loc
if 'N' in loc: # TODO: __init__ of CMA can be simplified
popsize = self('popsize', defaults['popsize'], loc)
for k in list(self.keys()):
k = self.corrected_key(k)
self.eval(k, defaults[k],
{'N':loc['N'], 'popsize':popsize})
self._lock_setting = True
return self
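# Minimal usage sketch (assumed values, from the default strings above): after
# `complement`, all string defaults can be evaluated for a given dimension, e.g.
#
#     opts = cma.CMAOptions().complement()
#     opts.evalall({'N': 10})
#     opts['popsize']   # -> 10, from '4+int(3*np.log(N))' with N == 10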
def match(self, s=''):
"""return all options that match, in the name or the description,
with string `s`, case is disregarded.
Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
options.
"""
match = s.lower()
res = {}
for k in sorted(self):
s = str(k) + '=\'' + str(self[k]) + '\''
if match in s.lower():
res[k] = self[k]
return CMAOptions(res)
@property
def to_namedtuple(self):
"""return options as const attributes of the returned object,
only useful for inspection. """
raise NotImplementedError
# return collections.namedtuple('CMAOptionsNamedTuple',
# self.keys())(**self)
def from_namedtuple(self, t):
"""update options from a `collections.namedtuple`.
:See also: `to_namedtuple`
"""
return self.update(t._asdict())
def pprint(self, linebreak=80):
for i in sorted(self.items()):
s = str(i[0]) + "='" + str(i[1]) + "'"
a = s.split(' ')
# print s in chunks
l = '' # start entirely to the left
while a:
while a and len(l) + len(a[0]) < linebreak:
l += ' ' + a.pop(0)
print(l)
l = ' ' # tab for subsequent lines
try:
collections.namedtuple
except:
pass
else:
class CMAEvolutionStrategyResult(collections.namedtuple(
'CMAEvolutionStrategyResult', [
'xbest',
'fbest',
'evals_best',
'evaluations',
'iterations',
'xfavorite',
'stds',
'stop',
])):
"""A results tuple from `CMAEvolutionStrategy` property ``result``.
This tuple contains in the given position and as attribute
- 0 ``xbest`` best solution evaluated
- 1 ``fbest`` objective function value of best solution
- 2 ``evals_best`` evaluation count when ``xbest`` was evaluated
- 3 ``evaluations`` evaluations overall done
- 4 ``iterations``
- 5 ``xfavorite`` distribution mean in "phenotype" space, to be
considered as current best estimate of the optimum
- 6 ``stds`` effective standard deviations, can be used to
compute a lower bound on the expected coordinate-wise distance
to the true optimum, which is (very) approximately stds[i] *
dimension**0.5 / min(mueff, dimension) / 1.5 / 5 ~ std_i *
dimension**0.5 / min(popsize / 2, dimension) / 5, where
dimension = CMAEvolutionStrategy.N and mueff =
CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
- 7 ``stop`` termination conditions in a dictionary
The penalized best solution of the last completed iteration can be
accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
and the respective objective function value via ``fit.fit[0]``.
Details:
- This class is of purely declarative nature and for providing
this docstring. It does not provide any further functionality.
- ``list(fit.fit).find(0)`` is the index of the first sampled
solution of the last completed iteration in ``pop_sorted``.
"""
cma_default_options = CMAOptions(cma_default_options_())
class _CMAEvolutionStrategyResult(tuple):
"""A results tuple from `CMAEvolutionStrategy` property ``result``.
This tuple contains in the given position
- 0 best solution evaluated, ``xbest``
- 1 objective function value of best solution, ``f(xbest)``
- 2 evaluation count when ``xbest`` was evaluated
- 3 evaluations overall done
- 4 iterations
- 5 distribution mean in "phenotype" space, to be considered as
current best estimate of the optimum
- 6 effective standard deviations, give a lower bound on the expected
coordinate-wise distance to the true optimum of (very) approximately
std_i * dimension**0.5 / min(mueff, dimension) / 1.2 / 5
~ std_i * dimension**0.5 / min(popsize / 0.4, dimension) / 5, where
mueff = CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
The penalized best solution of the last completed iteration can be
accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
and the respective objective function value via ``fit.fit[0]``.
Details:
- This class is of purely declarative nature and for providing this
docstring. It does not provide any further functionality.
- ``list(fit.fit).find(0)`` is the index of the first sampled solution
of the last completed iteration in ``pop_sorted``.
""" # here starts the code: (beating the code folding glitch)
# remark: a tuple is immutable, hence we cannot change it anymore
# in __init__. This would work if we inherited from a `list`.
@staticmethod
def _generate(self):
"""return a results tuple of type `CMAEvolutionStrategyResult`.
`_generate` is a surrogate for the ``__init__`` method, which
cannot be used to initialize the immutable `tuple` super class.
"""
return _CMAEvolutionStrategyResult(
self.best.get() + ( # (x, f, evals) triple
self.countevals,
self.countiter,
self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair),
self.gp.scales * self.sigma * self.sigma_vec.scaling *
self.dC**0.5))
class CMAEvolutionStrategy(interfaces.OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
Calling Sequences
=================
- ``es = CMAEvolutionStrategy(x0, sigma0)``
- ``es = CMAEvolutionStrategy(x0, sigma0, opts)``
- ``es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)``
- ::
res = CMAEvolutionStrategy(x0, sigma0,
opts).optimize(objective_fct).result
Arguments
=========
`x0`
initial solution, starting point. `x0` is given as "phenotype"
which means, if::
opts = {'transformation': [transform, inverse]}
is given and ``inverse is None``, the initial mean is not
consistent with `x0` in that ``transform(mean)`` does not
equal `x0` unless ``transform(mean)`` equals ``mean``.
`sigma0`
initial standard deviation. The problem variables should
have been scaled, such that a single standard deviation
on all variables is useful and the optimum is expected to
lie within about `x0` +- ``3*sigma0``. Often one wants to
check for solutions close to the initial point. This allows,
for example, for an easier check of consistency of the
objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `CMAOptions`.
Main interface / usage
======================
The interface is inherited from the generic `OOOptimizer`
class (see also there). An object instance is generated from::
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)
The least verbose interface is via the optimize method::
es.optimize(objective_func)
res = es.result
More verbosely, the optimization is done using the
methods `stop`, `ask`, and `tell`::
while not es.stop():
solutions = es.ask()
es.tell(solutions, [cma.ff.rosen(s) for s in solutions])
es.disp()
es.result_pretty()
where `ask` delivers new candidate solutions and `tell` updates
the `optim` instance by passing the respective function values
(the objective function `cma.ff.rosen` can be replaced by any
properly defined objective function, see `cma.ff` for more
examples).
To change an option, for example a termination condition to
continue the optimization, call::
es.opts.set({'tolfacupx': 1e4})
The class `CMAEvolutionStrategy` also provides::
(solutions, func_values) = es.ask_and_eval(objective_func)
and an entire optimization can also be written like::
while not es.stop():
es.tell(*es.ask_and_eval(objective_func))
Besides termination criteria, in CMA-ES only the ranks of the
`func_values` are relevant.
Attributes and Properties
=========================
- `inputargs`: passed input arguments
- `inopts`: passed options
- `opts`: actually used options, some of them can be changed any
time via ``opts.set``, see class `CMAOptions`
- `popsize`: population size lambda, number of candidate
solutions returned by `ask` ()
- `logger`: a `CMADataLogger` instance utilized by `optimize`
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
... # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234...)
and optimize the ellipsoid function
>>> es.optimize(cma.ff.elli, verb_disp=1) # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 2.09...
>>> assert len(es.result) == 8
>>> assert es.result[1] < 1e-9
The optimization loop can also be written explicitly:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1) # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=...
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.ff.elli(x) for x in X])
... es.disp() # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 ...
achieving the same result as above.
An example with lower bounds (at zero) and handling infeasible
solutions:
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5,
... {'bounds': [0, np.inf]}) #doctest: +ELLIPSIS
(5_w,...
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = None
... while curr_fit in (None, np.NaN):
... x = es.ask(1)[0]
... curr_fit = cma.ff.somenan(x, cma.ff.elli) # might return np.NaN
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... es.logger.add()
... es.disp() #doctest: +ELLIPSIS
Itera...
>>>
>>> assert es.result[1] < 1e-9
>>> assert es.result[2] < 9000 # by internal termination
>>> # es.logger.plot() # will plot data
>>> # cma.s.figshow() # display plot window
An example with user-defined transformation, in this case to realize
a lower bound of 2.
>>> import warnings
>>> with warnings.catch_warnings(record=True) as warns:
... es = cma.CMAEvolutionStrategy(5 * [3], 0.1,
... {"transformation": [lambda x: x**2+1.2, None],
... "verbose": -2,})
>>> warns[0].message # doctest:+ELLIPSIS
UserWarning('in class GenoPheno: user defined transformations have not been tested thoroughly ()'...
>>> warns[1].message # doctest:+ELLIPSIS
UserWarning('computed initial point...
>>> es.optimize(cma.ff.rosen, verb_disp=0) #doctest: +ELLIPSIS
<cma...
>>> assert cma.ff.rosen(es.result[0]) < 1e-7 + 5.54781521192
>>> assert es.result[2] < 3300
The inverse transformation is (only) necessary if the `BoundPenalty`
boundary handler is used at the same time.
The `CMAEvolutionStrategy` class also provides a default logger
(cave: files are overwritten when the logger is used with the same
filename prefix):
>>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
>>> es.logger.disp_header() # annotate the print of disp
Iterat Nfevals function value axis ratio maxstd minstd
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.ff.sphere(x) for x in X])
... es.logger.add() # log current iteration
... es.logger.disp([-1]) # display info for last iteration #doctest: +ELLIPSIS
1 ...
>>> es.logger.disp_header()
Iterat Nfevals function value axis ratio maxstd minstd
>>> # es.logger.plot() # will make a plot
Example implementing restarts with increasing popsize (IPOP):
>>> bestever = cma.optimization_tools.BestSolution()
>>> for lam in 10 * 2**np.arange(8): # 10, 20, 40, 80, ..., 10 * 2**7
... es = cma.CMAEvolutionStrategy(6 - 8 * np.random.rand(4), # 4-D
... 5, # initial std sigma0
... {'popsize': lam, # options
... 'verb_append': bestever.evalsall})
... # logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.ff.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... # logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.s.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... # logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break #doctest: +ELLIPSIS
(5_w,...
>>> assert es.result[1] < 1e-8
On the Rastrigin function, usually after five restarts the global
optimum is located.
Using the `multiprocessing` module, we can evaluate the function in
parallel with a simple modification of the example (however
multiprocessing does not always seem reliable):
>>> from cma.fitness_functions import elli # cannot be an instance method
>>> from cma.optimization_tools import EvalParallel2
>>> es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10}) # doctest:+ELLIPSIS
(6_w,13)-aCMA-ES (mu_w=...
>>> with EvalParallel2(elli, es.popsize + 1) as eval_all:
... while not es.stop():
... X = es.ask()
... es.tell(X, eval_all(X))
... es.disp()
... # es.logger.add() # doctest:+ELLIPSIS
Iterat...
The final example shows how to resume:
>>> import pickle
>>>
>>> es0 = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.12) # initial std sigma0
... #doctest: +ELLIPSIS
(5_w,...
>>> es0.optimize(cma.ff.rosen, iterations=100) #doctest: +ELLIPSIS
I...
>>> s = es0.pickle_dumps() # return pickle.dumps(es) with safeguards
>>> # save string s to file like open(filename, 'wb').write(s)
>>> del es0 # let's start fresh
>>> # s = open(filename, 'rb').read() # load string s from file
>>> es = pickle.loads(s) # read back es instance from string
>>> # resuming
>>> es.optimize(cma.ff.rosen, verb_disp=200) #doctest: +ELLIPSIS
200 ...
>>> assert es.result[2] < 15000
>>> assert cma.s.Mh.vequals_approximately(es.result[0], 12 * [1], 1e-5)
>>> assert len(es.result) == 8
Details
=======
The following two enhancements are implemented, the latter is turned
on by default for very small population size only.
*Active CMA* is implemented with option ``CMA_active`` and
conducts an update of the covariance matrix with negative weights.
The negative update is implemented, such that positive definiteness
is guaranteed. A typical speed up factor (number of f-evaluations)
is between 1.1 and two.
References: Jastrebski and Arnold, Improving evolution strategies
through active covariance matrix adaptation, CEC 2006.
Hansen, The CMA evolution strategy: a tutorial, arXiv 2016.
*Selective mirroring* is implemented with option ``CMA_mirrors``
in the method `get_mirror` and `get_selective_mirrors`.
The method `ask_and_eval` (used by `fmin`) will then sample
selectively mirrored vectors within the iteration
(``CMA_mirrormethod==1``). Otherwise, or if ``CMA_mirrormethod==2``,
selective mirrors are injected for the next iteration.
In selective mirroring, only the worst solutions are mirrored. With
the default small number of mirrors, *pairwise selection* (where at
most one of the two mirrors contributes to the update of the
distribution mean) is implicitly guaranteed under selective
mirroring and therefore not explicitly implemented.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
:See also: `fmin` (), `OOOptimizer`, `CMAOptions`, `plot` (), `ask` (),
`tell` (), `ask_and_eval` ()
""" # here starts the code: (beating the code folding glitch)
@property # read only attribute decorator for a method
def popsize(self):
"""number of samples by default returned by `ask` ()
"""
return self.sp.popsize
# this is not compatible with python2.5:
# @popsize.setter
# def popsize(self, p):
# """popsize cannot be set (this might change in future)
# """
# raise RuntimeError("popsize cannot be changed")
def stop(self, check=True, ignore_list=(), check_in_same_iteration=False,
get_value=None):
"""return the termination status as dictionary.
With ``check == False``, the termination conditions are not checked
and the status might not reflect the current situation.
``check_in_same_iteration == False`` (new) does not re-check during
the same iteration. When termination options are manually changed,
it must be set to `True` to advance afterwards.
``stop().clear()`` removes the currently active termination
conditions.
As a convenience feature, keywords in `ignore_list` are removed from
the conditions.
If `get_value` is set to a condition name (not the empty string),
`stop` does not update the termination dictionary but returns the
measured value that would be compared to the threshold. This only
works for some conditions, like 'tolx'. If the condition name is
not known or cannot be computed, `None` is returned and no warning
is issued.
Testing `get_value` functionality:
>>> import cma
>>> es = cma.CMAEvolutionStrategy(2 * [1], 1e4, {'verbose': -9})
>>> with warnings.catch_warnings(record=True) as w:
... es.stop(get_value='tolx') # triggers zero iteration warning
... assert len(w) == 1, [str(wi) for wi in w]
>>> es = es.optimize(cma.ff.sphere, iterations=4)
>>> assert 1e3 < es.stop(get_value='tolx') < 1e4, es.stop(get_value='tolx')
>>> assert es.stop() == {}
>>> assert es.stop(get_value='catch 22') is None
"""
if (check and self.countiter > 0 and self.opts['termination_callback'] and
self.opts['termination_callback'] != str(self.opts['termination_callback'])):
self.callbackstop = utils.ListOfCallables(self.opts['termination_callback'])(self)
self._stopdict._get_value = get_value # a hack to avoid passing arguments down to _add_stop and back
# check_in_same_iteration == False makes como code much faster
res = self._stopdict(self, check_in_same_iteration or get_value or ( # update the stopdict and return a Dict (self)
check and self.countiter != self._stopdict.lastiter))
if ignore_list:
for key in ignore_list:
res.pop(key, None)
if get_value: # deliver _value and reset
res, self._stopdict._value = self._stopdict._value, None
return res
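# Hedged sketch of the `ignore_list` convenience, e.g. to keep iterating although
# 'tolfun' would otherwise terminate the loop:
#
#     while not es.stop(ignore_list=['tolfun', 'tolfunhist']):
#         es.tell(*es.ask_and_eval(cma.ff.elli))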
def __init__(self, x0, sigma0, inopts=None):
"""see class `CMAEvolutionStrategy`
"""
self.inputargs = dict(locals()) # for the record
del self.inputargs['self'] # otherwise the instance self has a cyclic reference
if inopts is None:
inopts = {}
self.inopts = inopts
opts = CMAOptions(inopts).complement() # CMAOptions() == fmin([],[]) == defaultOptions()
if opts.eval('verbose') is None:
opts['verbose'] = CMAOptions()['verbose']
utils.global_verbosity = global_verbosity = opts.eval('verbose')
if global_verbosity < -8:
opts['verb_disp'] = 0
opts['verb_log'] = 0
opts['verb_plot'] = 0
if 'noise_handling' in opts and opts.eval('noise_handling'):
raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
if 'restarts' in opts and opts.eval('restarts'):
raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
self._set_x0(x0) # manage weird shapes, set self.x0
self.N_pheno = len(self.x0)
self.sigma0 = sigma0
if utils.is_str(sigma0):
raise ValueError("sigma0 must be a scalar, a string is no longer permitted")
# self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2'
if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
raise ValueError('input argument sigma0 must be (or evaluate to) a scalar')
self.sigma = self.sigma0 # goes to initialize
# extract/expand options
N = self.N_pheno
if utils.is_str(opts['fixed_variables']):
opts['fixed_variables'] = ast.literal_eval(
opts['fixed_variables'])
assert (isinstance(opts['fixed_variables'], dict)
or opts['fixed_variables'] is None)
if isinstance(opts['fixed_variables'], dict):
N = self.N_pheno - len(opts['fixed_variables'])
opts.evalall(locals()) # using only N
if np.isinf(opts['CMA_diagonal']):
opts['CMA_diagonal'] = True
self.opts = opts
self.randn = opts['randn']
if not utils.is_nan(opts['seed']):
if self.randn is np.random.randn:
if not opts['seed'] or opts['seed'] is time:
np.random.seed()
six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
opts['seed'] = int(1e5 * np.random.rand() + six_decimals
+ 1e5 * (time.time() % 1))
np.random.seed(opts['seed']) # a printable seed
elif opts['seed'] not in (None, time):
utils.print_warning("seed=%s will never be used (seed is only used if option 'randn' is np.random.randn)"
% str(opts['seed']))
self.gp = transformations.GenoPheno(self.N_pheno,
opts['scaling_of_variables'],
opts['typical_x'],
opts['fixed_variables'],
opts['transformation'])
self.boundary_handler = opts['BoundaryHandler']
if isinstance(self.boundary_handler, type):
self.boundary_handler = self.boundary_handler(opts['bounds'])
elif opts['bounds'] not in (None, False, [], [None, None]):
utils.print_warning("""
Option 'bounds' ignored because a BoundaryHandler *instance* was found.
Consider passing only the desired BoundaryHandler class. """,
CMAEvolutionStrategy.__init__)
if not self.boundary_handler.has_bounds():
self.boundary_handler = BoundNone() # just a little faster and well defined
elif not self.boundary_handler.is_in_bounds(self.x0):
if opts['verbose'] >= 0:
utils.print_warning("""
Initial solution is out of the domain boundaries:
x0 = %s
ldom = %s
udom = %s
THIS MIGHT LEAD TO AN EXCEPTION RAISED LATER ON.
""" % (str(self.gp.pheno(self.x0)),
str(self.boundary_handler.bounds[0]),
str(self.boundary_handler.bounds[1])),
'__init__', 'CMAEvolutionStrategy')
# set self.mean to geno(x0)
tf_geno_backup = self.gp.tf_geno
if self.gp.tf_pheno and self.gp.tf_geno is None:
self.gp.tf_geno = lambda x: x # a hack to avoid an exception
utils.print_warning(
"computed initial point may well be wrong, because no\n"
"inverse for the user provided phenotype transformation "
"was given")
self.mean = self.gp.geno(np.array(self.x0, copy=True),
from_bounds=self.boundary_handler.inverse,
copy=False)
self.mean0 = array(self.mean, copy=True) # relevant for initial injection
self.gp.tf_geno = tf_geno_backup
# without copy_always interface:
# self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False)
self.N = len(self.mean)
assert N == self.N
# self.fmean = np.NaN # TODO name should change? prints nan in output files (OK with matlab&octave)
# self.fmean_noise_free = 0. # for output only
self.sp = _CMAParameters(N, opts, verbose=opts['verbose'] > 0)
self.sp0 = self.sp # looks useless, as it is not a copy
def instantiate_adapt_sigma(adapt_sigma, self):
"""return instantiated sigma adaptation object"""
if adapt_sigma is None:
utils.print_warning(
"Value `None` for option 'AdaptSigma' is ambiguous and\n"
"hence deprecated. AdaptSigma can be set to `True` or\n"
"`False` or a class or class instance which inherited from\n"
"`cma.sigma_adaptation.CMAAdaptSigmaBase`")
adapt_sigma = CMAAdaptSigmaCSA
elif adapt_sigma is True:
if self.opts['CMA_diagonal'] is True and self.N > 299:
adapt_sigma = CMAAdaptSigmaTPA
else:
adapt_sigma = CMAAdaptSigmaCSA
elif adapt_sigma is False:
adapt_sigma = CMAAdaptSigmaNone()
if isinstance(adapt_sigma, type): # is a class?
# then we want the instance
adapt_sigma = adapt_sigma(dimension=self.N, popsize=self.sp.popsize)
return adapt_sigma
self.adapt_sigma = instantiate_adapt_sigma(opts['AdaptSigma'], self)
self.mean_shift_samples = True if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
opts['mean_shift_line_samples']) else False
def eval_vector(in_, opts, N, default_value=1.0):
"""return `default_value` as scalar or `in_` after removing
fixed variables if ``len(in_) == N``
"""
res = default_value
if in_ is not None:
if np.size(in_) == 1: # return scalar value
try:
res = float(in_[0])
except TypeError:
res = float(in_)
elif opts['fixed_variables'] and np.size(in_) > N:
res = array([in_[i] for i in range(len(in_))
if i not in opts['fixed_variables']],
dtype=float)
if len(res) != N:
utils.print_warning(
"resulting len %d != N = %d" % (len(res), N),
'eval_vector', iteration=self.countiter)
else:
res = array(in_, dtype=float)
if np.size(res) not in (1, N):
raise ValueError(
"CMA_stds option must have dimension %d "
"instead of %d" % (N, np.size(res)))
return res
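# e.g. (illustrative, not a doctest): with ``opts['fixed_variables'] == {1: 0.5}``
# and ``N == 3``, ``eval_vector([1, 2, 3, 4], opts, 3)`` drops index 1 and
# returns ``array([1., 3., 4.])``; a scalar or `None` input falls back accordingly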
opts['minstd'] = eval_vector(opts['minstd'], opts, N, 0)
opts['maxstd'] = eval_vector(opts['maxstd'], opts, N, np.inf)
# iiinteger handling, currently very basic:
# CAVEAT: integer indices may give unexpected results if fixed_variables is used
if len(opts['integer_variables']) and opts['fixed_variables']:
utils.print_warning(
"CAVEAT: fixed_variables change the meaning of "
"integer_variables indices")
# 1) prepare minstd to be a vector
if (len(opts['integer_variables']) and
np.isscalar(opts['minstd'])):
opts['minstd'] = N * [opts['minstd']]
# 2) set minstd to 1 / (2 Nint + 1),
# the setting 2 / (2 Nint + 1) already prevents convergence
for i in opts['integer_variables']:
if -N <= i < N:
opts['minstd'][i] = max((opts['minstd'][i],
1 / (2 * len(opts['integer_variables']) + 1)))
else:
utils.print_warning(
"""integer index %d not in range of dimension %d""" %
(i, N))
# initialization of state variables
self.countiter = 0
self.countevals = max((0, opts['verb_append'])) \
if not isinstance(opts['verb_append'], bool) else 0
self.pc = np.zeros(N)
self.pc_neg = np.zeros(N)
if 1 < 3: # new version with class
self.sigma_vec0 = eval_vector(self.opts['CMA_stds'], opts, N)
self.sigma_vec = transformations.DiagonalDecoding(self.sigma_vec0)
if np.isfinite(self.opts['CMA_dampsvec_fac']):
self.sigma_vec *= np.ones(N) # make sure to get a vector
else:
self.sigma_vec = eval_vector(self.opts['CMA_stds'], opts, N)
if np.isfinite(self.opts['CMA_dampsvec_fac']):
self.sigma_vec *= np.ones(N) # make sure to get a vector
self.sigma_vec0 = self.sigma_vec if np.isscalar(self.sigma_vec) \
else self.sigma_vec.copy()
stds = eval_vector(self.opts['CMA_teststds'], opts, N)
if self.opts['CMA_diagonal']: # is True or > 0
# linear time and space complexity
self.sigma_vec = transformations.DiagonalDecoding(stds * np.ones(N))
self.sm = sampler.GaussStandardConstant(N, randn=self.opts['randn'])
self._updateBDfromSM(self.sm)
if self.opts['CMA_diagonal'] is True:
self.sp.weights.finalize_negative_weights(N,
self.sp.c1_sep,
self.sp.cmu_sep,
pos_def=False)
elif self.opts['CMA_diagonal'] == 1:
raise ValueError("""Option 'CMA_diagonal' == 1 is disallowed.
Use either `True` or an iteration number > 1 up to which C should be diagonal.
Only `True` has linear memory demand.""")
else: # would ideally be done when switching
self.sp.weights.finalize_negative_weights(N,
self.sp.c1,
self.sp.cmu)
else:
if 11 < 3:
if hasattr(self.opts['vv'], '__getitem__') and \
'sweep_ccov' in self.opts['vv']:
self.opts['CMA_const_trace'] = True
if self.opts['CMA_sampler'] is None:
self.sm = sampler.GaussFullSampler(stds * np.ones(N),
lazy_update_gap=(
1. / (self.sp.c1 + self.sp.cmu + 1e-23) / self.N / 10
if self.opts['updatecovwait'] is None
else self.opts['updatecovwait']),
constant_trace=self.opts['CMA_const_trace'],
randn=self.opts['randn'],
eigenmethod=self.opts['CMA_eigenmethod'],
)
p = self.sm.parameters(mueff=self.sp.weights.mueff,
lam=self.sp.weights.lambda_)
self.sp.weights.finalize_negative_weights(N, p['c1'], p['cmu'])
elif isinstance(self.opts['CMA_sampler'], type):
try:
self.sm = self.opts['CMA_sampler'](
stds * np.ones(N),
**self.opts['CMA_sampler_options'])
except:
if max(stds) > min(stds):
utils.print_warning("different initial standard"
" deviations are not supported by the current"
" sampler and hence ignored")
elif stds[0] != 1:
utils.print_warning("""ignoring scaling factor %f
for sample distribution""" % stds[0])
self.sm = self.opts['CMA_sampler'](N,
**self.opts['CMA_sampler_options'])
else: # CMA_sampler is already initialized as class instance
self.sm = self.opts['CMA_sampler']
if not isinstance(self.sm, interfaces.StatisticalModelSamplerWithZeroMeanBaseClass):
utils.print_warning("""statistical model sampler did
not evaluate to the expected type `%s` but to type `%s`. This is
likely to lead to an exception later on. """ % (
str(type(interfaces.StatisticalModelSamplerWithZeroMeanBaseClass)),
str(type(self.sm))))
self._updateBDfromSM(self.sm)
self.dC = self.sm.variances
self.D = self.dC**0.5 # we assume that the initial C is diagonal
self.pop_injection_solutions = []
self.pop_injection_directions = []
self.number_of_solutions_asked = 0
self.number_of_injections_delivered = 0 # used/delivered in asked
# self.gp.pheno adds fixed variables
relative_stds = ((self.gp.pheno(self.mean + self.sigma * self.sigma_vec * self.D)
- self.gp.pheno(self.mean - self.sigma * self.sigma_vec * self.D)) / 2.0
/ (self.boundary_handler.get_bounds('upper', self.N_pheno)
- self.boundary_handler.get_bounds('lower', self.N_pheno)))
if np.any(relative_stds > 1):
idx = np.nonzero(relative_stds > 1)[0]
s = ("Initial standard deviation "
"%s larger than the bounded domain size in variable %s.\n"
"Consider using option 'CMA_stds', if the bounded "
"domain sizes differ significantly. "
% (("s (sigma0*stds) are", str(idx))
if len(idx) > 1 else (" (sigma0*stds) is",
str(idx[0]))))
raise ValueError(s)
self._flgtelldone = True
self.itereigenupdated = self.countiter
self.count_eigen = 0
self.noiseS = 0 # noise "signal"
self.hsiglist = []
self.sent_solutions = _CMASolutionDict()
self.archive = _CMASolutionDict()
self._injected_solutions_archive = _SolutionDict()
self.best = ot.BestSolution()
self.const = _BlancClass()
self.const.chiN = N**0.5 * (1 - 1. / (4.*N) + 1. / (21.*N**2)) # expectation of norm(randn(N,1))
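# sanity check of the approximation (hedged): for N=10 both the formula above and
# the exact value sqrt(2)*gamma((N+1)/2)/gamma(N/2) are about 3.084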
self.logger = CMADataLogger(opts['verb_filenameprefix'],
modulo=opts['verb_log'],
expensive_modulo=opts['verb_log_expensive']).register(self)
self._stopdict = _CMAStopDict()
" attribute for stopping criteria in function stop"
self.callbackstop = ()
" return values of callbacks, used like ``if any(callbackstop)``"
self.fit = _BlancClass()
self.fit.fit = [] # not really necessary
self.fit.hist = [] # short history of best
self.fit.histbest = [] # long history of best
self.fit.histmedian = [] # long history of median
self.fit.median = None
self.fit.median0 = None
self.fit.median_min = np.inf
self.fit.flatfit_iterations = 0
self.more_to_write = utils.MoreToWrite() # [1, 1, 1, 1] # N*[1] # needed when writing takes place before setting
# say hello
if opts['verb_disp'] > 0 and opts['verbose'] >= 0:
sweighted = '_w' if self.sp.weights.mu > 1 else ''
smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
print('(%d' % (self.sp.weights.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr +
')-' + ('a' if opts['CMA_active'] else '') + 'CMA-ES' +
' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.weights.mueff, int(100 * self.sp.weights[0])) +
' in dimension %d (seed=%s, %s)' % (N, str(opts['seed']), time.asctime())) # + func.__name__
if opts['CMA_diagonal'] and self.sp.CMA_on:
s = ''
if opts['CMA_diagonal'] is not True:
s = ' for '
if opts['CMA_diagonal'] < np.inf:
s += str(int(opts['CMA_diagonal']))
else:
s += str(np.floor(opts['CMA_diagonal']))
s += ' iterations'
s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')'
print(' Covariance matrix is diagonal' + s)
def _set_x0(self, x0):
"""Assign `self.x0` from argument `x0`.
Input `x0` may be a `callable` or a `list` or `numpy.ndarray` of
the desired length.
Below an artificial example is given, where calling `x0`
delivers in the first two calls ``dimension * [5]`` and in
succeeding calls ``dimension * [0.01]``. Only the initial value of
0.01 solves the Rastrigin function:
>>> import cma
>>> class X0:
... def __init__(self, dimension):
... self.irun = 0
... self.dimension = dimension
... def __call__(self):
... """"""
... self.irun += 1
... return (self.dimension * [5] if self.irun < 3
... else self.dimension * [0.01])
>>> xopt, es = cma.fmin2(cma.ff.rastrigin, X0(3), 0.01,
... {'verbose':-9}, restarts=1)
>>> assert es.result.fbest > 1e-5
>>> xopt, es = cma.fmin2(cma.ff.rastrigin, X0(3), 0.01,
... {'verbose':-9}, restarts=2)
>>> assert es.result.fbest < 1e-5 # third run succeeds due to x0
"""
try:
x0 = x0()
except TypeError:
if utils.is_str(x0):
raise ValueError("x0 may be a callable, but a string is no longer permitted")
# x0 = eval(x0)
self.x0 = array(x0, dtype=float, copy=True) # should not have column or row, is just 1-D
if self.x0.ndim == 2 and 1 in self.x0.shape:
utils.print_warning('input x0 should be a list or 1-D array, trying to flatten ' +
str(self.x0.shape) + '-array')
if self.x0.shape[0] == 1:
self.x0 = self.x0[0]
elif self.x0.shape[1] == 1:
self.x0 = array([x[0] for x in self.x0])
if self.x0.ndim != 1:
raise ValueError('x0 must be 1-D array')
if len(self.x0) <= 1:
raise ValueError('optimization in 1-D is not supported (code was never tested)')
try:
self.x0.resize(self.x0.shape[0]) # 1-D array, not really necessary?!
except NotImplementedError:
pass
def _copy_light(self, sigma=None, inopts=None):
"""tentative copy of self, versatile (interface and functionalities may change).
`sigma` overwrites the original initial `sigma`.
`inopts` allows to overwrite any of the original options.
This copy may not work as expected depending on the used sampler.
Copy mean and sample distribution parameters and input options. Do
not copy evolution paths, termination status or other state variables.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(3 * [1], 0.1,
... {'verbose':-9}).optimize(cma.ff.elli, iterations=10)
>>> es2 = es._copy_light()
>>> assert es2.sigma == es.sigma
>>> assert sum((es.sm.C - es2.sm.C).flat < 1e-12)
>>> es3 = es._copy_light(sigma=10)
>>> assert es3.sigma == es3.sigma0 == 10
>>> es4 = es._copy_light(inopts={'CMA_on': False})
>>> assert es4.sp.c1 == es4.sp.cmu == 0
"""
if sigma is None:
sigma = self.sigma
opts = dict(self.inopts)
if inopts is not None:
opts.update(inopts)
es = type(self)(self.mean[:], sigma, opts)
es.sigma_vec = transformations.DiagonalDecoding(self.sigma_vec.scaling)
try: es.sm.C = self.sm.C.copy()
except: warnings.warn("self.sm.C.copy failed")
es.sm.update_now(-1) # make B and D consistent with C
es._updateBDfromSM()
return es
# ____________________________________________________________
# ____________________________________________________________
def ask(self, number=None, xmean=None, sigma_fac=1,
gradf=None, args=(), **kwargs):
"""get/sample new candidate solutions.
Solutions are sampled from a multi-variate
normal distribution and transformed to f-representation
(phenotype) to be evaluated.
Arguments
---------
`number`
number of returned solutions, by default the
population size ``popsize`` (AKA ``lambda``).
`xmean`
distribution mean, phenotype?
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`gradf`
gradient, ``len(gradf(x)) == len(x)``, if
``gradf is not None`` the third solution in the
returned list is "sampled" in supposedly Newton
direction ``np.dot(C, gradf(xmean, *args))``.
`args`
additional arguments passed to gradf
Return
------
A list of N-dimensional candidate solutions to be evaluated
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3) #doctest: +ELLIPSIS
(4_w,...
>>> while not es.stop() and es.best.f > 1e-6:
... X = es.ask() # get list of new solutions
... fit = [cma.ff.rosen(x) for x in X] # call fct with each solution
... es.tell(X, fit) # feed values
:See: `ask_and_eval`, `ask_geno`, `tell`
"""
assert self.countiter >= 0
if kwargs:
utils.print_warning("""Optional argument%s \n\n %s\n\nignored""" % (
'(s)' if len(kwargs) > 1 else '', str(kwargs)),
"ask", "CMAEvolutionStrategy",
self.countiter, maxwarns=1)
if self.countiter == 0:
self.timer = utils.ElapsedWCTime()
else:
self.timer.tic
pop_geno = self.ask_geno(number, xmean, sigma_fac)
# N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
# new data: 11.5s vs 9.5s == 20%
# TODO: check here, whether this is necessary?
# return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop] # probably fine
# return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved
pop_pheno = [self.gp.pheno(x, copy=True,
into_bounds=self.boundary_handler.repair)
for x in pop_geno]
if gradf is not None:
if not isinstance(self.sm, sampler.GaussFullSampler):
utils.print_warning("""Gradient injection may fail,
because sampler attributes `B` and `D` are not present""",
"ask", "CMAEvolutionStrategy",
self.countiter, maxwarns=1)
try:
# see Hansen (2011), Injecting external solutions into CMA-ES
if not self.gp.islinear:
utils.print_warning("""
using the gradient (option ``gradf``) with a non-linear
coordinate-wise transformation (option ``transformation``)
has never been tested.""")
# TODO: check this out
def grad_numerical_of_coordinate_map(x, map, epsilon=None):
"""map is a coordinate-wise independent map, return
the estimated diagonal of the Jacobian.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
return (map(x + eps) - map(x - eps)) / (2 * eps)
def grad_numerical_sym(x, func, epsilon=None):
"""return symmetric numerical gradient of func : R^n -> R.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
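# e.g. (illustrative): grad_numerical_sym(np.array([1., 2.]), cma.ff.sphere)
# is approximately [2., 4.], the analytic gradient of sum(x**2)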
try:
if self.last_iteration_with_gradient == self.countiter:
utils.print_warning('gradient is used several times in ' +
'this iteration', iteration=self.countiter,
verbose=self.opts['verbose'])
self.last_iteration_with_gradient = self.countiter
except AttributeError:
pass
index_for_gradient = min((2, len(pop_pheno)-1))
if xmean is None:
xmean = self.mean
xpheno = self.gp.pheno(xmean, copy=True,
into_bounds=self.boundary_handler.repair)
grad_at_mean = gradf(xpheno, *args)
# lift gradient into geno-space
if not self.gp.isidentity or (self.boundary_handler is not None
and self.boundary_handler.has_bounds()):
boundary_repair = None
gradpen = 0
if isinstance(self.boundary_handler, BoundTransform):
boundary_repair = self.boundary_handler.repair
elif isinstance(self.boundary_handler,
BoundPenalty):
fpenalty = lambda x: self.boundary_handler.__call__(
x, _SolutionDict({tuple(x): {'geno': x}}), self.gp)
gradpen = grad_numerical_sym(
xmean, fpenalty)
elif self.boundary_handler is None or \
isinstance(self.boundary_handler,
BoundNone):
pass
else:
raise NotImplementedError(
"unknown boundary handling method" +
str(self.boundary_handler) +
" when using gradf")
gradgp = grad_numerical_of_coordinate_map(
xmean,
lambda x: self.gp.pheno(x, copy=True,
into_bounds=boundary_repair))
grad_at_mean = grad_at_mean * gradgp + gradpen
# TODO: frozen variables break the code (e.g. at grad of map)
if len(grad_at_mean) != self.N or self.opts['fixed_variables']:
NotImplementedError("""
gradient with fixed variables is not (yet) implemented,
implement a simple transformation of the objective instead""")
v = self.sm.D * np.dot(self.sm.B.T, self.sigma_vec * grad_at_mean)
# newton_direction = sv * B * D * D * B^T * sv * gradient = sv * B * D * v
# v = D^-1 * B^T * sv^-1 * newton_direction = D * B^T * sv * gradient
q = sum(v**2)
if q:
# Newton direction
pop_geno[index_for_gradient] = xmean - self.sigma \
* (self.N / q)**0.5 \
* (self.sigma_vec * np.dot(self.sm.B, self.sm.D * v))
if 11 < 3 and self.opts['vv']:
# gradient direction
q = sum((np.dot(self.sm.B.T, self.sigma_vec**-1 * grad_at_mean) / self.sm.D)**2)
pop_geno[index_for_gradient] = xmean - self.sigma \
* (self.N / q)**0.5 * grad_at_mean \
if q else xmean
else:
pop_geno[index_for_gradient] = xmean
utils.print_warning('gradient zero observed',
iteration=self.countiter)
# test "pure" gradient:
# pop_geno[index_for_gradient] = -0.52 * grad_at_mean
pop_pheno[index_for_gradient] = self.gp.pheno(
pop_geno[index_for_gradient], copy=True,
into_bounds=self.boundary_handler.repair)
if 11 < 3:
print("x/m", pop_pheno[index_for_gradient] / self.mean)
print(" x-m=",
pop_pheno[index_for_gradient] - self.mean)
print(" g=", grad_at_mean)
print(" (x-m-g)/||g||=", (pop_pheno[index_for_gradient] - self.mean - grad_at_mean) / sum(grad_at_mean**2)**0.5
)
except AttributeError:
utils.print_warning("""Gradient injection failed
presumably due to missing attribute ``self.sm.B or self.sm.D``""")
# insert solutions, this could also (better?) be done in self.gp.pheno
for i in rglen((pop_geno)):
self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i],
iteration=self.countiter)
### iiinteger handling could come here
return pop_pheno
# ____________________________________________________________
# ____________________________________________________________
def ask_geno(self, number=None, xmean=None, sigma_fac=1):
"""get new candidate solutions in genotyp.
Solutions are sampled from a multi-variate normal distribution.
Arguments are
`number`
number of returned solutions, by default the
population size `popsize` (AKA lambda).
`xmean`
distribution mean
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`ask_geno` returns a list of N-dimensional candidate solutions
        in genotype representation and is called by `ask`.
Details: updates the sample distribution if needed and might
change the geno-pheno transformation during this update.
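        Example (a minimal usage sketch):
        >>> import cma
        >>> es = cma.CMAEvolutionStrategy(4 * [0.5], 0.3) #doctest: +ELLIPSIS
        (4_w,...
        >>> X_geno = es.ask_geno()  # genotypic candidate solutions
        >>> assert len(X_geno) == es.popsize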
:See: `ask`, `ask_and_eval`
"""
if number is None or number < 1:
number = self.sp.popsize
if self.number_of_solutions_asked == 0:
self.number_of_injections = (
len(self.pop_injection_directions) +
len(self.pop_injection_solutions))
# update distribution, might change self.mean
# if not self.opts['tolconditioncov'] or not np.isfinite(self.opts['tolconditioncov']):
if self.opts['conditioncov_alleviate']:
self.alleviate_conditioning_in_coordinates(self.opts['conditioncov_alleviate'][0])
self.alleviate_conditioning(self.opts['conditioncov_alleviate'][-1])
xmean_arg = xmean
if xmean is None:
xmean = self.mean
else:
try:
xmean = self.archive[xmean]['geno']
# noise handling after call of tell
except KeyError:
try:
xmean = self.sent_solutions[xmean]['geno']
# noise handling before calling tell
except KeyError:
pass
if 11 < 3:
if self.opts['CMA_AII']:
if self.countiter == 0:
# self.aii = AII(self.x0, self.sigma0)
pass
self._flgtelldone = False
pop = self.aii.ask(number)
return pop
sigma = sigma_fac * self.sigma
# update parameters for sampling the distribution
# fac 0 1 10
# 150-D cigar:
# 50749 50464 50787
# 200-D elli: == 6.9
# 99900 101160
# 100995 103275 == 2% loss
# 100-D elli: == 6.9
# 363052 369325 < 2% loss
# 365075 365755
# sample distribution
if self._flgtelldone: # could be done in tell()!?
self._flgtelldone = False
self.ary = []
# check injections from pop_injection_directions
arinj = []
# a hack: do not use injection when only a single solution is asked for or a solution with a specific mean
if number > 1 and (xmean_arg is None or Mh.vequals_approximately(xmean_arg, self.mean)):
if self.countiter < 4 and \
len(self.pop_injection_directions) > self.popsize - 2:
utils.print_warning(' %d special injected samples with popsize %d, '
% (len(self.pop_injection_directions), self.popsize)
+ "popsize %d will be used" % (len(self.pop_injection_directions) + 2)
+ (" and the warning is suppressed in the following" if self.countiter == 3 else ""))
# directions must come first because of mean_shift_samples/TPA
while self.pop_injection_directions:
if len(arinj) >= number:
break
# TODO: if len(arinj) > number, ask doesn't fulfill the contract
y = self.pop_injection_directions.pop(0)
# sigma_vec _is_ taken into account here
# this may be done again in tell
if self.mahalanobis_norm(y) > self.N**0.5 * self.opts['CMA_injections_threshold_keep_len']:
nominator = self._random_rescaling_factor_to_mahalanobis_size(y)
else:
nominator = 1
y *= nominator / self.sigma
arinj.append(y)
while self.pop_injection_solutions:
arinj.append((self.pop_injection_solutions.pop(0) - self.mean) / self.sigma)
if self.mean_shift_samples and self.countiter > 1:
# TPA is implemented by injection of the Delta mean
if len(arinj) < 2:
raise RuntimeError(
"Mean shift samples are expected but missing.\n"
"This happens if, for example, `ask` is called"
" more than once, without calling `tell`\n"
"(because the first call removes the samples from"
" the injection list).\n"
"`cma.sigma_adaptation.CMAAdaptSigmaTPA`"
" step-size adaptation generates mean shift\n"
"samples and relies on them. \n"
"Using ``ask(1)`` for any subsequent calls of"
" `ask` works OK and TPA works if the\n"
"first two samples from the"
" first call are retained as first samples when"
" calling `tell`. \n"
"EXAMPLE: \n"
" X = es.ask()\n"
" X.append(es.ask(1)[0])\n"
" ...\n"
" es.tell(X, ...)"
)
# for TPA, set both vectors to the same length and don't
# ever keep the original length
arinj[0] *= self._random_rescaling_factor_to_mahalanobis_size(arinj[0]) / self.sigma
arinj[1] *= (np.sum(arinj[0]**2) / np.sum(arinj[1]**2))**0.5
if not Mh.vequals_approximately(arinj[0], -arinj[1]):
utils.print_warning(
"mean_shift_samples, but the first two solutions"
" are not mirrors.",
"ask_geno", "CMAEvolutionStrategy",
self.countiter)
# arinj[1] /= sum(arinj[0]**2)**0.5 / s1 # revert change
self.number_of_injections_delivered += len(arinj)
assert (self.countiter < 2 or not self.mean_shift_samples
or self.number_of_injections_delivered >= 2)
Niid = number - len(arinj) # each row is a solution
# compute ary
if Niid >= 0: # should better be true
ary = self.sigma_vec * np.asarray(self.sm.sample(Niid))
self._updateBDfromSM(self.sm) # sm.sample invoked lazy update
# unconditional mirroring
if self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 0:
for i in range(Mh.sround(self.sp.lam_mirr * number / self.popsize)):
if 2 * (i + 1) > len(ary):
utils.print_warning("fewer mirrors generated than given in parameter setting (%d<%d)"
% (i, self.sp.lam_mirr),
"ask_geno", "CMAEvolutionStrategy",
iteration=self.countiter,
maxwarns=4)
break
ary[-1 - 2 * i] = -ary[-2 - 2 * i]
if len(arinj):
ary = np.vstack((arinj, ary))
else:
ary = array(arinj)
assert number == len(arinj)
if self.opts['verbose'] > 4 and self.countiter < 3 and len(arinj) and self.adapt_sigma is not CMAAdaptSigmaTPA:
utils.print_message(' %d pre-injected solutions will be used (popsize=%d)' %
(len(arinj), len(ary)))
pop = xmean + sigma * ary
for i, x in enumerate(pop[:len(arinj)]):
self._injected_solutions_archive[x] = {
'iteration': self.countiter, # values are currently never used
'index': i,
'counter': len(self._injected_solutions_archive)
}
# pprint(dict(self._injected_solutions_archive))
self.evaluations_per_f_value = 1
self.ary = ary
self.number_of_solutions_asked += len(pop)
return pop
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x = x - self.mean # -= fails if dtypes don't agree
if any(x): # let's not divide by zero
x *= sum(self.randn(1, len(x))[0]**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x
def _random_rescaling_factor_to_mahalanobis_size(self, y):
"""``self.mean + self._random_rescaling_factor_to_mahalanobis_size(y) * y``
        is guaranteed to appear as if drawn from the sample distribution.
"""
if len(y) != self.N:
raise ValueError('len(y)=%d != %d=dimension' % (len(y), self.N))
if not any(y):
utils.print_warning("input was all-zeros, which is probably a bug",
"_random_rescaling_factor_to_mahalanobis_size",
iteration=self.countiter)
return 1.0
return np.sum(self.randn(1, len(y))[0]**2)**0.5 / self.mahalanobis_norm(y)
def get_mirror(self, x, preserve_length=False):
"""return ``pheno(self.mean - (geno(x) - self.mean))``.
>>> import numpy as np, cma
>>> es = cma.CMAEvolutionStrategy(np.random.randn(3), 1) #doctest: +ELLIPSIS
(3_w,...
>>> x = np.random.randn(3)
>>> assert cma.utilities.math.Mh.vequals_approximately(es.mean - (x - es.mean), es.get_mirror(x, preserve_length=True))
>>> x = es.ask(1)[0]
>>> vals = (es.get_mirror(x) - es.mean) / (x - es.mean)
>>> assert cma.utilities.math.Mh.equals_approximately(sum(vals), len(vals) * vals[0])
TODO: this implementation is yet experimental.
TODO: this implementation includes geno-pheno transformation,
however in general GP-transformation should be separated from
specific code.
        Selectively mirrored sampling improves to a moderate extent but
        over-additively with active CMA for quite understandable reasons.
        Optimal numbers of mirrors are surprisingly small: 1,2,3 for
maxlam=7,13,20 where 3,6,10 are the respective maximal possible
mirrors that must be clearly suboptimal.
"""
try:
dx = self.sent_solutions[x]['geno'] - self.mean
except: # can only happen with injected solutions?!
dx = self.gp.geno(x, from_bounds=self.boundary_handler.inverse,
copy=True) - self.mean
if not preserve_length:
# dx *= sum(self.randn(1, self.N)[0]**2)**0.5 / self.mahalanobis_norm(dx)
dx *= self._random_rescaling_factor_to_mahalanobis_size(dx)
x = self.mean - dx
y = self.gp.pheno(x, into_bounds=self.boundary_handler.repair)
# old measure: costs 25% in CPU performance with N,lambda=20,200
self.sent_solutions.insert(y, geno=x, iteration=self.countiter)
return y
# ____________________________________________________________
# ____________________________________________________________
#
def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1,
evaluations=1, aggregation=np.median, kappa=1, parallel_mode=False):
"""sample `number` solutions and evaluate them on `func`.
Each solution ``s`` is resampled until
``self.is_feasible(s, func(s)) is True``.
Arguments
---------
`func`:
objective function, ``func(x)`` accepts a `numpy.ndarray`
and returns a scalar ``if not parallel_mode``. Else returns a
`list` of scalars from a `list` of `numpy.ndarray`.
`args`:
additional parameters for `func`
`gradf`:
gradient of objective function, ``g = gradf(x, *args)``
must satisfy ``len(g) == len(x)``
`number`:
number of solutions to be sampled, by default
population size ``popsize`` (AKA lambda)
`xmean`:
mean for sampling the solutions, by default ``self.mean``.
`sigma_fac`:
multiplier for sampling width, standard deviation, for example
to get a small perturbation of solution `xmean`
`evaluations`:
number of evaluations for each sampled solution
`aggregation`:
function that aggregates `evaluations` values to
            a single value.
`kappa`:
multiplier used for the evaluation of the solutions, in
that ``func(m + kappa*(x - m))`` is the f-value for ``x``.
Return
------
``(X, fit)``, where
- `X`: list of solutions
- `fit`: list of respective function values
Details
-------
While ``not self.is_feasible(x, func(x))`` new solutions are
sampled. By default
``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``.
The argument to `func` can be freely modified within `func`.
Depending on the ``CMA_mirrors`` option, some solutions are not
sampled independently but as mirrors of other bad solutions. This
is a simple derandomization that can save 10-30% of the
evaluations in particular with small populations, for example on
the cigar function.
Example
-------
>>> import cma
>>> x0, sigma0 = 8 * [10], 1 # 8-D
>>> es = cma.CMAEvolutionStrategy(x0, sigma0) #doctest: +ELLIPSIS
(5_w,...
>>> while not es.stop():
... X, fit = es.ask_and_eval(cma.ff.elli) # handles NaN with resampling
... es.tell(X, fit) # pass on fitness values
... es.disp(20) # print every 20-th iteration #doctest: +ELLIPSIS
Iterat #Fevals...
>>> print('terminated on ' + str(es.stop())) #doctest: +ELLIPSIS
terminated on ...
A single iteration step can be expressed in one line, such that
an entire optimization after initialization becomes::
while not es.stop():
es.tell(*es.ask_and_eval(cma.ff.elli))
"""
# initialize
popsize = self.sp.popsize
if number is not None:
popsize = int(number)
if self.opts['CMA_mirrormethod'] == 1: # direct selective mirrors
nmirrors = Mh.sround(self.sp.lam_mirr * popsize / self.sp.popsize)
self._mirrormethod1_done = self.countiter
else:
# method==0 unconditional mirrors are done in ask_geno
# method==2 delayed selective mirrors are done via injection
nmirrors = 0
assert nmirrors <= popsize // 2
self.mirrors_idx = np.arange(nmirrors) # might never be used
is_feasible = self.opts['is_feasible']
# do the work
fit = [] # or np.NaN * np.empty(number)
X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args)
if xmean is None:
xmean = self.mean # might have changed in self.ask
X = []
if parallel_mode:
if hasattr(func, 'evaluations'):
evals0 = func.evaluations
fit_first = func(X_first, *args)
# the rest is only book keeping and warnings spitting
if hasattr(func, 'evaluations'):
self.countevals += func.evaluations - evals0 - self.popsize # why not .sp.popsize ?
if nmirrors and self.opts['CMA_mirrormethod'] > 0 and self.countiter < 2:
utils.print_warning(
"selective mirrors will not work in parallel mode",
"ask_and_eval", "CMAEvolutionStrategy")
if evaluations > 1 and self.countiter < 2:
utils.print_warning(
"aggregating evaluations will not work in parallel mode",
"ask_and_eval", "CMAEvolutionStrategy")
else:
fit_first = len(X_first) * [None]
for k in range(popsize):
x, f = X_first.pop(0), fit_first.pop(0)
rejected = -1
while f is None or not is_feasible(x, f): # rejection sampling
if parallel_mode:
utils.print_warning(
"rejection sampling will not work in parallel mode"
" unless the parallel_objective makes a distinction\n"
"between called with a numpy array vs a list (of"
" numpy arrays) as first argument.",
"ask_and_eval", "CMAEvolutionStrategy")
rejected += 1
if rejected: # resample
x = self.ask(1, xmean, sigma_fac)[0]
elif k >= popsize - nmirrors: # selective mirrors
if k == popsize - nmirrors:
self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
# constraints handling test hardwired ccccccccccc
length_normalizer = 1
# zzzzzzzzzzzzzzzzzzzzzzzzz
if 11 < 3:
# for some unclear reason, this normalization does not work as expected: the step-size
# becomes sometimes too large and overall the mean might diverge. Is the reason that
# we observe random fluctuations, because the length is not selection relevant?
# However sigma-adaptation should mainly work on the correlation, not the length?
# Or is the reason the deviation of the direction introduced by using the original
                        # length, which also can affect the measured correlation?
# Update: if the length of z in CSA is clipped at chiN+1, it works, but only sometimes?
length_normalizer = self.N**0.5 / self.mahalanobis_norm(x - xmean) # self.const.chiN < N**0.5, the constant here is irrelevant (absorbed by kappa)
# print(self.N**0.5 / self.mahalanobis_norm(x - xmean))
# self.more_to_write += [length_normalizer * 1e-3, length_normalizer * self.mahalanobis_norm(x - xmean) * 1e2]
f = func(x, *args) if kappa == 1 else \
func(xmean + kappa * length_normalizer * (x - xmean),
*args)
if is_feasible(x, f) and evaluations > 1:
f = aggregation([f] + [(func(x, *args) if kappa == 1 else
func(xmean + kappa * length_normalizer * (x - xmean), *args))
for _i in range(int(evaluations - 1))])
if (rejected + 1) % 1000 == 0:
utils.print_warning(' %d solutions rejected (f-value NaN or None) at iteration %d' %
(rejected, self.countiter))
fit.append(f)
X.append(x)
self.evaluations_per_f_value = int(evaluations)
if any(f is None or np.isnan(f) for f in fit):
idxs = [i for i in range(len(fit))
if fit[i] is None or np.isnan(fit[i])]
utils.print_warning("f-values %s contain None or NaN at indices %s"
% (str(fit[:30]) + ('...' if len(fit) > 30 else ''),
str(idxs)),
'ask_and_tell',
'CMAEvolutionStrategy',
self.countiter)
return X, fit
def _prepare_injection_directions(self):
"""provide genotypic directions for TPA and selective mirroring,
with no specific length normalization, to be used in the
coming iteration.
Details:
This method is called in the end of `tell`. The result is
assigned to ``self.pop_injection_directions`` and used in
`ask_geno`.
"""
# self.pop_injection_directions is supposed to be empty here
if self.pop_injection_directions or self.pop_injection_solutions:
raise ValueError("""Found unused injected direction/solutions.
This could be a bug in the calling order/logics or due to
a too small popsize used in `ask()` or when only using
`ask(1)` repeatedly. """)
ary = []
if self.mean_shift_samples:
ary = [self.mean - self.mean_old]
ary.append(self.mean_old - self.mean) # another copy!
if np.alltrue(ary[-1] == 0.0):
utils.print_warning('zero mean shift encountered',
'_prepare_injection_directions',
'CMAEvolutionStrategy', self.countiter)
if self.opts['pc_line_samples']: # caveat: before, two samples were used
ary.append(self.pc.copy())
if self.sp.lam_mirr and (
self.opts['CMA_mirrormethod'] == 2 or (
self.opts['CMA_mirrormethod'] == 1 and ( # replacement for direct selective mirrors
not hasattr(self, '_mirrormethod1_done') or
self._mirrormethod1_done < self.countiter - 1))):
i0 = len(ary)
ary += self.get_selective_mirrors()
self._indices_of_selective_mirrors = range(i0, len(ary))
self.pop_injection_directions = ary
return ary
def get_selective_mirrors(self, number=None):
"""get mirror genotypic directions from worst solutions.
Details:
To be called after the mean has been updated.
Takes the last ``number=sp.lam_mirr`` entries in the
``self.pop[self.fit.idx]`` as solutions to be mirrored.
Do not take a mirror if it is suspected to stem from a
previous mirror in order to not go endlessly back and forth.
"""
if number is None:
number = self.sp.lam_mirr
if not hasattr(self, '_indices_of_selective_mirrors'):
self._indices_of_selective_mirrors = []
res = []
for i in range(1, number + 1):
if 'all-selective-mirrors' in self.opts['vv'] or self.fit.idx[-i] not in self._indices_of_selective_mirrors:
res.append(self.mean_old - self.pop[self.fit.idx[-i]])
assert len(res) >= number - len(self._indices_of_selective_mirrors)
return res
# ____________________________________________________________
def tell(self, solutions, function_values, check_points=None,
copy=False):
"""pass objective function values to prepare for next
iteration. This core procedure of the CMA-ES algorithm updates
all state variables, in particular the two evolution paths, the
distribution mean, the covariance matrix and a step-size.
Arguments
---------
`solutions`
list or array of candidate solution points (of
type `numpy.ndarray`), most presumably before
delivered by method `ask()` or `ask_and_eval()`.
`function_values`
list or array of objective function values
            corresponding to the respective points. Aside from termination
decisions, only the ranking of values in `function_values`
is used.
`check_points`
If ``check_points is None``, only solutions that are not generated
by `ask()` are possibly clipped (recommended). ``False`` does not clip
any solution (not recommended).
If ``True``, clips solutions that realize long steps (i.e. also
those that are unlikely to be generated with `ask()`). `check_points`
can be a list of indices to be checked in solutions.
`copy`
``solutions`` can be modified in this routine, if ``copy is False``
Details
-------
`tell()` updates the parameters of the multivariate
normal search distribution, namely covariance matrix and
step-size and updates also the attributes ``countiter`` and
``countevals``. To check the points for consistency is quadratic
in the dimension (like sampling points).
Bugs
----
The effect of changing the solutions delivered by `ask()`
depends on whether boundary handling is applied. With boundary
handling, modifications are disregarded. This is necessary to
apply the default boundary handling that uses unrepaired
solutions but might change in future.
Example
-------
>>> import cma
>>> func = cma.ff.sphere # choose objective function
>>> es = cma.CMAEvolutionStrategy(np.random.rand(2) / 3, 1.5)
... # doctest:+ELLIPSIS
(3_...
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [func(x) for x in X])
>>> es.result # result is a `namedtuple` # doctest:+ELLIPSIS
CMAEvolutionStrategyResult(xbest=array([...
:See: class `CMAEvolutionStrategy`, `ask`, `ask_and_eval`, `fmin`
"""
if self._flgtelldone:
raise RuntimeError('tell should only be called once per iteration')
lam = len(solutions)
if lam != len(function_values):
raise ValueError('#f-values = %d must equal #solutions = %d'
% (len(function_values), lam))
if lam + self.sp.lam_mirr < 3:
raise ValueError('population size ' + str(lam) +
' is too small with option ' +
'CMA_mirrors * popsize < 0.5')
if not np.isscalar(function_values[0]):
try:
if np.isscalar(function_values[0][0]):
if self.countiter <= 1:
utils.print_warning('''function_values is not a list of scalars,
the first element equals %s with non-scalar type %s.
Using now ``[v[0] for v in function_values]`` instead (further warnings are suppressed)'''
% (str(function_values[0]), str(type(function_values[0]))))
function_values = [val[0] for val in function_values]
else:
raise ValueError('objective function values must be a list of scalars')
except:
utils.print_message("function values=%s" % function_values,
method_name='tell', class_name='CMAEvolutionStrategy',
verbose=9, iteration=self.countiter)
raise
if any(f is None or np.isnan(f) for f in function_values):
idx_none = [i for i, f in enumerate(function_values) if f is None]
idx_nan = [i for i, f in enumerate(function_values) if f is not None and np.isnan(f)]
m = np.median([f for f in function_values
if f is not None and not np.isnan(f)])
utils.print_warning("function values with index %s/%s are nan/None and will be set to the median value %s"
% (str(idx_nan), str(idx_none), str(m)), 'ask',
'CMAEvolutionStrategy', self.countiter)
for i in idx_nan + idx_none:
function_values[i] = m
if not np.isfinite(function_values).all():
idx = [i for i, f in enumerate(function_values)
if not np.isfinite(f)]
utils.print_warning("function values with index %s are not finite but %s."
% (str(idx), str([function_values[i] for i in idx])), 'ask',
'CMAEvolutionStrategy', self.countiter)
if self.number_of_solutions_asked <= self.number_of_injections:
utils.print_warning("""no independent samples generated because the
number of injected solutions, %d, equals the number of
solutions asked, %d, where %d solutions remain to be injected
""" % (self.number_of_injections,
self.number_of_solutions_asked,
len(self.pop_injection_directions) + len(self.pop_injection_solutions)),
"ask_geno", "CMAEvolutionStrategy", self.countiter)
self.number_of_solutions_asked = 0
# ## prepare
N = self.N
sp = self.sp
        if 11 < 3 and lam != sp.popsize: # turned off, because mu should stay constant, still not disastrous
utils.print_warning('population size has changed, recomputing parameters')
self.sp.set(self.opts, lam) # not really tested
if lam < sp.weights.mu: # rather decrease cmean instead of having mu > lambda//2
raise ValueError('not enough solutions passed to function tell (mu>lambda)')
self.countiter += 1 # >= 1 now
self.countevals += sp.popsize * self.evaluations_per_f_value
self.best.update(solutions, self.sent_solutions, function_values, self.countevals)
flg_diagonal = self.opts['CMA_diagonal'] is True \
or self.countiter <= self.opts['CMA_diagonal']
if not flg_diagonal and isinstance(self.sm, sampler.GaussStandardConstant):
self.sm = sampler.GaussFullSampler(N)
self._updateBDfromSM(self.sm)
# ## manage fitness
fit = self.fit # make short cut
# CPU for N,lam=20,200: this takes 10s vs 7s
fit.bndpen = self.boundary_handler.update(function_values, self)(solutions, self.sent_solutions, self.gp)
# for testing:
# fit.bndpen = self.boundary_handler.update(function_values, self)([s.unrepaired for s in solutions])
fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
fit.fit = array(function_values, copy=False)[fit.idx]
# update output data TODO: this is obsolete!? However: need communicate current best x-value?
# old: out['recent_x'] = self.gp.pheno(pop[0])
# self.out['recent_x'] = array(solutions[fit.idx[0]]) # TODO: change in a data structure(?) and use current as identify
# self.out['recent_f'] = fit.fit[0]
# fitness histories
fit.hist.insert(0, fit.fit[0]) # caveat: this may neither be the best nor the best in-bound fitness, TODO
fit.median = (fit.fit[self.popsize // 2] if self.popsize % 2
else np.mean(fit.fit[self.popsize // 2 - 1: self.popsize // 2 + 1]))
# if len(self.fit.histbest) < 120+30*N/sp.popsize or # does not help, as tablet in the beginning is the critical counter-case
if ((self.countiter % 5) == 0): # 20 percent of 1e5 gen.
fit.histbest.insert(0, fit.fit[0])
fit.histmedian.insert(0, fit.median)
if len(fit.histbest) > 2e4: # 10 + 30*N/sp.popsize:
fit.histbest.pop()
fit.histmedian.pop()
if len(fit.hist) > 10 + 30 * N / sp.popsize:
fit.hist.pop()
if fit.median0 is None:
fit.median0 = fit.median
if fit.median_min > fit.median:
fit.median_min = fit.median
### line 2665
# TODO: clean up inconsistency when an unrepaired solution is available and used
# now get the genotypes
self.pop_sorted = None
pop = [] # create pop from input argument solutions
for k, s in enumerate(solutions): # use phenotype before Solution.repair()
if 1 < 3:
pop += [self.gp.geno(s,
from_bounds=self.boundary_handler.inverse,
repair=(self.repair_genotype if check_points not in (False, 0, [], ()) else None),
archive=self.sent_solutions)] # takes genotype from sent_solutions, if available
try:
self.archive.insert(s, value=self.sent_solutions.pop(s), fitness=function_values[k])
# self.sent_solutions.pop(s)
except KeyError:
pass
# check that TPA mirrors are available
self.pop = pop # used in check_consistency of CMAAdaptSigmaTPA
self.adapt_sigma.check_consistency(self)
if self.countiter > 1:
self.mean_old_old = self.mean_old
self.mean_old = self.mean
mold = self.mean_old # just an alias
# check and normalize each x - m
# check_points is a flag (None is default: check non-known solutions) or an index list
# should also a number possible (first check_points points)?
if check_points not in (None, False, 0, [], ()):
# useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions
# by default this is not executed
try:
if len(check_points):
idx = check_points
except:
idx = range(sp.popsize)
for k in idx:
self.repair_genotype(pop[k])
# sort pop for practicability, now pop != self.pop, which is unsorted
pop = np.asarray(pop)[fit.idx]
# prepend best-ever solution to population, in case
# note that pop and fit.fit do not agree anymore in this case
if self.opts['CMA_elitist'] == 'initial':
if not hasattr(self, 'f0'):
utils.print_warning(
'Set attribute `es.f0` to make initial elitism\n' +
'available or use cma.fmin.',
'tell', 'CMAEvolutionStrategy', self.countiter)
elif fit.fit[0] > self.f0:
x_elit = self.mean0.copy()
# self.clip_or_fit_solutions([x_elit], [0]) # just calls repair_genotype
self.random_rescale_to_mahalanobis(x_elit)
pop = array([x_elit] + list(pop), copy=False)
utils.print_message('initial solution injected %f<%f' %
(self.f0, fit.fit[0]),
'tell', 'CMAEvolutionStrategy',
self.countiter, verbose=self.opts['verbose'])
elif self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
if self.best.x_geno is not None:
xp = [self.best.x_geno]
# xp = [self.best.xdict['geno']]
# xp = [self.gp.geno(self.best.x[:])] # TODO: remove
# print self.mahalanobis_norm(xp[0]-self.mean)
else:
xp = [self.gp.geno(array(self.best.x, copy=True),
self.boundary_handler.inverse,
copy=False)]
utils.print_warning('genotype for elitist not found', 'tell')
# self.clip_or_fit_solutions(xp, [0])
self.random_rescale_to_mahalanobis(xp[0])
pop = np.asarray([xp[0]] + list(pop))
self.pop_sorted = pop
# compute new mean
self.mean = mold + self.sp.cmean * \
(np.sum(np.asarray(sp.weights.positive_weights) * pop[0:sp.weights.mu].T, 1) - mold)
# check Delta m (this is not default, but could become at some point)
# CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
# replaced by repair_geno?
# simple test case injecting self.mean:
# self.mean = 1e-4 * self.sigma * np.random.randn(N)
if 11 < 3 and self.opts['vv'] and check_points: # CAVEAT: check_points might be an index-list
cmean = self.sp.cmean / min(1, ((self.opts['vv'] * N)**0.5 + 2) / (# abuse of cmean
(self.sp.weights.mueff**0.5 / self.sp.cmean) *
self.mahalanobis_norm(self.mean - mold)))
else:
cmean = self.sp.cmean
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
if 11 < 3:
self.more_to_write += [sum(self.mean**2)]
if 11 < 3: # plot length of mean - mold
self.more_to_write += [self.sp.weights.mueff**0.5 *
sum(((1. / self.D) * np.dot(self.B.T, self.mean - mold))**2)**0.5 /
self.sigma / N**0.5 / cmean]
### line 2799
# get learning rate constants
cc = sp.cc
c1 = self.opts['CMA_on'] * self.opts['CMA_rankone'] * self.sm.parameters(
mueff=sp.weights.mueff, lam=sp.weights.lambda_).get('c1', sp.c1) # mueff and lambda_ should not be necessary here
cmu = self.opts['CMA_on'] * self.opts['CMA_rankmu'] * self.sm.parameters().get('cmu', sp.cmu)
if flg_diagonal:
cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep
# now the real work can start
# _update_ps must be called before the distribution is changed,
# hsig() calls _update_ps
hsig = self.adapt_sigma.hsig(self)
if 11 < 3:
# hsig = 1
# sp.cc = 4 / (N + 4)
# sp.cs = 4 / (N + 4)
# sp.cc = 1
# sp.damps = 2 #
# sp.CMA_on = False
# c1 = 0 # 2 / ((N + 1.3)**2 + 0 * sp.weights.mu) # 1 / N**2
# cmu = min([1 - c1, cmu])
if self.countiter == 1:
print('parameters modified')
# hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
# adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
# hsig leads to premature convergence of C otherwise
# hsiga = (1-hsig**2) * c1 * cc * (2-cc) # to be removed in future
c1a = c1 * (1 - (1 - hsig**2) * cc * (2 - cc)) # adjust for variance loss
if 11 < 3: # diagnostic data
# self.out['hsigcount'] += 1 - hsig
if not hsig:
self.hsiglist.append(self.countiter)
if 11 < 3: # diagnostic message
if not hsig:
print(str(self.countiter) + ': hsig-stall')
if 11 < 3: # for testing purpose
hsig = 1 # TODO:
# put correction term, but how?
if self.countiter == 1:
print('hsig=1')
self.pc = (1 - cc) * self.pc + hsig * (
(cc * (2 - cc) * self.sp.weights.mueff)**0.5 / self.sigma
/ cmean) * (self.mean - mold) / self.sigma_vec.scaling
        # covariance matrix adaptation/update
pop_zero = pop - mold
if c1a + cmu > 0:
# TODO: make sure cc is 1 / N**0.5 rather than 1 / N
# TODO: simplify code: split the c1 and cmu update and call self.sm.update twice
# caveat: for this the decay factor ``c1_times_delta_hsigma - sum(weights)`` should be zero in the second update
sampler_weights = [c1a] + [cmu * w for w in sp.weights]
if len(pop_zero) > len(sp.weights):
sampler_weights = (
sampler_weights[:1+sp.weights.mu] +
(len(pop_zero) - len(sp.weights)) * [0] +
sampler_weights[1+sp.weights.mu:])
if 'inc_cmu_pos' in self.opts['vv']:
sampler_weights = np.asarray(sampler_weights)
sampler_weights[sampler_weights > 0] *= 1 + self.opts['vv']['inc_cmu_pos']
# logger = logging.getLogger(__name__) # "global" level needs to be DEBUG
# logger.debug("w[0,1]=%f,%f", sampler_weights[0],
# sampler_weights[1]) if self.countiter < 2 else None
# print(' injected solutions', tuple(self._injected_solutions_archive.values()))
for i, x in enumerate(pop):
try:
self._injected_solutions_archive.pop(x)
# self.gp.repaired_solutions.pop(x)
except KeyError:
pass # print(i)
else:
# print(i + 1, '-th weight set to zero')
sampler_weights[i + 1] = 0 # weight zero is for pc
for s in list(self._injected_solutions_archive):
if self._injected_solutions_archive[s]['iteration'] < self.countiter - 2:
                        warnings.warn("""orphaned injected solution %s
This could be a bug in the calling order/logics or due to
a too small popsize used in `ask()` or when only using
`ask(1)` repeatedly. Please check carefully.
                    In case this is desired, the warning can be suppressed with
``warnings.simplefilter("ignore", cma.evolution_strategy.InjectionWarning)``
""" % str(self._injected_solutions_archive.pop(s)),
InjectionWarning)
assert len(sampler_weights) == len(pop_zero) + 1
if flg_diagonal:
self.sigma_vec.update(
[self.sm.transform_inverse(self.pc)] +
list(self.sm.transform_inverse(pop_zero /
(self.sigma * self.sigma_vec.scaling))),
array(sampler_weights) / 2) # TODO: put the 1/2 into update function!?
else:
self.sm.update([(c1 / (c1a + 1e-23))**0.5 * self.pc] + # c1a * pc**2 gets c1 * pc**2
list(pop_zero / (self.sigma * self.sigma_vec.scaling)),
sampler_weights)
if any(np.asarray(self.sm.variances) < 0):
raise RuntimeError("""A sampler variance has become
negative after update, this must be considered as a bug.
Variances `self.sm.variances`=%s""" % str(self.sm.variances))
self._updateBDfromSM(self.sm)
# step-size adaptation, adapt sigma
# in case of TPA, function_values[0] and [1] must reflect samples colinear to xmean - xmean_old
try:
self.sigma *= self.adapt_sigma.update2(self,
function_values=function_values)
except (NotImplementedError, AttributeError):
self.adapt_sigma.update(self, function_values=function_values)
if 11 < 3 and self.opts['vv']:
if self.countiter < 2:
print('constant sigma applied')
print(self.opts['vv']) # N=10,lam=10: 0.8 is optimal
self.sigma = self.opts['vv'] * self.sp.weights.mueff * sum(self.mean**2)**0.5 / N
if any(self.sigma * self.sigma_vec.scaling * self.dC**0.5 <
np.asarray(self.opts['minstd'])):
self.sigma = max(np.asarray(self.opts['minstd']) /
(self.sigma_vec * self.dC**0.5))
assert all(self.sigma * self.sigma_vec * self.dC**0.5 >=
(1-1e-9) * np.asarray(self.opts['minstd']))
elif any(self.sigma * self.sigma_vec.scaling * self.dC**0.5 >
np.asarray(self.opts['maxstd'])):
self.sigma = min(np.asarray(self.opts['maxstd']) /
                             (self.sigma_vec * self.dC**0.5))
# g = self.countiter
# N = self.N
# mindx = eval(self.opts['mindx'])
# if utils.is_str(self.opts['mindx']) else self.opts['mindx']
if self.sigma * min(self.D) < self.opts['mindx']: # TODO: sigma_vec is missing here
self.sigma = self.opts['mindx'] / min(self.D)
if self.sigma > 1e9 * self.sigma0:
alpha = self.sigma / max(self.sm.variances)**0.5
if alpha > 1:
self.sigma /= alpha**0.5 # adjust only half
self.opts['tolupsigma'] /= alpha**0.5 # to be compared with sigma
self.sm *= alpha
self._updateBDfromSM()
# TODO increase sigma in case of a plateau?
# Uncertainty noise measurement is done on an upper level
# move mean into "feasible preimage", leads to weird behavior on
# 40-D tablet with bound 0.1, not quite explained (constant
# dragging is problematic, but why doesn't it settle), still a bug?
if 11 < 3 and isinstance(self.boundary_handler, BoundTransform) \
and not self.boundary_handler.is_in_bounds(self.mean):
self.mean = array(self.boundary_handler.inverse(
self.boundary_handler.repair(self.mean, copy_if_changed=False),
copy_if_changed=False), copy=False)
if _new_injections:
self.pop_injection_directions = self._prepare_injection_directions()
if self.opts['verbose'] > 4 and self.countiter < 3 and type(self.adapt_sigma) is not CMAAdaptSigmaTPA and len(self.pop_injection_directions):
utils.print_message(' %d directions prepared for injection %s' %
(len(self.pop_injection_directions),
"(no more messages will be shown)" if
self.countiter == 2 else ""))
self.number_of_injections_delivered = 0
self.pop = [] # remove this in case pop is still needed
# self.pop_sorted = []
self._flgtelldone = True
try: # shouldn't fail, but let's be nice to code abuse
self.timer.pause()
except AttributeError:
utils.print_warning("""
"timer" attribute not found, probably because `ask` was never called.
Timing is likely to work only until `tell` is called (again), because
`tic` will never be called again afterwards.
""",
'tell', 'CMAEvolutionStrategy',
self.countiter)
self.timer = utils.ElapsedWCTime()
self.more_to_write.check()
# end tell()
def inject(self, solutions, force=None):
"""inject list of one or several genotypic solution(s).
This is the preferable way to pass outside proposal solutions
into `CMAEvolutionStrategy`. Passing (bad) solutions directly
via `tell` is likely to fail when ``CMA_active is True`` as by
default.
Unless ``force is True``, the `solutions` are used as direction
relative to the distribution mean to compute a new candidate
solution returned in method `ask_geno` which in turn is used in
method `ask`. Even when ``force is True``, the update in `tell`
takes later care of possibly trimming the update vector.
`inject` is to be called before `ask` or after `tell` and can be
called repeatedly.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 2) #doctest: +ELLIPSIS
(4_w,...
>>> while not es.stop():
... es.inject([4 * [0.0]])
... X = es.ask()
... if es.countiter == 0:
... assert X[0][0] == X[0][1] # injected sol. is on the diagonal
... es.tell(X, [cma.ff.sphere(x) for x in X])
Details: injected solutions are not used in the "active" update which
would decrease variance in the covariance matrix in this direction.
"""
for solution in solutions:
if solution is None:
continue
if len(solution) != self.N:
raise ValueError('method `inject` needs a list or array'
                                 + (' each element with dimension (`len`) %d' % self.N))
solution = | array(solution, copy=False, dtype=float) | numpy.array |
'''
Using characterised SVs, count normal and supporting reads at SV locations
'''
import warnings
import os
import configparser
import numpy as np
import pysam
import csv
import vcf
from collections import OrderedDict
from operator import methodcaller
from . import bamtools
from . import svDetectFuncs as svd
from . import svp_dtypes as dtypes
def read_to_array(x,bamf):
chrom = bamf.getrname(x.reference_id)
try:
read = np.array((x.query_name,chrom,x.reference_start,x.reference_end,x.query_alignment_start,
                        x.query_alignment_end,x.query_length,x.tlen,bool(x.is_reverse)),dtype=dtypes.read_dtype)
return read
except TypeError:
print('Warning: record %s contains invalid attributes, skipping' % x.query_name)
#return np.empty(len(dtypes.read_dtype),dtype=dtypes.read_dtype)
return np.empty(0)
def is_soft_clipped(read):
return (read['align_start'] != 0) or (read['align_end'] != read['len'])
def is_below_sc_threshold(read,threshold):
return (read['align_start'] < threshold) and (read['len'] - read['align_end'] < threshold)
def is_normal_non_overlap(read,mate,pos,min_ins,max_ins,threshold):
'''
if read and mate have normal insert size, are not soft-clipped,
and do not overlap the breakpoint (insert or read), return true
'''
if read['chrom']!=mate['chrom']:
return False
return (not is_soft_clipped(read)) and \
(abs(read['ins_len']) < max_ins and abs(read['ins_len']) > min_ins) and \
(abs(mate['ins_len']) < max_ins and abs(mate['ins_len']) > min_ins) and \
not (read['ref_start'] < (pos + threshold) and read['ref_end'] > (pos - threshold)) and \
not (mate['ref_start'] < (pos + threshold) and mate['ref_end'] > (pos - threshold)) and \
not (read['ref_start'] < pos and mate['ref_end'] > pos)
def is_normal_across_break(read,pos,min_ins,max_ins,norm_overlap):
# must overhang break by at least the norm overlap parameter
return is_below_sc_threshold(read,2) and \
(abs(read['ins_len']) < max_ins and abs(read['ins_len']) > min_ins) and \
(read['ref_start'] < (pos - norm_overlap) and read['ref_end'] > (pos + norm_overlap))
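# Worked example (illustrative): with pos=1000, norm_overlap=10, an unclipped read with an
# in-range insert size and an alignment spanning ref_start=950 to ref_end=1060 overhangs
# the break by >=10bp on both sides and is counted as a normal read across the break.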
def get_normal_overlap_bases(read,pos):
return min( [abs(read['ref_start']-pos), abs(read['ref_end']-pos)] )
def is_normal_spanning(read,mate,pos,min_ins,max_ins,sc_len):
if not is_soft_clipped(read) and not is_soft_clipped(mate):
if (not read['is_reverse'] and mate['is_reverse']) or (read['is_reverse'] and not mate['is_reverse']):
return (abs(read['ins_len']) < max_ins and abs(read['ins_len']) > min_ins) and \
(read['ref_start'] < (pos + sc_len) and mate['ref_end'] > (pos - sc_len))
return False
def is_supporting_split_read(read,pos,max_ins,sc_len,threshold):
'''
Return whether read is a supporting split read.
Doesn't yet check whether the soft-clip aligns
to the other side.
'''
if read['align_start'] < (threshold): #a "soft" threshold if it is soft-clipped at the other end
return read['ref_end'] > (pos - threshold) and read['ref_end'] < (pos + threshold) and \
(read['len'] - read['align_end'] >= sc_len) and abs(read['ins_len']) < max_ins
else:
return read['ref_start'] > (pos - threshold) and read['ref_start'] < (pos + threshold) and \
(read['align_start'] >= sc_len) and abs(read['ins_len']) < max_ins
def is_supporting_split_read_wdir(bp_dir,read,pos,max_ins,sc_len,threshold):
if bp_dir=='+':
return read['ref_end'] > (pos - threshold) and read['ref_end'] < (pos + threshold) and \
(read['len'] - read['align_end'] >= sc_len) and abs(read['ins_len']) < max_ins
elif bp_dir=='-':
return read['ref_start'] > (pos - threshold) and read['ref_start'] < (pos + threshold) and \
(read['align_start'] >= sc_len) and abs(read['ins_len']) < max_ins
else:
return False
def is_supporting_split_read_lenient(read,pos,threshold):
'''
Same as is_supporting_split_read without insert and soft-clip threshold checks
'''
if read['align_start'] < 5: #a "soft" threshold if it is soft-clipped at the other end
return (read['len'] - read['align_end'] >= threshold) and read['ref_end'] > (pos - threshold) and \
read['ref_end'] < (pos + threshold)
else:
return (read['align_start'] >= threshold) and read['ref_start'] > (pos - threshold) and \
read['ref_start'] < (pos + threshold)
def get_sc_bases(read,pos,threshold):
'''
Return the number of soft-clipped bases
'''
if read['align_start'] < (threshold):
return read['len'] - read['align_end']
else:
return read['align_start']
def get_bp_dist(read,bp_pos):
if read['is_reverse']:
return (read['ref_end'] - bp_pos)
else:
return (bp_pos - read['ref_start'])
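# Worked example (illustrative): for bp_pos=150, a forward read with ref_start=100 gives a
# breakpoint distance of 150 - 100 = 50; a reverse read with ref_end=200 gives 200 - 150 = 50.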
def points_towards_break(read,pos,threshold):
scenario1 = read['is_reverse'] and read['ref_end'] > pos
scenario2 = not read['is_reverse'] and read['ref_start'] < pos
return scenario1 or scenario2
def is_supporting_spanning_pair(read,mate,bp1,bp2,inserts,max_ins,threshold):
pos1 = (bp1['start'] + bp1['end']) / 2
pos2 = (bp2['start'] + bp2['end']) / 2
#ensure this isn't just a regular old spanning pair
if read['chrom']==mate['chrom']:
if read['ref_start']<mate['ref_start']:
if mate['ref_start']-read['ref_end'] < max_ins: return False
else:
if read['ref_start']-mate['ref_end'] < max_ins: return False
#check read orientation
#spanning reads should always point towards the break
if not points_towards_break(read,pos1,threshold) or not points_towards_break(mate,pos2,threshold):
return False
ins_dist1 = get_bp_dist(read,pos1)
ins_dist2 = get_bp_dist(mate,pos2)
if is_supporting_split_read_lenient(read,pos1,threshold):
if not is_below_sc_threshold(mate,threshold):
#only allow one soft-clip
return False
if abs(ins_dist1)+abs(ins_dist2) < max_ins:
return True
elif is_supporting_split_read_lenient(mate,pos2,threshold):
if not is_below_sc_threshold(read,threshold):
return False
if abs(ins_dist1)+abs(ins_dist2) < max_ins:
return True
else:
if ins_dist1>=-threshold and ins_dist2>=-threshold and abs(ins_dist1)+abs(ins_dist2) < max_ins:
return True
return False
def fetch_reads(loc,bamf,max_dp):
err_code = 0
loc_reads = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
iter_loc = bamf.fetch(region=loc,until_eof=True)
for x in iter_loc:
read = read_to_array(x,bamf)
if len(np.atleast_1d(read))>0:
loc_reads = np.append(loc_reads,read)
if len(loc_reads) > max_dp:
print('Read depth too high at %s' % loc)
err_code = 1
return np.empty(0), err_code
loc_reads = np.sort(loc_reads,axis=0,order=['query_name','ref_start'])
loc_reads = np.unique(loc_reads) #remove duplicates
return loc_reads, err_code
def get_loc_reads(bp,bamf,max_dp,new_pysam=False):
loc = '%s:%d:%d' % (bp['chrom'], max(0,bp['start']), bp['end'])
try:
loc_reads, err_code = fetch_reads(loc, bamf, max_dp)
return loc_reads, err_code
except ValueError:
pass
try:
# try with updated pysam loc format...
loc = '%s:%d-%d' % (bp['chrom'], max(0,bp['start']), bp['end'])
loc_reads, err_code = fetch_reads(loc, bamf, max_dp)
return loc_reads, err_code
except:
print('Fetching reads failed for loc: %s' % loc)
err_code = 2
return np.empty(0), err_code
def reads_to_sam(reads,bam,bp1,bp2,dirout,name):
'''
    For testing read assignments.
    Takes reads from an array, matches them to bam
    file reads by query name and outputs them to SAM.
'''
reads = np.unique(reads['query_name'])
bamf = pysam.AlignmentFile(bam, "rb")
loc1 = '%s:%d:%d' % (bp1['chrom'], bp1['start'], bp1['end'])
loc2 = '%s:%d:%d' % (bp2['chrom'], bp2['start'], bp2['end'])
iter_loc1 = bamf.fetch(region=loc1,until_eof=True)
iter_loc2 = bamf.fetch(region=loc2,until_eof=True)
loc1 = '%s-%d' % (bp1['chrom'], (bp1['start']+bp1['end'])/2)
loc2 = '%s-%d' % (bp2['chrom'], (bp1['start']+bp1['end'])/2)
sam_name = '%s_%s-%s' % (name,loc1,loc2)
if not os.path.exists(dirout):
        os.makedirs(dirout)
bam_out = pysam.AlignmentFile('%s/%s.sam'%(dirout,sam_name), "w", header=bamf.header)
for x in iter_loc1:
if len(reads)==0:
break
if x.query_name in reads:
bam_out.write(x)
bam_out.write(bamf.mate(x))
idx = int(np.where(reads==x.query_name)[0])
reads = np.delete(reads,idx)
for x in iter_loc2:
if len(reads)==0:
break
if x.query_name in reads:
bam_out.write(x)
bam_out.write(bamf.mate(x))
idx = int(np.where(reads==x.query_name)[0])
reads = np.delete(reads,idx)
bamf.close()
bam_out.close()
def windowed_norm_read_count(loc_reads,inserts,min_ins,max_ins):
'''
Counts normal non-soft-clipped reads within window range
'''
cnorm = 0
for idx,r in enumerate(loc_reads):
if idx+1 >= len(loc_reads):
break
r1 = np.array(loc_reads[idx],copy=True)
r2 = np.array(loc_reads[idx+1],copy=True)
if r1['query_name']!=r2['query_name'] or r1['chrom']!=r2['chrom']:
continue
ins_dist = r2['ref_end']-r1['ref_start']
facing = not r1['is_reverse'] and r2['is_reverse']
if not is_soft_clipped(r1) and not is_soft_clipped(r2) and facing and ins_dist > min_ins and ins_dist < max_ins:
cnorm = cnorm + 2
return cnorm
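# Note (illustrative): each adjacent pair of unclipped reads from the same fragment, facing
# forward/reverse with an outer span between min_ins and max_ins, adds 2 to the normal count.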
def get_loc_counts(bp,loc_reads,pos,rc,reproc,split,norm,min_ins,max_ins,sc_len,norm_overlap,threshold,bp_num=1):
for idx,x in enumerate(loc_reads):
if idx+1 >= len(loc_reads):
break
if x['query_name'] in norm['query_name']:
continue
r1 = loc_reads[idx]
r2 = loc_reads[idx+1] if (idx+2)<=len(loc_reads) else None
if is_normal_non_overlap(r1,r2,pos,min_ins,max_ins,threshold):
continue
elif is_normal_across_break(x,pos,min_ins,max_ins,norm_overlap):
norm = np.append(norm,r1)
split_norm = 'split_norm%d'%bp_num
norm_olap = 'norm_olap_bp%d'%bp_num
rc[split_norm] = rc[split_norm]+1
rc[norm_olap] = rc[norm_olap]+get_normal_overlap_bases(x,pos)
elif is_supporting_split_read(x,pos,max_ins,sc_len,threshold):
split_supp = 'split%d'%bp_num
split_cnt = 'sc_bases%d'%bp_num
if bp['dir'] in ['+','-']:
if is_supporting_split_read_wdir(bp['dir'],x,pos,max_ins,sc_len,threshold):
split = np.append(split,x)
rc[split_supp] = rc[split_supp]+1
rc[split_cnt] = rc[split_cnt]+get_sc_bases(x,pos,threshold)
else:
reproc = np.append(reproc,x) #may be spanning support or anomalous
        elif r2 is not None and r1['query_name']==r2['query_name'] and is_normal_spanning(r1,r2,pos,min_ins,max_ins,0):
norm_across1 = is_normal_across_break(r1, pos, min_ins, max_ins, norm_overlap)
norm_across2 = is_normal_across_break(r2, pos, min_ins, max_ins, norm_overlap)
if not norm_across1 and not norm_across2:
norm = np.append(norm,r1)
norm = np.append(norm,r2)
span_norm = 'span_norm%d'%bp_num
rc[span_norm] = rc[span_norm]+1
else:
reproc = np.append(reproc,x) #may be spanning support or anomalous
return rc, reproc, split, norm
def bp_dir_matches_read_orientation(bp,pos,read):
if bp['dir']=='+':
return read['ref_start'] < pos and not read['is_reverse']
elif bp['dir']=='-':
return read['ref_end'] > pos and read['is_reverse']
def validate_spanning_orientation(bp1,bp2,r1,r2):
pos1 = (bp1['start'] + bp1['end']) / 2
pos2 = (bp2['start'] + bp2['end']) / 2
r1_correct = bp_dir_matches_read_orientation(bp1,pos1,r1)
r2_correct = bp_dir_matches_read_orientation(bp2,pos2,r2)
return r1_correct and r2_correct
def get_spanning_counts(reproc,rc,bp1,bp2,inserts,min_ins,max_ins,threshold):
pos1 = (bp1['start'] + bp1['end']) / 2
pos2 = (bp2['start'] + bp2['end']) / 2
reproc = np.sort(reproc,axis=0,order=['query_name','ref_start'])
reproc = np.unique(reproc) #remove dups
anomalous = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
span_bp1 = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
span_bp2 = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
for idx,x in enumerate(reproc):
if idx+1 >= len(reproc):
break
if reproc[idx+1]['query_name'] != reproc[idx]['query_name']:
#not paired
continue
mate = np.array(reproc[idx+1],copy=True)
r1 = np.array(x,copy=True)
r2 = np.array(mate,copy=True)
#if read corresponds to bp2 and mate to bp1, switch their order
if (bp1['chrom']!=bp2['chrom'] and r1['chrom']==bp2['chrom']) or \
(pos1 > pos2 and bp1['chrom']==bp2['chrom']):
r1 = mate
r2 = np.array(x,copy=True)
if is_supporting_spanning_pair(r1,r2,bp1,bp2,inserts,max_ins,threshold):
if bp1['dir'] in ['+','-'] and bp2['dir'] in ['-','+']:
if validate_spanning_orientation(bp1,bp2,r1,r2):
span_bp1 = np.append(span_bp1,r1)
span_bp2 = np.append(span_bp2,r2)
rc['spanning'] = rc['spanning']+1
else:
anomalous = np.append(anomalous,r1)
anomalous = np.append(anomalous,r2)
else:
anomalous = np.append(anomalous,r1)
anomalous = np.append(anomalous,r2)
return rc,span_bp1,span_bp2,anomalous
def get_sv_read_counts(row,bam,rparams,out,split_reads,span_reads,anom_reads):
inserts, min_ins, max_ins, max_dp = rparams['insert'], rparams['min_ins'], rparams['max_ins'], rparams['max_dp']
threshold, sc_len, norm_overlap = rparams['threshold'], rparams['threshold'], rparams['norm_overlap']
sv_id, chr1_field, pos1_field, dir1_field, \
chr2_field, pos2_field, \
dir2_field, sv_class, \
oid_field, opos1_field, opos2_field = [h[0] for h in dtypes.sv_dtype]
bp1 = np.array((row[chr1_field],row[pos1_field]-max_ins,
row[pos1_field]+max_ins,row[dir1_field]),dtype=dtypes.bp_dtype)
bp2 = np.array((row[chr2_field],row[pos2_field]-max_ins,
row[pos2_field]+max_ins,row[dir2_field]),dtype=dtypes.bp_dtype)
pos1, pos2 = row[pos1_field], row[pos2_field]
rc = np.zeros(1,dtype=dtypes.sv_out_dtype)[0]
rc['chr1'], rc['pos1'], rc['dir1'] = row[chr1_field], row[pos1_field], row[dir1_field]
rc['chr2'], rc['pos2'], rc['dir2'] = row[chr2_field], row[pos2_field], row[dir2_field]
rc['ID'], rc['classification'] = row[sv_id], row[sv_class]
if row[dir1_field] not in ['+','-'] or row[dir2_field] not in ['+','-']:
#one or both breaks don't have a valid direction
return rc, split_reads, span_reads, anom_reads
bamf = pysam.AlignmentFile(bam, "rb")
loc1_reads, err_code1 = get_loc_reads(bp1,bamf,max_dp)
loc2_reads, err_code2 = get_loc_reads(bp2,bamf,max_dp)
bamf.close()
if not (err_code1==0 and err_code2==0) or (len(loc1_reads)==0 or len(loc2_reads)==0):
sv_class = str(row['classification'])
if err_code1 == 1 or err_code2 == 1:
rc['classification'] = 'HIDEP' if sv_class=='' else sv_class+';HIDEP'
return rc, split_reads, span_reads, anom_reads
elif err_code1 == 2 or err_code2 == 2:
rc['classification'] = 'READ_FETCH_FAILED' if sv_class=='' else sv_class+';READ_FETCH_FAILED'
return rc, split_reads, span_reads, anom_reads
else:
rc['classification'] = 'NO_READS' if sv_class=='' else sv_class+';NO_READS'
return rc, split_reads, span_reads, anom_reads
reproc = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
rc['total_reads1'] = len(loc1_reads)
rc['total_reads2'] = len(loc2_reads)
split_bp1 = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
split_bp2 = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
norm = np.empty([0,len(dtypes.read_dtype)],dtype=dtypes.read_dtype)
rc, reproc, split_bp1, norm = get_loc_counts(bp1, loc1_reads, pos1, rc, reproc, split_bp1, \
norm, min_ins, max_ins, sc_len, norm_overlap, threshold)
rc, reproc, split_bp2, norm = get_loc_counts(bp2, loc2_reads, pos2, rc, reproc, split_bp2, \
norm,min_ins, max_ins, sc_len, norm_overlap, threshold, 2)
rc['win_norm1'] = windowed_norm_read_count(loc1_reads,inserts,min_ins,max_ins)
rc['win_norm2'] = windowed_norm_read_count(loc2_reads,inserts,min_ins,max_ins)
rc, span_bp1, span_bp2, anomalous = get_spanning_counts(reproc,rc,bp1,bp2,inserts,min_ins,max_ins,threshold)
spanning = span_bp1
spanning = np.concatenate([span_bp1,span_bp2]) if (len(span_bp1)>0 and len(span_bp2)>0) else spanning
spanning = span_bp2 if len(span_bp1)==0 else spanning
span_reads = | np.append(span_reads,spanning) | numpy.append |
#Author: <NAME>
import numpy as np
import os
import h5py
import pandas as pd
from AxonImaging import signal_processing as sp
def get_processed_running_speed(vsig, vref, sample_freq, smooth_filter_sigma=0.05, wheel_diameter=16.51, positive_speed_threshold=70, negative_speed_threshold=-5):
''' Returns the running speed given voltage changes from an encoder wheel. Speeds are smoothed and outlier
    values above or below arbitrarily defined thresholds are set as NaN.
:param Vsig: voltage signal which changes as a function of wheel movement (running)
:param Vref: reference voltage (typically 5V +/- small offset that is encoder dependent
:param sample_freq: sampling frequency which Vsig and Vref are acquired at
:param smooth_filter_sigma: value used for guassian filtering
:param wheel_diameter: diameter of running wheel
:param positive_speed_threshold: maximum allowed positive speed (sets impossibly high running speeds equal to NaN)
:param negative_speed_threshold: maximum allowed negative speed (sets impossibly high backwards running speeds equal to NaN)
    :return: smoothed trace of running speed in cm/s per sample with outliers set to NaN
'''
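    # Processing steps: estimate the encoder reference level (median of vref), convert the
    # signal voltage to wheel angle in radians, Gaussian-smooth, differentiate to angular
    # speed, scale by the wheel diameter, then set implausible speeds (outside the
    # thresholds) to NaN.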
from scipy.ndimage import gaussian_filter1d
vref_mean = np.median(vref[np.abs(vref)<20])
position_arc = vsig*(2.*np.pi)/vref_mean
position_arc_smooth = gaussian_filter1d(position_arc, int(smooth_filter_sigma*sample_freq))
speed_arc = np.append(np.diff(position_arc_smooth),0) * sample_freq
speed = speed_arc * wheel_diameter
speed_smooth = np.copy(speed)
speed_smooth[np.logical_or(speed>=positive_speed_threshold,speed<=negative_speed_threshold)]=np.nan
mask = | np.isnan(speed_smooth) | numpy.isnan |
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans
import tensorflow as tf
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
import tensorflow.keras as keras
import keras.backend as K
import cv2
Sky = [128, 128, 128]
Building = [128, 0, 0]
Pole = [192, 192, 128]
Road = [128, 64, 128]
Pavement = [60, 40, 222]
Tree = [128, 128, 0]
SignSymbol = [192, 128, 128]
Fence = [64, 64, 128]
Car = [64, 0, 128]
Pedestrian = [64, 64, 0]
Bicyclist = [0, 128, 192]
Unlabelled = [0, 0, 0]
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def set_GPU_Memory_Limit():
""" Set the GPU memory limit for the program when using Tensorflow GPU """
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def Unet_scheduler(epoch, lr):
"""
learning rate decay
"""
if epoch < 2:
return lr
elif epoch < 5:
return 1e-4
elif epoch < 10:
return 1e-4
else:
return lr * tf.math.exp(-0.05)
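# Usage sketch (illustrative; `model` and `train_gen` are assumed to exist already):
# lr_cb = keras.callbacks.LearningRateScheduler(Unet_scheduler, verbose=1)
# model.fit(train_gen, epochs=20, callbacks=[lr_cb])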
def adjustData(img, mask, flag_multi_class, num_class):
"""
Rescale image and turn the mask to one hot vector
"""
if flag_multi_class:
img = img / 255
mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0] # [batch_size,w,h,channel]
new_mask = np.zeros(mask.shape + (num_class,)) # add one dimension for num_class size
for i in range(num_class):
# for one pixel in the image, find the class in mask and convert it into one-hot vector
# index = np.where(mask == i)
# index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
# new_mask[index_mask] = 1
new_mask[mask == i, i] = 1
new_mask = np.reshape(new_mask, (new_mask.shape[0], new_mask.shape[1] * new_mask.shape[2],
new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask, (
new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2]))
mask = new_mask
else:
img = img / 255 # can be replace by setting rescale parameter in ImageDataGenerator
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
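# Worked example (illustrative): with flag_multi_class=True and num_class=2, a mask batch of
# shape (B, 256, 256, 1) holding integer labels {0, 1} becomes a one-hot array reshaped to
# (B, 256*256, 2); with flag_multi_class=False the mask is rescaled to [0, 1] and thresholded at 0.5.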
def trainGenerator(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode="grayscale",
mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
flag_multi_class=False, num_class=2, save_to_dir=None, target_size=(256, 256), seed=1):
"""
can generate image and mask at the same time
use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
if you want to visualize the results of generator, set save_to_dir = "your path"
"""
if save_to_dir and not os.path.exists(save_to_dir):
os.mkdir(save_to_dir)
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
train_path,
classes=[image_folder],
class_mode=None,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes=[mask_folder],
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = adjustData(img, mask, flag_multi_class, num_class)
yield img, mask
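# Hedged usage sketch: the directory names and augmentation values below are assumptions
# for illustration only; nothing is read from disk until the returned generator is iterated.
def _demo_train_generator():
    aug_dict = dict(rotation_range=0.2, width_shift_range=0.05, height_shift_range=0.05,
                    shear_range=0.05, zoom_range=0.05, horizontal_flip=True, fill_mode='nearest')
    return trainGenerator(batch_size=2, train_path='data/train', image_folder='image',
                          mask_folder='label', aug_dict=aug_dict, save_to_dir=None)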
def testGenerator(test_path, num_image=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):
"""Test generator, generate image for testing """
    assert num_image <= len(glob.glob(os.path.join(test_path, "*.png"))), "num_image must not exceed the number of test images in test_path"
for i in range(num_image):
img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
img = img / 255
img = trans.resize(img, target_size)
img = np.reshape(img, img.shape + (1,)) if (not flag_multi_class) else img
img = np.reshape(img, (1,) + img.shape)
yield img
def geneTrainNpy(image_path, mask_path, flag_multi_class=False, num_class=2, image_prefix="image", mask_prefix="mask",
image_as_gray=True, mask_as_gray=True):
image_name_arr = glob.glob(os.path.join(image_path, "%s*.png" % image_prefix))
image_arr = []
mask_arr = []
for index, item in enumerate(image_name_arr):
img = io.imread(item, as_gray=image_as_gray)
img = np.reshape(img, img.shape + (1,)) if image_as_gray else img
mask = io.imread(item.replace(image_path, mask_path).replace(image_prefix, mask_prefix), as_gray=mask_as_gray)
mask = np.reshape(mask, mask.shape + (1,)) if mask_as_gray else mask
img, mask = adjustData(img, mask, flag_multi_class, num_class)
image_arr.append(img)
mask_arr.append(mask)
image_arr = np.array(image_arr)
mask_arr = np.array(mask_arr)
return image_arr, mask_arr
def labelVisualize(num_class, color_dict, img):
"""
visualize the label image
"""
img = img[:, :, 0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,))
for i in range(num_class):
img_out[img == i, :] = color_dict[i]
return img_out / 255
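# Hedged sketch: map a tiny 2x2 label image onto the first three COLOR_DICT entries.
def _demo_label_visualize():
    toy_labels = np.array([[0, 1], [2, 0]])
    return labelVisualize(num_class=3, color_dict=COLOR_DICT, img=toy_labels)  # (2, 2, 3) RGB scaled to [0, 1]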
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
"""
save the visualized result
"""
if not os.path.exists(save_path):
os.mkdir(save_path)
for i, item in enumerate(npyfile):
img = labelVisualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:, :, 0]
io.imsave(os.path.join(save_path, "%d_predict.png" % i), img_as_ubyte(img))
def visualize_training_results(hist, save_path="../results/UNet/Unet_training", loss_flag=True, acc_flag=True,lr_flag=False):
"""
visualize the loss function/acc/lr during the training process
"""
print("Training history has key:")
for key in hist.history:
print(key)
loss = hist.history['loss']
acc = hist.history['accuracy']
    lr = hist.history.get('lr', [])  # present only when a LearningRateScheduler callback logged it
if loss_flag:
plt.plot(np.arange(len(loss)), loss)
plt.scatter(np.arange(len(loss)), loss, c='g')
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training loss")
plt.savefig(os.path.join(save_path, "loss.png"))
plt.show()
if acc_flag:
plt.plot(np.arange(len(acc)), acc)
plt.scatter(np.arange(len(acc)), acc, c='g')
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.title("Training accuracy")
plt.savefig(os.path.join(save_path, "acc.png"))
plt.show()
if lr_flag:
plt.plot(np.arange(len(lr)), lr)
plt.scatter(np.arange(len(lr)), lr, c='g')
plt.xlabel("Epoch")
plt.ylabel("Learning rate")
plt.title("Training learning rate decay")
plt.savefig(os.path.join(save_path, "lr.png"))
plt.show()
def bce_dice_loss(y_true, y_pred):
"""
    Training loss: weighted binary cross-entropy minus the Dice coefficient (a combined BCE + Dice loss)
"""
return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
def dice_coef(y_true, y_pred):
"""
    Dice coefficient used inside the Dice loss:
    dice = (2 * |intersection| + smooth) / (|y_true| + |y_pred| + smooth)
"""
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1. - dice_coef(y_true, y_pred)
def compute_dice(im1, im2, empty_score=1.0):
"""
Evaluation metric: Dice
"""
    im1 = np.asarray(im1 > 0.5).astype(bool)  # np.bool was removed in NumPy 1.24+; use the builtin bool
    im2 = np.asarray(im2 > 0.5).astype(bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return empty_score
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
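# Hedged numeric sketch: two 2x2 masks that overlap on a single pixel give Dice = 2*1/(2+2) = 0.5.
def _demo_compute_dice():
    a = np.array([[1, 1], [0, 0]], dtype=np.float32)
    b = np.array([[1, 0], [1, 0]], dtype=np.float32)
    return compute_dice(a, b)  # -> 0.5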
def compute_metrics(y_true, y_pred):
"""
metrics of V_rand and V_info
"""
v_rand,v_info=None,None
pred_label = (y_pred > 0.5).astype(np.uint8)
gt_label = (y_true > 0.5).astype(np.uint8)
pred_num, pred_out = cv2.connectedComponents(pred_label, connectivity=4)
gt_num, gt_out = cv2.connectedComponents(gt_label, connectivity=4)
p = np.zeros((pred_num+1, gt_num+1))
for i in range(pred_num+1):
tmp_mask = (pred_out==i)
for j in range(gt_num+1):
if i==0 or j==0:
p[i][j]=0
else:
p[i][j] = | np.logical_and(tmp_mask, gt_out==j) | numpy.logical_and |
"""Tests for :mod:`pyts.image` module."""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from future import standard_library
from itertools import product
import numpy as np
from ..image import GASF, GADF, MTF, RecurrencePlots
standard_library.install_aliases()
def test_GASF():
"""Testing 'GASF'."""
# Parameter
size = 9
X = np.linspace(-1, 1, size)
# Test 1
ones = np.ones(size)
gasf = GASF(image_size=size)
arr_actual = gasf.transform(X[np.newaxis, :])[0]
arr_desired = np.outer(X, X) - np.outer(np.sqrt(ones - X ** 2),
np.sqrt(ones - X ** 2))
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
# Test 2
size_new = 3
ones_new = | np.ones(size_new) | numpy.ones |
#!/usr/bin/env python
"""
@package ion_functions.data.prs_functions
@file ion_functions/data/prs_functions.py
@author <NAME>, <NAME>
@brief Module containing calculations related to instruments in the Seafloor
Pressure family.
"""
import numexpr as ne
import numpy as np
import scipy as sp
from scipy import signal
"""
Listing of functions, in order encountered.
Functions calculating data products.
BOTTILT:
prs_bottilt_ccmp -- computes the BOTTILT-CCMP_L1 data product
prs_bottilt_tmag -- computes the BOTTILT-TMAG_L1 data product
prs_bottilt_tdir -- computes the BOTTILT-TDIR_L1 data product
BOTSFLU:
prs_botsflu_time15s -- computes the TIME15S-AUX auxiliary data product
prs_botsflu_meanpres -- computes the BOTSFLU-MEANPRES_L2 data product
prs_botsflu_predtide -- computes the BOTSFLU-PREDTIDE_L2 data product
prs_botsflu_meandepth -- computes the BOTSFLU-MEANDEPTH_L2 data product
prs_botsflu_5minrate -- computes the BOTSFLU-5MINRATE_L2 data product
prs_botsflu_10minrate -- computes the BOTSFLU-10MINRATE_L2 data product
prs_botsflu_time24h -- computes the TIME24H-AUX auxiliary data product
prs_botsflu_daydepth -- computes the BOTSFLU-DAYDEPTH_L2 data product
prs_botsflu_4wkrate -- computes the BOTSFLU-4WKRATE_L2 data product
prs_botsflu_8wkrate -- computes the BOTSFLU-8WKRATE_L2 data product
Functions calculating event notifications; they return either True or False.
BOTSFLU:
prs_tsunami_detection -- event notification specified by DPS
prs_eruption_imminent -- event notification specified by DPS
prs_eruption_occurred -- event notification specified by DPS
Worker functions called by functions calculating data products.
BOTSFLU
anchor_bin
calc_daydepth_plus
calc_meandepth_plus
calculate_sliding_means
calculate_sliding_slopes
"""
def prs_bottilt_ccmp(scmp, sn):
"""
Description:
OOI Level 1 Seafloor High-Resolution tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-CCMP_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Alternate code: faster, but less direct.
Usage:
ccmp = prs_bottilt_ccmp(scmp, sn)
where
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
scmp = Uncorrected sensor compass direction (BOTTILT-SCMP_L0) [degrees]
sn = LILY sensor serial number [unitless]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
"""
Currently, there are two coded algorithms:
(1) the straightforward original, which uses a two-element keyed dictionary;
(2) a faster version, which uses serial number keys to the dictionary.
Since each algorithm uses its own dictionary, the corresponding import statements
are TEMPORARILY placed at the beginning of their respective code sections
instead of at module top.
"""
### Original coding, using a dictionary constructed with 2-element keys.
# load the corrected compass directions table [(sn, scmp) keys]
from ion_functions.data.prs_functions_ccmp import cmp_lookup
# use the lookup table to get the ccmp
ccmp = np.zeros(len(scmp))
for i in range(len(scmp)):
ccmp[i] = cmp_lookup[(sn[i], int(round(scmp[i])))]
return ccmp
#### Faster coding, using a dictionary constructed with 1-element keys.
#
## load the corrected compass directions table [sn keys]
#from ion_functions.data.prs_functions_ccmp_lily_compass_cals import cmp_cal
#
## initialize output array for vectorized masking operations. this will 'break'
## the code if an invalid serial number is specified in the argument list.
#ccmp = np.zeros(len(scmp)) + np.nan
#
## round the uncorrected compass values to the nearest integer as specified in the DPS,
## which uses a lookup table consisting of integral values to do the correction.
#scmp = np.round(scmp)
#
## find the supported tilt sensor serial numbers, which are keys in the dictionary
#sernum = cmp_cal.keys()
#
#for ii in range(len(sernum)):
# # get the cal coeffs as a function of the iterated serial number;
# # x is the raw, uncorrected reading (scmp)
# # y is the corrected reading (ccmp)
# [x, y] = cmp_cal[sernum[ii]]
#
# # the boolean mask has 'true' entries where the elements of input vector sn
# # agree with the iterated serial number.
# # np.core.defchararray.equal handles vector string comparisons.
# mask = np.core.defchararray.equal(sn, sernum[ii])
#
# ## np.interp is used to do the 'lookup' for performance reasons (vectorized)
# ccmp[mask] = np.interp(scmp[mask], x, y)
#
## round to make sure we get an integral value (but not int type)
#return np.round(ccmp)
def prs_bottilt_tmag(x_tilt, y_tilt):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TMAG_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
Usage:
tmag = prs_bottilt(x_tilt, y_tilt)
where
tmag = Resultant tilt magnitude (BOTTILT-TMAG_L1) [microradians]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
tmag = ne.evaluate('sqrt(x_tilt**2 + y_tilt**2)')
return tmag
def prs_bottilt_tdir(x_tilt, y_tilt, ccmp):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TDIR_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Replaced initial code with arctan2 implementation.
Usage:
tdir = prs_bottilt(x_tilt, y_tilt, ccmp)
where
tdir = Resultant tilt direction (BOTTILT-TDIR_L1) [degrees]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
### As originally coded, according to the algorithm specified in the DPS:
## Calculate the angle to use in the tilt direction formula
## default angle calculation -- in degrees
#angle = ne.evaluate('arctan(y_tilt / x_tilt)')
#angle = np.degrees(angle)
#
## if X-Tilt == 0 and Y-Tilt > 0
#mask = np.logical_and(x_tilt == 0, y_tilt > 0)
#angle[mask] = 90.0
#
## if X-Tilt == 0 and Y-Tilt < 0
#mask = np.logical_and(x_tilt == 0, y_tilt < 0)
#angle[mask] = -90.0
#
## if Y-Tilt == 0
#mask = np.equal(y_tilt, np.zeros(len(y_tilt)))
#angle[mask] = 0.0
#
### Calculate the tilt direction, using the X-Tilt to set the equation
## default tilt direction equation
#tdir = ne.evaluate('(270 - angle + ccmp) % 360')
#
## if X-Tilt >= 0
#tmp = ne.evaluate('(90 - angle + ccmp) % 360')
#mask = np.greater_equal(x_tilt, np.zeros(len(x_tilt)))
#tdir[mask] = tmp[mask]
#
#return np.round(tdir)
# The preceding calculation is faster and simpler if the arctan2 function is used.
# Use 450 as an addend in the first argument to the mod function to make sure the result is positive.
return np.round(np.mod(450 - np.degrees(np.arctan2(y_tilt, x_tilt)) + ccmp, 360))
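# Hedged numerical check of the arctan2 form above: with a corrected compass of 0 degrees,
# tilt along +x maps to 90, +y to 0, -x to 270 and -y to 180.
def _demo_bottilt_tdir():
    x_tilt = np.array([100., 0., -100., 0.])
    y_tilt = np.array([0., 100., 0., -100.])
    ccmp = np.zeros(4)
    return prs_bottilt_tdir(x_tilt, y_tilt, ccmp)  # -> [90., 0., 270., 180.]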
def prs_botsflu_time15s(timestamp):
"""
Description:
Calculates the auxiliary BOTSFLU data product TIME15S-AUX. These are timestamps
anchored at multiples of 15 seconds past the minute which correspond to the time
base for the BOTSFLU data products which are binned on 15 seconds.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
time15s = prs_botsflu_time15s(timestamp)
where
time15s = BOTSFLU-TIME15S-AUX [sec since 01-01-1900]
timestamp = OOI system timestamps [sec since 01-01-1900]
Notes:
The BOTSFLU data products associated with this timebase are:
MEANPRES
PREDTIDE
MEANDEPTH
5MINRATE
10MINRATE
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
# the second calling argument is a placeholder
time15s = anchor_bin(timestamp, None, bin_duration, 'time')
return time15s
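# Hedged sketch (assumption): anchor_bin is defined elsewhere in this module; anchoring a
# timestamp onto the 15 s timebase amounts, roughly, to flooring it onto multiples of 15 s.
# This helper is illustrative only and is not the production anchor_bin.
def _demo_time15s_anchor(timestamp):
    bin_duration = 15.0
    return bin_duration * np.floor(np.asarray(timestamp) / bin_duration)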
def prs_botsflu_meanpres(timestamp, botpres):
"""
Description:
        Calculates the BOTSFLU data product MEANPRES_L2.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
meanpres = prs_botsflu_meanpres(timestamp, botpres)
where
meanpres = BOTSFLU-MEANPRES_L2 [psi]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
meanpres, _ = anchor_bin(timestamp, botpres, bin_duration, 'data')
return meanpres
def prs_botsflu_predtide(time):
"""
Description:
Assigns tide values for the 3 BOTPT instrument sites about 500 km west of Astoria.
When the input argument is the data product TIME15S, the output of this function
will be the BOTSFLU data product PREDTIDE.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage:
PREDTIDE = prs_botsflu_predtide(TIME15S)
where
PREDTIDE = BOTSFLU-PREDTIDE data product [m]
TIME15S = BOTSFLU-TIME15S data product [sec since 01-01-1900].
Notes:
Lookup table in binary file: 'ion_functions/data/prs_functions_tides_2014_thru_2019.mat'
The lookup table contains tide values every 15 seconds from 2014-01-01 to 2020-01-01
at lat = 45.95547 lon = -130.00957 calculated by the Tide Model Driver software
written in Matlab (Mathworks, Natick, MA) using the TPXO7.2 global model. The tides
corresponding to time are determined by positional indexing (the first value is for
2014-01-01 00:00:00, the second is for 2014-01-01 00:00:15, etc). The 3 BOTPT sites
are close enough together that the caldera center location can be used for all, as
above: lat = 45.95547 lon = -130.00957.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
Matlab code to calculate tides using TPXO7.2 global model:
http://polaris.esr.org/ptm_index.html
Further documentation for the TPXO7.2 global tide model:
http://volkov.oce.orst.edu/tides/global.html
"""
time0 = 3597523200.0 # midnight, 2014-01-01
time_interval = 15.0 # seconds
# for unit test data, only, feb-apr 2011
if time[0] < time0:
time0 = 3502828800.0 # midnight, 2011-01-01
# tide values are signed 4 byte integers, units [0.001mm]
matpath = 'ion_functions/data/matlab_scripts/botpt/'
dict_tides = sp.io.loadmat(matpath + 'tides_15sec_2011_for_unit_tests.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
# else, OOI data from 2014 onwards
# tide values are signed 4 byte integers, units [0.001mm]
dict_tides = sp.io.loadmat('ion_functions/data/prs_functions_tides_2014_thru_2019.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
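# Hedged sketch: the tide lookup is purely positional. A TIME15S timestamp is mapped to an
# index into the 15 s tide table relative to the table start (2014-01-01 for the OOI table).
def _demo_tide_index(time15s, time0=3597523200.0, time_interval=15.0):
    return np.around((np.asarray(time15s) - time0) / time_interval).astype(int)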
def prs_botsflu_meandepth(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product MEANDEPTH_L2, de-tided bottom depth
as a function of time (15sec bins).
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
meandepth = prs_botsflu_meandepth(timestamp, botpres)
where
meandepth = BOTSFLU-MEANDEPTH_L2 [m]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
The DPS specifies that atmospheric pressure not be subtracted from the
L1 pressure data even though its units are [psia].
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
_, meandepth, _ = calc_meandepth_plus(timestamp, botpres)
return meandepth
def prs_botsflu_5minrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 5MINRATE_L2, the instantaneous rate of
depth change using 5 minute backwards-looking meandepth data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
        botsflu_5minrate = prs_botsflu_5minrate(timestamp, botpres)
where
botsflu_5minrate = BOTSFLU-5MINRATE_L2 [cm/min]
timestamp = CI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate de-tided depth and the positions of non-zero bins in the original data.
_, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
# initialize data product including elements representing data gap positions
botsflu_5minrate = | np.zeros(mask_nonzero.size) | numpy.zeros |
# <NAME>, <NAME>, <NAME>
# MiniRocket: A Very Fast (Almost) Deterministic Transform for Time Series
# Classification
# https://arxiv.org/abs/2012.08791
# ** This is a naive extension of MiniRocket to multivariate time series. **
from numba import njit, prange, vectorize
import numpy as np
@njit("float32[:](float32[:,:,:],int32[:],int32[:],int32[:],int32[:],float32[:])", fastmath = True, parallel = False, cache = True)
def _fit_biases(X, num_channels_per_combination, channel_indices, dilations, num_features_per_dilation, quantiles):
num_examples, num_channels, input_length = X.shape
# equivalent to:
# >>> from itertools import combinations
# >>> indices = np.array([_ for _ in combinations(np.arange(9), 3)], dtype = np.int32)
indices = np.array((
0,1,2,0,1,3,0,1,4,0,1,5,0,1,6,0,1,7,0,1,8,
0,2,3,0,2,4,0,2,5,0,2,6,0,2,7,0,2,8,0,3,4,
0,3,5,0,3,6,0,3,7,0,3,8,0,4,5,0,4,6,0,4,7,
0,4,8,0,5,6,0,5,7,0,5,8,0,6,7,0,6,8,0,7,8,
1,2,3,1,2,4,1,2,5,1,2,6,1,2,7,1,2,8,1,3,4,
1,3,5,1,3,6,1,3,7,1,3,8,1,4,5,1,4,6,1,4,7,
1,4,8,1,5,6,1,5,7,1,5,8,1,6,7,1,6,8,1,7,8,
2,3,4,2,3,5,2,3,6,2,3,7,2,3,8,2,4,5,2,4,6,
2,4,7,2,4,8,2,5,6,2,5,7,2,5,8,2,6,7,2,6,8,
2,7,8,3,4,5,3,4,6,3,4,7,3,4,8,3,5,6,3,5,7,
3,5,8,3,6,7,3,6,8,3,7,8,4,5,6,4,5,7,4,5,8,
4,6,7,4,6,8,4,7,8,5,6,7,5,6,8,5,7,8,6,7,8
), dtype = np.int32).reshape(84, 3)
num_kernels = len(indices)
num_dilations = len(dilations)
num_features = num_kernels * np.sum(num_features_per_dilation)
biases = np.zeros(num_features, dtype = np.float32)
feature_index_start = 0
combination_index = 0
num_channels_start = 0
for dilation_index in range(num_dilations):
dilation = dilations[dilation_index]
padding = ((9 - 1) * dilation) // 2
num_features_this_dilation = num_features_per_dilation[dilation_index]
for kernel_index in range(num_kernels):
feature_index_end = feature_index_start + num_features_this_dilation
num_channels_this_combination = num_channels_per_combination[combination_index]
num_channels_end = num_channels_start + num_channels_this_combination
channels_this_combination = channel_indices[num_channels_start:num_channels_end]
_X = X[np.random.randint(num_examples)][channels_this_combination]
A = -_X # A = alpha * X = -X
G = _X + _X + _X # G = gamma * X = 3X
C_alpha = np.zeros((num_channels_this_combination, input_length), dtype = np.float32)
C_alpha[:] = A
C_gamma = np.zeros((9, num_channels_this_combination, input_length), dtype = np.float32)
C_gamma[9 // 2] = G
start = dilation
end = input_length - padding
for gamma_index in range(9 // 2):
C_alpha[:, -end:] = C_alpha[:, -end:] + A[:, :end]
C_gamma[gamma_index, :, -end:] = G[:, :end]
end += dilation
for gamma_index in range(9 // 2 + 1, 9):
C_alpha[:, :-start] = C_alpha[:, :-start] + A[:, start:]
C_gamma[gamma_index, :, :-start] = G[:, start:]
start += dilation
index_0, index_1, index_2 = indices[kernel_index]
C = C_alpha + C_gamma[index_0] + C_gamma[index_1] + C_gamma[index_2]
C = np.sum(C, axis = 0)
biases[feature_index_start:feature_index_end] = np.quantile(C, quantiles[feature_index_start:feature_index_end])
feature_index_start = feature_index_end
combination_index += 1
num_channels_start = num_channels_end
return biases
def _fit_dilations(input_length, num_features, max_dilations_per_kernel):
num_kernels = 84
num_features_per_kernel = num_features // num_kernels
true_max_dilations_per_kernel = min(num_features_per_kernel, max_dilations_per_kernel)
multiplier = num_features_per_kernel / true_max_dilations_per_kernel
max_exponent = np.log2((input_length - 1) / (9 - 1))
dilations, num_features_per_dilation = \
np.unique(np.logspace(0, max_exponent, true_max_dilations_per_kernel, base = 2).astype(np.int32), return_counts = True)
num_features_per_dilation = (num_features_per_dilation * multiplier).astype(np.int32) # this is a vector
remainder = num_features_per_kernel - np.sum(num_features_per_dilation)
i = 0
while remainder > 0:
num_features_per_dilation[i] += 1
remainder -= 1
i = (i + 1) % len(num_features_per_dilation)
return dilations, num_features_per_dilation
# low-discrepancy sequence to assign quantiles to kernel/dilation combinations
def _quantiles(n):
return np.array([(_ * ((np.sqrt(5) + 1) / 2)) % 1 for _ in range(1, n + 1)], dtype = np.float32)
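# Hedged sketch: for a length-128 series with the default 10_000-feature budget,
# _fit_dilations yields exponentially spaced dilations plus a per-dilation feature count,
# and _quantiles assigns bias quantiles via a golden-ratio low-discrepancy sequence.
def _demo_fit_parameters(input_length=128, num_features=10_000, max_dilations_per_kernel=32):
    dilations, num_features_per_dilation = _fit_dilations(input_length, num_features,
                                                          max_dilations_per_kernel)
    quantiles = _quantiles(84 * np.sum(num_features_per_dilation))
    return dilations, num_features_per_dilation, quantiles[:5]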
def fit(X, num_features = 10_000, max_dilations_per_kernel = 32):
_, num_channels, input_length = X.shape
num_kernels = 84
dilations, num_features_per_dilation = _fit_dilations(input_length, num_features, max_dilations_per_kernel)
num_features_per_kernel = np.sum(num_features_per_dilation)
quantiles = _quantiles(num_kernels * num_features_per_kernel)
num_dilations = len(dilations)
num_combinations = num_kernels * num_dilations
max_num_channels = min(num_channels, 9)
max_exponent = | np.log2(max_num_channels + 1) | numpy.log2 |
import numpy as np
import os
from scipy.spatial import ConvexHull
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.cm as cm
class Generator(object):
def __init__(
self, num_examples_train, num_examples_test, num_clusters,
dataset_path, batch_size
):
self.num_examples_train = num_examples_train
self.num_examples_test = num_examples_test
self.batch_size = batch_size
self.dataset_path = dataset_path
self.input_size = 2
self.task = 'kmeans'
# clusters_train = [4, 8, 16]
clusters_train = [num_clusters]
clusters_test = [num_clusters]
self.clusters = {'train': clusters_train, 'test': clusters_test}
self.data = {'train': {}, 'test': {}}
def load_dataset(self):
for mode in ['train', 'test']:
for cl in self.clusters[mode]:
path = os.path.join(self.dataset_path, mode + str(cl))
path = path + 'kmeans_gauss.npz'
if os.path.exists(path):
                    print('Reading {} dataset for {} clusters'
.format(mode, cl))
npz = | np.load(path) | numpy.load |
#
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ******************************************* MEMORY FUNCTIONS ********************************************************
"""
Functions that store and can retrieve a record of their current input.
* `Buffer`
* `ContentAddressableMemory`
* `DictionaryMemory`
Overview
--------
Functions that store and can return a record of their input.
"""
import numbers
import warnings
from collections import deque
from itertools import combinations, product
# from typing import Optional, Union, Literal, Callable
from typing import Optional, Union
import numpy as np
import typecheck as tc
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.functions.function import (
DEFAULT_SEED, FunctionError, _random_state_getter, _seed_setter, is_function_type, EPSILON,
)
from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance
from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot
from psyneulink.core.components.functions.stateful.integratorfunctions import StatefulFunction
from psyneulink.core.globals.context import handle_external_context
from psyneulink.core.globals.keywords import \
ADDITIVE_PARAM, BUFFER_FUNCTION, MEMORY_FUNCTION, COSINE, \
ContentAddressableMemory_FUNCTION, DictionaryMemory_FUNCTION, \
MIN_INDICATOR, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.utilities import \
all_within_range, convert_to_np_array, convert_to_list, convert_all_elements_to_np_array
__all__ = ['MemoryFunction', 'Buffer', 'DictionaryMemory', 'ContentAddressableMemory', 'RETRIEVAL_PROB', 'STORAGE_PROB']
class MemoryFunction(StatefulFunction): # -----------------------------------------------------------------------------
componentType = MEMORY_FUNCTION
class Buffer(MemoryFunction): # ------------------------------------------------------------------------------
"""
Buffer( \
default_variable=None, \
rate=1.0, \
noise=0.0, \
history=None, \
initializer, \
params=None, \
owner=None, \
prefs=None, \
)
.. _Buffer:
Append `variable <Buffer.variable>` to the end of `previous_value <Buffer.previous_value>` (i.e., right-append
to deque of previous inputs).
.. note::
Every appended item must have same shape as the first.
If specified, `rate <Buffer.rate>` and/or `noise <Buffer.noise>` are applied to items already stored in the
array, as follows:
.. math::
stored\\_item * rate + noise
.. note::
        Because **rate** and **noise** are applied on every call, their effects accumulate exponentially over calls
to `function <Buffer.function>`.
If the length of the result exceeds `history <Buffer.history>`, delete the first item.
Return `previous_value <Buffer.previous_value>` appended with `variable <Buffer.variable>`.
Arguments
---------
default_variable : number, list or array : default class_defaults.variable
specifies a template for the value to be integrated; if it is a list or array, each element is independently
integrated.
rate : float, list or 1d array : default 1.0
specifies a value applied multiplicatively to each item already stored in the deque on each call to `function
<Buffer.function>`; must be in interval [0,1]
noise : float or Function : default 0.0
specifies a random value added to each item already in the deque on each call to `function <Buffer.function>`
(see `noise <Buffer.noise>` for details).
history : int : default None
specifies the maxlen of the deque, and hence `value <Buffer.value>`.
initializer float, list or ndarray : default []
specifies a starting value for the deque; if none is specified, the deque is initialized with an empty list.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
current input value appended to the end of the deque.
rate : float or 1d array with all elements in interval [0,1]
multiplicatively applied to each item already in the deque on call to `function <Buffer.function>`;
implements exponential decay of stored items.
noise : float or Function
random value added to each item of the deque in each call to `function <Buffer.function>`
(see `noise <Stateful_Noise>` for additional details).
history : int
determines maxlen of the deque and the value returned by the `function <Buffer.function>`. If appending
`variable <Buffer.variable>` to `previous_value <Buffer.previous_value>` exceeds history, the first item of
`previous_value <Buffer.previous_value>` is deleted, and `variable <Buffer.variable>` is appended to it,
so that `value <Buffer.previous_value>` maintains a constant length. If history is not specified,
the value returned continues to be extended indefinitely.
initializer : float, list or ndarray
value assigned as the first item of the deque when the Function is initialized, or reset
if the **new_previous_value** argument is not specified in the call to `reset
<StatefulFunction.reset>`.
previous_value : 1d array : default class_defaults.variable
state of the deque prior to appending `variable <Buffer.variable>` in the current call.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentName = BUFFER_FUNCTION
class Parameters(StatefulFunction.Parameters):
"""
Attributes
----------
history
see `history <Buffer.history>`
:default value: None
:type:
initializer
see `initializer <Buffer.initializer>`
:default value: numpy.array([], dtype=float64)
:type: ``numpy.ndarray``
noise
see `noise <Buffer.noise>`
:default value: 0.0
:type: ``float``
rate
see `rate <Buffer.rate>`
:default value: 1.0
:type: ``float``
"""
variable = Parameter([], pnl_internal=True, constructor_argument='default_variable')
rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
history = None
initializer = Parameter(np.array([]), pnl_internal=True)
@tc.typecheck
def __init__(self,
# FIX: 12/11/18 JDC - NOT SAFE TO SPECIFY A MUTABLE TYPE AS DEFAULT
default_variable=None,
                 # KAM 6/26/18 changed default param values because constructing a plain buffer function ("Buffer()")
                 # was failing.
                 # For now, updated the default_variable and noise defaults; alternatively, we could change validation on
# default_variable=None, # Changed to [] because None conflicts with initializer
rate=None,
noise=None,
# rate:Optional[Union[int, float, np.ndarray]]=None,
# noise:Optional[Union[int, float, np.ndarray]]=None,
# rate: parameter_spec=1.0,
# noise: parameter_spec=0.0,
# rate: Optional[Union(int, float]] = None, # Changed to 1.0: None fails validation
# noise: Optional[Union[int, float, callable]] = None, # Changed to 0.0 - None fails validation
# rate: Optional[Union[int, float, list, np.ndarray]] = 1.0,
# noise: Optional[Union[int, float, list, np.ndarray, callable]] = 0.0,
history:tc.optional(int)=None,
# history: Optional[int] = None,
initializer=None,
params: tc.optional(dict) = None,
# params: Optional[dict] = None,
owner=None,
prefs: tc.optional(is_pref_set) = None
):
super().__init__(
default_variable=default_variable,
rate=rate,
initializer=initializer,
noise=noise,
history=history,
params=params,
owner=owner,
prefs=prefs,
)
def _initialize_previous_value(self, initializer, context=None):
previous_value = deque(initializer, maxlen=self.parameters.history.get(context))
self.parameters.previous_value.set(previous_value, context, override=True)
return previous_value
# TODO: Buffer variable fix: remove this or refactor to avoid skip
# of direct super
def _update_default_variable(self, new_default_variable, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value([np.zeros_like(new_default_variable)], context)
# bypass the additional _initialize_previous_value call used by
# other stateful functions
super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context)
def _instantiate_attributes_before_function(self, function=None, context=None):
self.parameters.previous_value._set(
self._initialize_previous_value(
self.parameters.initializer._get(context),
context
),
context
)
@handle_external_context(fallback_most_recent=True)
def reset(self, previous_value=None, context=None):
"""
Clears the `previous_value <Buffer.previous_value>` deque.
If an argument is passed into reset or if the `initializer <Buffer.initializer>` attribute contains a
value besides [], then that value is used to start the new `previous_value <Buffer.previous_value>` deque.
Otherwise, the new `previous_value <Buffer.previous_value>` deque starts out empty.
`value <Buffer.value>` takes on the same value as `previous_value <Buffer.previous_value>`.
"""
# no arguments were passed in -- use current values of initializer attributes
if previous_value is None:
previous_value = self._get_current_parameter_value("initializer", context)
if previous_value is None or previous_value == []:
self.parameters.previous_value._get(context).clear()
value = deque([], maxlen=self.parameters.history.get(context))
else:
value = self._initialize_previous_value(previous_value, context=context)
self.parameters.value.set(value, context, override=True)
return value
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Arguments
---------
variable : number, list or array : default class_defaults.variable
a single value or array of values to be integrated.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
updated value of deque : deque
"""
rate = np.array(self._get_current_parameter_value(RATE, context)).astype(float)
# execute noise if it is a function
noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), variable, context=context)
# If this is an initialization run, leave deque empty (don't want to count it as an execution step);
# Just return current input (for validation).
if self.is_initializing:
return variable
previous_value = self.parameters.previous_value._get(context)
# Apply rate and/or noise, if they are specified, to all stored items
if len(previous_value):
# TODO: remove this shape hack when buffer shapes made consistent
noise = np.reshape(noise, np.asarray(previous_value[0]).shape)
previous_value = convert_to_np_array(previous_value) * rate + noise
previous_value = deque(previous_value, maxlen=self.parameters.history._get(context))
previous_value.append(variable)
self.parameters.previous_value._set(previous_value, context)
return self.convert_output_type(previous_value)
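# Hedged sketch in plain numpy/deque (not the PsyNeuLink API): repeatedly applying
# `stored_item * rate + noise` to everything already in the deque before appending the new
# input is what makes the rate and noise effects compound across calls to Buffer.
def _demo_buffer_decay(rate=0.5, noise=0.0, history=3):
    buf = deque(maxlen=history)
    for new_item in (np.array([1.0]), np.array([2.0]), np.array([3.0])):
        buf = deque([item * rate + noise for item in buf], maxlen=history)
        buf.append(new_item)
    return list(buf)  # oldest entries have been scaled by `rate` the most times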
RETRIEVAL_PROB = 'retrieval_prob'
STORAGE_PROB = 'storage_prob'
DISTANCE_FUNCTION = 'distance_function'
SELECTION_FUNCTION = 'selection_function'
DISTANCE_FIELD_WEIGHTS = 'distance_field_weights'
equidistant_entries_select_keywords = [RANDOM, OLDEST, NEWEST]
class ContentAddressableMemory(MemoryFunction): # ---------------------------------------------------------------------
"""
ContentAddressableMemory( \
default_variable=None, \
retrieval_prob=1.0, \
storage_prob=1.0, \
rate=None, \
noise=0.0, \
initializer=None, \
distance_field_weights=None, \
distance_function=Distance(metric=COSINE), \
selection_function=OneHot(mode=MIN_VAL), \
duplicate_entries_allowed=False, \
duplicate_threshold=0, \
equidistant_entries_select=RANDOM, \
max_entries=None, \
params=None, \
owner=None, \
prefs=None, \
)
.. _ContentAddressableMemory:
**Sections**
- `Overview <ContentAddressableMemory_Overview>` \n
`Entries and Fields <ContentAddressableMemory_Entries_and_Fields>` \n
`Content-based Retrieval <ContentAddressableMemory_Retrieval>` \n
`Duplicate entries <ContentAddressableMemory_Duplicate_Entries>` \n
- `Structure <ContentAddressableMemory_Structure>`
- `Execution <ContentAddressableMemory_Execution>` \n
`Retrieval <ContentAddressableMemory_Execution_Retrieval>` \n
`Storage <ContentAddressableMemory_Execution_Storage>` \n
- `Examples <ContentAddressableMemory_Examples>`
- `Class Reference <ContentAddressableMemory_Class_Reference>`
.. _ContentAddressableMemory_Overview:
**Overview**
The ContentAddressableMemory `Function` implements a configurable, content-addressable storage and retrieval of
entries from `memory <ContentAddressableMemory.memory>`. Storage is determined by `storage_prob
<ContentAddressableMemory.storage_prob>`, and retrieval of entries is determined by
`distance_function <ContentAddressableMemory.distance_function>`,
`selection_function <ContentAddressableMemory.selection_function>`, and `retrieval_prob
<ContentAddressableMemory.retrieval_prob>`.
.. _ContentAddressableMemory_Entries_and_Fields:
**Entries and Fields**. The **default_variable** argument specifies the shape of an entry in `memory
<ContentAddressableMemory.storage_prob>`, each of which is a list or array of fields that are themselves lists or
1d arrays (see `EpisodicMemoryMechanism_Memory_Fields`). An entry can have an arbitrary number of fields, and
each field can have an arbitrary length. However, all entries must have the same number of fields, and the
corresponding fields must all have the same length across entries. Fields can be weighted to determine the
influence they have on retrieval, using the `distance_field_weights <ContentAddressableMemory.memory>` parameter
(see `retrieval <ContentAddressableMemory_Retrieval>` below).
.. hint::
Entries in `memory <ContentAddressableMemory.memory>` can be assigned "labels" -- i.e., values
that are not used in the calculation of distance -- by assigning them a weight of 0 or None in
`distance_field_weights <ContentAddressableMemory.memory>`); either can be used for labels that
are numeric values; however, if non-numeric values are assigned to a field as labels, then None
must be specified for that field in `distance_field_weights <ContentAddressableMemory.memory>`.
.. _ContentAddressableMemory_Retrieval:
**Retrieval**. Entries are retrieved from `memory <ContentAddressableMemory.memory>` based on their distance from
`variable <ContentAddressableMemory.variable>`, used as the cue for retrieval. The distance is computed using the
`distance_function <ContentAddressableMemory.distance_function>`, which compares `variable
    <ContentAddressableMemory.variable>` with each entry in `memory <ContentAddressableMemory.memory>` as full
vectors (i.e., with all fields of each concatenated into a single array), or by computing the distance of each
field in `variable <ContentAddressableMemory.variable>` with the corresponding ones of each entry in `memory
<ContentAddressableMemory.memory>`, and then averaging those distances, possibly weighted by coefficients specified
in `distance_field_weights <ContentAddressableMemory.distance_field_weights>`. The distances computed between
    `variable <ContentAddressableMemory.variable>` and each entry in `memory <ContentAddressableMemory.memory>` are
used by `selection_function <ContentAddressableMemory.selection_function>` to determine which entry is retrieved.
The distance used for the last retrieval (i.e., between `variable <ContentAddressableMemory.variable>` and the
entry retrieved), the distances of each of their corresponding fields (weighted by `distance_field_weights
<ContentAddressableMemory.distance_field_weights>`), and the distances to all other entries are stored in `distance
<ContentAddressableMemory.distance>` and `distances_by_field <ContentAddressableMemory.distances_by_field>`, and
`distances_to_entries <ContentAddressableMemory.distances_to_entries>` respectively.
.. _ContentAddressableMemory_Duplicate_Entries:
**Duplicate Entries**. These can be allowed, disallowed, or overwritten during storage using
`duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>`),
and how selection is made among duplicate entries or ones indistinguishable by the
`distance_function <ContentAddressableMemory.distance_function>` can be specified
using `equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>`.
The class also provides methods for directly retrieving (`get_memory
<ContentAddressableMemory.get_memory>`), adding (`add_to_memory <ContentAddressableMemory.add_to_memory>`)
and deleting (`delete_from_memory <ContentAddressableMemory.delete_from_memory>`) one or more entries from
`memory <ContentAddressableMemory.memory>`.
.. _ContentAddressableMemory_Structure:
**Structure**
An entry is stored and retrieved as an array containing a set of `fields <EpisodicMemoryMechanism_Memory_Fields>`
each of which is a 1d array. An array containing such entries can be used to initialize the contents of `memory
<ContentAddressableMemory.memory>` by providing it in the **initializer** argument of the ContentAddressableMemory's
constructor, or in a call to its `reset <ContentAddressableMemory.reset>` method. The current contents of `memory
<ContentAddressableMemory.memory>` can be inspected using the `memory <ContentAddressableMemory.memory>` attribute,
which returns a list containing the current entries, each as a list containing all fields for that entry. The
`memory_num_fields <ContentAddressableMemory.memory_num_fields>` contains the number of fields expected for each
entry, `memory_field_shapes <ContentAddressableMemory.memory_field_shapes>` their shapes, and `memory_num_entries
<ContentAddressableMemory.memory_num_entries>` the total number of entries in `memory
<ContentAddressableMemory.memory>`.
.. _ContentAddressableMemory_Shapes:
.. technical_note::
Both `memory <ContentAddressableMemory.memory>` and all entries are stored as np.ndarrays, the dimensionality of
which is determined by the shape of the fields of an entry. If all fields have the same length (regular), then
they are 2d arrays and `memory <ContentAddressableMemory.memory>` is a 3d array. However, if fields vary in
length (`ragged <https://en.wikipedia.org/wiki/Jagged_array>`_) then, although each field is 1d, an entry is
also 1d (with dtype='object'), and `memory <ContentAddressableMemory.memory>` is 2d (with dtype='object').
.. _ContentAddressableMemory_Execution:
**Execution**
When the ContentAddressableMemory function is executed, it first retrieves the entry in `memory
<ContentAddressableMemory.memory>` that most closely matches `variable
<ContentAddressableMemory.variable>` in the call, stores the latter in `memory <ContentAddressableMemory.memory>`,
and returns the retrieved entry. If `variable <ContentAddressableMemory.variable>` is an exact match of an entry
in `memory <ContentAddressableMemory.memory>`, and `duplicate_entries_allowed
<ContentAddressableMemory.duplicate_entries_allowed>` is False, then the matching item is returned, but `variable
<ContentAddressableMemory.variable>` is not stored. These steps are described in more detail below.
.. _ContentAddressableMemory_Execution_Retrieval:
* **Retrieval:** first, with probability `retrieval_prob <ContentAddressableMemory.retrieval_prob>`,
      the entry closest to `variable <ContentAddressableMemory.variable>` is retrieved from `memory
<ContentAddressableMemory.memory>`. The entry is chosen by calling, in order:
* `distance_function <ContentAddressableMemory.distance_function>`\: generates a list of and compares
`distances <ContentAddressableMemory.distances>` between `variable <ContentAddressableMemory.variable>`
and each entry in `memory <ContentAddressableMemory.memory>`, possibly weighted by `distance_field_weights
<ContentAddressableMemory.distance_field_weights>`, as follows:
.. _ContentAddressableMemory_Distance_Field_Weights:
* if `distance_field_weights <ContentAddressableMemory.distance_field_weights>` is either a scalar or an
array of scalars that are all the same, then it is used simply to scale the distance computed between
`variable <ContentAddressableMemory.variable>` and each entry in `memory <ContentAddressableMemory.memory>`,
each of which is computed by concatenating all items of `variable <ContentAddressableMemory.variable>` into
a 1d array, similarly concatenating all `memory_fields <EpisodicMemoryMechanism_Memory_Fields>` of an
entry in `memory <ContentAddressableMemory.memory>`, and then using `distance_function
<ContentAddressableMemory.distance_function>` to compute the distance betwen them.
* if `distance_field_weights <ContentAddressableMemory.distance_field_weights>` is an array of scalars with
different values, then `variable <ContentAddressableMemory.variable>` is compared with each entry in `memory
<ContentAddressableMemory.memory>` by using `distance_function <ContentAddressableMemory.distance_function>`
to compute the distance of each item in `variable <ContentAddressableMemory.variable>` with the
corresponding field of the entry in memory, and then averaging those distances weighted by the
corresponding element of `distance_field_weights<ContentAddressableMemory.distance_field_weights>`.
.. note::
Fields assigned a weight of *0* or *None* are ignored in the distance calculation; that is, the distances
between `variable <ContentAddressableMemory.variable>` and entries for those fields are not included
in the averaging of distances by field.
* `selection_function <ContentAddressableMemory.selection_function>`\: called with the list of distances
        to determine which entries to select for consideration. If more than one entry from `memory
<ContentAddressableMemory.memory>` is identified, `equidistant_entries_select
<ContentAddressableMemory.equidistant_entries_select>` is used to determine which to retrieve. If no
retrieval occurs, an appropriately shaped zero-valued array is assigned as the retrieved memory, and
returned by the function.
The distance between `variable <ContentAddressableMemory.variable>` and the retrieved entry is assigned to
      `distance <ContentAddressableMemory.distance>`, the distance between each of their fields is assigned to
`distances_by_field <ContentAddressableMemory.distances_by_field>`, and the distances of `variable
<ContentAddressableMemory.variable>` to all entries in `memory <ContentAddressableMemory.memory>` is assigned
to `distances_to_entries <ContentAddressableMemory.distances_to_entries>`.
.. _ContentAddressableMemory_Execution_Storage:
* **Storage:** after retrieval, an attempt is made to store `variable <ContentAddressableMemory.variable>`
in `memory memory <ContentAddressableMemory.memory>` with probability `storage_prob
<ContentAddressableMemory.storage_prob>`; if the attempt is made:
* if `variable <ContentAddressableMemory.variable>` is identical to an entry already in `memory
<ContentAddressableMemory.memory>`, as evaluated by
`distance_function <ContentAddressableMemory.distance_function>` and `duplicate_threshold
<ContentAddressableMemory.duplicate_threshold>`, then `duplicate_entries_allowed
<ContentAddressableMemory.duplicate_entries_allowed>` determines whether or not to store the entry;
if `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>` is:
* False -- storage is skipped;
* True -- `variable <ContentAddressableMemory.variable>` is stored as another duplicate;
* *OVERWRITE* -- the duplicate entry in `memory <ContentAddressableMemory.memory>` is replaced with
`variable <ContentAddressableMemory.variable>` (which may be slightly different than the item it
replaces, within the tolerance of `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`),
and the matching entry is returned;
.. note::
If `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>` is OVERWRITE but
a duplicate entry is nevertheless identified during retrieval (e.g., **duplicate_entries_allowed** was
          previously changed from True to False), a warning is issued, and the duplicate entry is overwritten with
`variable <ContentAddressableMemory.variable>`.
* if storage **rate** and/or **noise** arguments are specified in the constructor, they are
applied to `variable <ContentAddressableMemory.variable>` before storage as :math:`variable * rate + noise`;
* finally, if the number of entries in `memory <ContentAddressableMemory.memory>` exceeds `max_entries
<ContentAddressableMemory.max_entries>`, the first (oldest) entry is deleted. The current number of entries
in memory is contained in the `memory_num_entries <ContentAddressableMemory.memory_num_entries>` attribute.
.. _ContentAddressableMemory_Examples:
**Examples**
*Initialize memory with **default_variable*
    The format for entries in `memory <ContentAddressableMemory.memory>` can be specified using either the
**default_variable** or **initializer** arguments of the Function's constructor. **default_variable** specifies
the shape of entries, without creating any entries::
>>> c = ContentAddressableMemory(default_variable=[[0,0],[0,0,0]])
>>> c([[1,2]])
[array([0, 0])]
    Since `memory <ContentAddressableMemory.memory>` was not initialized, the first call to the Function returns an
    array of zeros, formatted as specified in **default_variable**. However, the input in the call to the Function
(``[[1,2]]``) is stored as an entry in `memory <EpisodicMemoryMechanism.memory>`::
>>> c.memory
array([[[1., 2.]]])
and is returned on the next call::
>>> c([[2,5]])
array([[1., 2.]])
Note that even though **default_variable** and the inputs to the Function are specified as lists, the entries
    returned are arrays; `memory <ContentAddressableMemory.memory>` and all of its entries are always formatted as
arrays.
*Initialize memory with **initializer*
The **initializer** argument of a ContentAddressableMemory's constructor can be used to initialize its `memory
<ContentAddressableMemory.memory>`::
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4,5]],
... [[10,9],[8,7,6]]])
>>> c([[1,2],[3,4,6]])
array([array([1., 2.]), array([3., 4., 5.])], dtype=object)
>>> c([[1,2],[3,4,6]])
array([array([1., 2.]), array([3., 4., 6.])], dtype=object)
    Note that there was no need to use **default_variable**, and in fact it would be overridden if specified.
.. _ContentAddressableMemory_Examples_Weighting_Fields:
*Weighting fields*
The **distance_field_weights** argument can be used to differentially weight memory fields to modify their
influence on retrieval (see `distance_field_weights <ContentAddressableMemory_Distance_Field_Weights>`). For
example, this can be used to configure the Function as a dictionary, using the first field for keys (on which
retrieval is based) and the second for values (that are retrieved with a matching key), as follows:
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
... [[1,5],[10,11]]],
... distance_field_weights=[1,0])
>>> c([[1,2.5],[10,11]])
array([[1., 2.],
[3., 4.]])
Note that the first entry ``[[1,2],[3,4]]`` in `memory <ContentAddressableMemory.memory>` was retrieved,
even though the cue used in the call (``[[1,2.5],[10,11]]``) was an exact match to the second field of the
second entry (``[[1,5],[10,11]]``). However, since that field was assigned 0 in **distance_field_weights**,
it was ignored and, using only the first entry, the cue was closer to the first entry. This is confirmed by
repeating the example without specifying **distance_field_weights**::
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
... [[1,5],[10,11]]])
>>> c([[1,2.5],[10,11]])
array([[ 1., 5.],
[10., 11.]])
COMMENT:
# FIX: ADD EXAMPLES FOR ENTRIES WITH DIFFERENT SHAPES
COMMENT
*Duplicate entries*
By default, duplicate entries are precluded from a ContentAddressableMemory's `memory
<ContentAddressableMemory.memory>`. So, for an initializer with identical entries, only one copy of
the duplicates will be stored::
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
... [[1,2],[3,4]]])
>>> c.memory
array([[[1., 2.],
[3., 4.]]])
and using the same array as input to the function will retrieve that array but not store another copy::
>>> c([[1,2],[3,4]])
array([[1., 2.],
[3., 4.]])
>>> c.memory
array([[[1., 2.],
[3., 4.]]])
Only fields with non-zero weights in `distance_field_weights <ContentAddressableMemory.distance_field_weights>`
are considered when evaluating whether entries are duplicates. So, in the following example, where the weight
for the second field is 0, the two entries are considered duplicates and only the first is stored::
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
... [[1,2],[5,6]]],
... distance_field_weights=[1,0])
>>> c.memory
array([[[1., 2.],
[3., 4.]]])
Duplicates can be allowed by setting the **duplicate_entries_allowed** argument to True or *OVERWRITE*. Setting
it to True allows duplicate entries to accumulate in `memory <ContentAddressableMemory.memory>`, as shown
here::
>>> c = ContentAddressableMemory(initializer=[[[1,2],[3,4]],
... [[1,5],[10,11]]],
... duplicate_entries_allowed=True)
>>> c([[1,2],[3,4]])
array([[1., 2.],
[3., 4.]])
>>> c.memory
array([[[ 1., 2.],
[ 3., 4.]],
<BLANKLINE>
[[ 1., 5.],
[10., 11.]],
<BLANKLINE>
[[ 1., 2.],
[ 3., 4.]]])
    Duplicates are determined by comparing entries using the function `distance_function
<ContentAddressableMemory.distance_function>`; if the `distance <ContentAddressableMemory.distance>`
is less than `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`, they are considered to be
duplicates; otherwise they are treated as distinct entries. By default, `duplicate_threshold
<ContentAddressableMemory.duplicate_threshold>` is 0. In the following example it is increased, so that
two very similar, but non-identical entries, are nonetheless treated as duplicates::
>>> c = ContentAddressableMemory(initializer=[[[1, 2.0], [3, 4]],
... [[1, 2.5], [3, 4]]],
... duplicate_entries_allowed=False,
... duplicate_threshold=0.2)
>>> c.memory
array([[[1., 2.],
[3., 4.]]])
Setting **duplicate_entries_allowed** argument to *OVERWRITE* allows an entry to replace one that is considered
duplicate, even if it is not identical, as in the following example::
>>> c.duplicate_entries_allowed=OVERWRITE
>>> c([[1, 2.1], [3, 4]])
array([[1., 2.],
[3., 4.]])
>>> c.memory
array([[[1. , 2.1],
[3. , 4. ]]])
Note that the entry considered to be the duplicate (``[[1, 2.1], [3, 4]]``) is returned, and then replaced in
`memory <ContentAddressableMemory.memory>`. Finally, if `duplicate_entries_allowed
<ContentAddressableMemory.duplicate_entries_allowed>` is True, and duplicates have accumulated, the
`equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>` attribute can be used to
specify how to select among them for retrieval, either by choosing randomly (*RANDOM*) or selecting either the
first one (*OLDEST*) or last one (*NEWEST*) stored.
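For example, the following sketch (illustrative only; expected results are noted in comments rather than shown
as verified doctest output) retrieves the most recently stored of two duplicate entries::
    c = ContentAddressableMemory(initializer=[[[1,2],[3,4]]],
                                 duplicate_entries_allowed=True,
                                 equidistant_entries_select=NEWEST)
    c([[1,2],[3,4]])   # retrieves the existing entry and stores a duplicate of it
    c([[1,2],[3,4]])   # both duplicates match the cue equally; the NEWEST (most recently stored) is retrieved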
.. _ContentAddressableMemory_Class_Reference:
**Class Reference**
Arguments
---------
default_variable : list or 2d array : default class_defaults.variable
specifies a template for an entry in the dictionary; the list or array can have any number of items,
each of which must be a list or array of any length; however, at present entries are constrained to be
at most 2d.
retrieval_prob : float in interval [0,1] : default 1.0
specifies probability of retrieving an entry from `memory <ContentAddressableMemory.memory>`.
storage_prob : float in interval [0,1] : default 1.0
specifies probability of adding `variable <ContentAddressableMemory.variable>` to `memory
<ContentAddressableMemory.memory>`.
rate : float, list, or array : default 1.0
specifies a value used to multiply `variable <ContentAddressableMemory.variable>` before storing in
`memory <ContentAddressableMemory.memory>` (see `rate <ContentAddressableMemory.rate>` for details).
noise : float, list, 2d array, or Function : default 0.0
specifies random value(s) added to `variable <ContentAddressableMemory.variable>` before storing in
`memory <ContentAddressableMemory.memory>`\; if a list or 2d array, it must be the same shape as `variable
<ContentAddressableMemory.variable>` (see `noise <ContentAddressableMemory.noise>` for details).
initializer : 3d array or list : default None
specifies an initial set of entries for `memory <ContentAddressableMemory.memory>` (see
`initializer <ContentAddressableMemory.initializer>` for additional details).
distance_field_weights : 1d array : default None
specifies the weight to use in computing the distance between each item of `variable
<ContentAddressableMemory.variable>` and the corresponding `memory_field
<EpisodicMemoryMechanism_Memory_Fields>` of each item in `memory <ContentAddressableMemory.memory>` (see
`distance_field_weights <ContentAddressableMemory.distance_field_weights>` for additional details).
distance_function : Distance or function : default Distance(metric=COSINE)
specifies the function used during retrieval to compare `variable <ContentAddressableMemory.variable>` with
entries in `memory <ContentAddressableMemory.memory>`.
selection_function : OneHot or function : default OneHot(mode=MIN_VAL)
specifies the function used during retrieval to evaluate the distances returned by `distance_function
<ContentAddressableMemory.distance_function>` and select the item to retrieve.
duplicate_entries_allowed : bool : default False
specifies whether duplicate entries are allowed in `memory <ContentAddressableMemory.memory>`
(see `duplicate entries <ContentAddressableMemory_Duplicate_Entries>` for additional details).
duplicate_threshold : float : default 0
specifies how similar `variable <ContentAddressableMemory.variable>` must be to an entry in `memory
<ContentAddressableMemory.memory>` based on `distance_function <ContentAddressableMemory.distance_function>` to
be considered a duplicate (see `duplicate entries <ContentAddressableMemory_Duplicate_Entries>`
for additional details).
equidistant_entries_select : RANDOM | OLDEST | NEWEST : default RANDOM
specifies which entry in `memory <ContentAddressableMemory.memory>` is chosen for retrieval if two or more
have the same distance from `variable <ContentAddressableMemory.variable>`.
max_entries : int : default None
specifies the maximum number of entries allowed in `memory <ContentAddressableMemory.memory>`
(see `max_entries <ContentAddressableMemory.max_entries>` for additional details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : 2d array
used to retrieve an entry from `memory <ContentAddressableMemory.memory>`, and then stored there.
retrieval_prob : float in interval [0,1]
probability of retrieving a value from `memory <ContentAddressableMemory.memory>`.
storage_prob : float in interval [0,1]
probability of adding `variable <ContentAddressableMemory.variable>` to `memory
<ContentAddressableMemory.memory>`.
.. note::
storage_prob does not apply to `initializer <ContentAddressableMemory.initializer>`, the entries of
which are added to `memory <ContentAddressableMemory.memory>` irrespective of storage_prob.
rate : float or 1d array
value applied multiplicatively to `variable <ContentAddressableMemory.variable>` before storing
in `memory <ContentAddressableMemory.memory>` (see `rate <Stateful_Rate>` for additional details).
noise : float, 2d array or Function
value added to `variable <ContentAddressableMemory.variable>` before storing in
`memory <ContentAddressableMemory.memory>` (see `noise <Stateful_Noise>` for additional details).
If a 2d array (or `Function` that returns one), its shape must be the same as `variable
<ContentAddressableMemory.variable>`; that is, each array in the outer dimension (Axis 0) must have the
same length as the corresponding one in `variable <ContentAddressableMemory.variable>`, so that it
can be added Hadamard style to `variable <ContentAddressableMemory.variable>` before storing it in
`memory <ContentAddressableMemory.memory>`.
initializer : ndarray
initial set of entries for `memory <ContentAddressableMemory.memory>`. It should be either a 3d regular
array or a 2d ragged array (if the fields of an entry have different lengths), but it can be specified
in the **initializer** argument of the constructor using some simpler forms for convenience. Specifically,
scalars, 1d and regular 2d arrays are allowed, which are interpreted as a single entry that is converted to
a 3d array to initialize `memory <ContentAddressableMemory.memory>`.
memory : list
list of entries in ContentAddressableMemory, each of which is an array of fields containing stored items;
the fields of an entry must be lists or arrays, each of which can be different shapes, but the corresponding
fields of all entries must have the same shape; for example, the following could be a pair of entries in
memory:
+-------------+------------------------------+--------------------------------------------+
| entry 1 | entry 2 |
+-------------+--------------+---------------+-----------+--------------+-----------------+
| field1 | field2 | field3 | field1 | field2 | field3 |
+-------------+--------------+---------------+-----------+--------------+-----------------+
| [[ [a], | [b,c,d], | [[e],[f]] ], | [ [u], | [v,w,x], | [[y],[z]] ]] |
+-------------+--------------+---------------+-----------+--------------+-----------------+
distance_field_weights : 1d array : default None
determines the weight used in computing the distance between each item of `variable
<ContentAddressableMemory.variable>` and the corresponding `memory_field
<EpisodicMemoryMechanism_Memory_Fields>` of each entry in `memory <ContentAddressableMemory.memory>`; if all
elements are identical, it is treated as a scalar coefficient on `distance <ContentAddressableMemory.distance>`
(see `ContentAddressableMemory_Distance_Field_Weights` for additional details).
distance_function : Distance or function : default Distance(metric=COSINE)
function used during retrieval to compare `variable <ContentAddressableMemory.variable>` with entries in
`memory <ContentAddressableMemory.memory>`.
distance : float : default 0
contains the distance between the last cue used for retrieval and the entry returned in a given `context <Context>`.
distances_by_field : array : default [0]
contains array of distances between each `memory field <ContentAddressableMemory_Memory_Fields>`
of the last cue and the corresponding ones of the last entry returned in a given `context <Context>`.
distances_to_entries : array : default [0]
contains array of distances between the last cue retrieved in a given `context <Context>` and all entries at that
time.
memory_num_entries : int
contains the number of entries in `memory <ContentAddressableMemory.memory>`.
memory_num_fields : int
contains the number of `memory fields <EpisodicMemoryMechanism_Memory_Fields>` in each entry of `memory
<ContentAddressableMemory.memory>`.
memory_field_shapes : array
contains the shapes of each `memory field <EpisodicMemoryMechanism_Memory_Fields>` in each entry of `memory
<ContentAddressableMemory.memory>`.
selection_function : OneHot or function
function used during retrieval to evaluate the distances returned by `distance_function
<ContentAddressableMemory.distance_function>` and select the item(s) to return.
duplicate_entries_allowed : bool | OVERWRITE
determines whether duplicate entries are allowed in `memory <ContentAddressableMemory.memory>`,
as evaluated by `distance_function <ContentAddressableMemory.distance_function>` and `duplicate_threshold
<ContentAddressableMemory.duplicate_threshold>`. (see `duplicate entries
<ContentAddressableMemory_Duplicate_Entries>` for additional details).
duplicate_threshold : float
determines how similar `variable <ContentAddressableMemory.variable>` must be to an entry in `memory
<ContentAddressableMemory.memory>` based on `distance_function <ContentAddressableMemory.distance_function>`
to be considered a duplicate (see `duplicate entries <ContentAddressableMemory_Duplicate_Entries>` for
additional details).
equidistant_entries_select: RANDOM | OLDEST | NEWEST
determines which entry is retrieved when duplicate entries are identified or are indistinguishable by the
`distance_function <ContentAddressableMemory.distance_function>`.
max_entries : int
maximum number of entries allowed in `memory <ContentAddressableMemory.memory>`; if storing a memory
exceeds the number, the oldest memory is deleted.
previous_value : ndarray
state of the `memory <ContentAddressableMemory.memory>` prior to storing `variable
<ContentAddressableMemory.variable>` in the current call.
random_state : numpy.RandomState
private pseudorandom number generator
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
Returns
-------
entry from `memory <ContentAddressableMemory.memory>` that best matches `variable
<ContentAddressableMemory.variable>` : 2d array
if no retrieval occurs, an appropriately shaped zero-valued array is returned.
"""
componentName = ContentAddressableMemory_FUNCTION
class Parameters(StatefulFunction.Parameters):
"""
Attributes
----------
variable
see `variable <ContentAddressableMemory.variable>`
:default value: [[0], [0]]
:type: ``list``
distance
see `distance <ContentAddressableMemory.distance>`
:default value: 0
:type: ``float``
distance_field_weights
see `distance_field_weights <ContentAddressableMemory.distance_field_weights>`
:default value: [1]
:type: ``numpy.ndarray``
distance_function
see `distance_function <ContentAddressableMemory.distance_function>`
:default value: Distance(metric=COSINE)
:type: ``Function``
distances_by_field
see `distances_by_field <ContentAddressableMemory.distances_by_field>`
:default value: [0]
:type: ``numpy.ndarray``
distances_to_entries
see `distances_to_entries <ContentAddressableMemory.distances_to_entries>`
:default value: [0]
:type: ``numpy.ndarray``
duplicate_entries_allowed
see `duplicate_entries_allowed <ContentAddressableMemory.duplicate_entries_allowed>`
:default value: False
:type: ``bool or OVERWRITE``
duplicate_threshold
see `duplicate_threshold <ContentAddressableMemory.duplicate_threshold>`
:default value: 0
:type: ``float``
equidistant_entries_select
see `equidistant_entries_select <ContentAddressableMemory.equidistant_entries_select>`
:default value: `RANDOM`
:type: ``str``
memory_num_fields
see `memory_num_fields <ContentAddressableMemory.memory_num_fields>`
:default value: 1
:type: ``int``
memory_field_shapes
see `memory_field_shapes <ContentAddressableMemory.memory_field_shapes>`
:default value: [1]
:type: ``numpy.ndarray``
initializer
see `initializer <ContentAddressableMemory.initializer>`
:default value: None
:type: ``numpy.ndarray``
max_entries
see `max_entries <ContentAddressableMemory.max_entries>`
:default value: 1000
:type: ``int``
noise
see `noise <ContentAddressableMemory.noise>`
:default value: 0.0
:type: ``float``
previous_value
see `previous_value <ContentAddressableMemory.previous_value>`
:default value: None
:type: ``numpy.ndarray``
random_state
see `random_state <ContentAddressableMemory.random_state>`
:default value: None
:type: ``numpy.random.RandomState``
rate
see `rate <ContentAddressableMemory.rate>`
:default value: 1.0
:type: ``float``
retrieval_prob
see `retrieval_prob <ContentAddressableMemory.retrieval_prob>`
:default value: 1.0
:type: ``float``
selection_function
see `selection_function <ContentAddressableMemory.selection_function>`
:default value: `OneHot`(mode=MIN_INDICATOR)
:type: `Function`
storage_prob
see `storage_prob <ContentAddressableMemory.storage_prob>`
:default value: 1.0
:type: ``float``
val_size
see `val_size <ContentAddressableMemory.val_size>`
:default value: 1
:type: ``int``
"""
variable = Parameter([[0],[0]], pnl_internal=True, constructor_argument='default_variable')
initializer = Parameter(None, pnl_internal=True)
previous_value = Parameter(None, initializer='initializer', pnl_internal=True)
retrieval_prob = Parameter(1.0, modulable=True)
storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
# FIX: MAKE THESE ATTRIBUTES RATHER THAN PARAMETERS:
memory_num_fields = Parameter(None, stateful=False, read_only=True)
memory_field_shapes = Parameter(None, stateful=False, read_only=True)
# FIX: --------------------
distance_field_weights = Parameter([1], stateful=True, modulable=True, dependencies='initializer')
duplicate_entries_allowed = Parameter(False, stateful=True)
duplicate_threshold = Parameter(EPSILON, stateful=False, modulable=True)
equidistant_entries_select = Parameter(RANDOM)
rate = Parameter(1.0, modulable=True)
noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
max_entries = Parameter(1000)
random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed')
seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter)
distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False)
selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False)
distance = Parameter(0, stateful=True, read_only=True)
distances_by_field = Parameter([0], stateful=True, read_only=True)
distances_to_entries = Parameter([0], stateful=True, read_only=True)
def _validate_retrieval_prob(self, retrieval_prob):
retrieval_prob = float(retrieval_prob)
if not all_within_range(retrieval_prob, 0, 1):
return f"must be a float in the interval [0,1]."
def _validate_storage_prob(self, storage_prob):
storage_prob = float(storage_prob)
if not all_within_range(storage_prob, 0, 1):
return f"must be a float in the interval [0,1]."
def _validate_distance_field_weights(self, field_weights):
if self.distance_field_weights._user_specified is True and self.initializer.default_value is not None:
field_weights = np.array(field_weights)
if not np.isscalar(field_weights) and field_weights.ndim != 1:
return f"must be a scalar or list or 1d array of scalars"
fw_len = len(field_weights)
num_fields = convert_all_elements_to_np_array(self.initializer.default_value).shape[1]
if len(field_weights) not in {1, num_fields}:
return f"length ({fw_len}) must be same as number of fields " \
f"in entries of initializer ({num_fields})."
if not np.any(field_weights):
warnings.warn(f"All weights in the 'distance_field_weights' Parameter of {self._owner.name} "
f"are set to '0', so all entries of its memory will be treated as duplicates.")
def _validate_equidistant_entries_select(self, equidistant_entries_select):
if equidistant_entries_select not in equidistant_entries_select_keywords:
return f"must be {' or '.join(equidistant_entries_select_keywords)}."
def _validate_duplicate_entries_allowed(self, duplicate_entries_allowed):
if not isinstance(duplicate_entries_allowed, bool) and duplicate_entries_allowed != OVERWRITE:
return f"must be a bool or 'OVERWRITE'."
def _validate_initializer(self, initializer):
pass
def _parse_initializer(self, initializer):
if initializer is not None:
initializer = ContentAddressableMemory._enforce_memory_shape(initializer)
return initializer
@tc.typecheck
def __init__(self,
# FIX: REINSTATE WHEN 3.6 IS RETIRED:
# default_variable=None,
# retrieval_prob: Optional[Union[int, float]]=None,
# storage_prob: Optional[Union[int, float]]=None,
# rate: Optional[Union[int, float, list, np.ndarray]]=None,
# noise: Optional[Union[int, float, list, np.ndarray, callable]]=None,
# initializer:Optional[Union[list, np.ndarray]]=None,
# distance_field_weights:Optional[Union[list, np.ndarray]]=None,
# distance_function:Optional[Union[Distance, is_function_type]]=None,
# selection_function:Optional[Union[OneHot, is_function_type]]=None,
# duplicate_entries_allowed:Optional[Union[(bool, Literal[OVERWRITE]]]=None,
# duplicate_threshold:Optional[int]=None,
# equidistant_entries_select:Optional[Literal[Union[RANDOM, OLDEST, NEWEST]]]=None,
# max_entries:Optional[int]=None,
# seed:Optional[int]=None,
# params:Optional[Union[list, np.ndarray]]=None,
# owner=None,
# prefs:tc.optional(is_pref_set)=None):
default_variable=None,
retrieval_prob=None,
storage_prob=None,
rate=None,
noise=None,
initializer=None,
distance_field_weights=None,
distance_function=None,
selection_function=None,
duplicate_entries_allowed=None,
duplicate_threshold=None,
equidistant_entries_select=None,
max_entries=None,
seed=None,
params=None,
owner=None,
prefs:tc.optional(is_pref_set)=None):
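# Local cache of memory entries; kept in sync with the previous_value Parameter by
# _store_memory() and delete_from_memory() below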
self._memory = []
super().__init__(
default_variable=default_variable,
retrieval_prob=retrieval_prob,
storage_prob=storage_prob,
initializer=initializer,
duplicate_entries_allowed=duplicate_entries_allowed,
duplicate_threshold=duplicate_threshold,
equidistant_entries_select=equidistant_entries_select,
distance_function=distance_function,
distance_field_weights=distance_field_weights,
rate=rate,
noise=noise,
max_entries=max_entries,
seed=seed,
params=params,
owner=owner,
prefs=prefs,
)
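# If memory was seeded from an initializer, record the number of fields and their shapes
# (used by _validate_entry to check subsequent cues and entries)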
if self.previous_value is not None:
self.parameters.memory_num_fields.set(self.previous_value.shape[1], override=True)
self.parameters.memory_field_shapes.set([item.shape for item in self.previous_value[0]], override=True)
def _parse_selection_function_variable(self, variable, context=None):
# this should be replaced in the future with the variable
# argument when function ordering (and so ordering of parsers)
# is made explicit
distance_result = self.distance_function.parameters.value._get(context)
# TEST PRINT:
# print(distance_result, self.distance_function.defaults.value)
return np.asfarray([
distance_result if i == 0 else np.zeros_like(distance_result)
for i in range(self.defaults.max_entries)
])
def _validate(self, context=None):
"""Validate distance_function, selection_function and memory store"""
distance_function = self.distance_function
if self.get_previous_value(context) is not None:
test_var = self.get_previous_value(context)[0]
else:
test_var = self.defaults.variable
if isinstance(distance_function, type):
distance_function = distance_function(default_variable=test_var)
fct_msg = 'Function type'
else:
distance_function.defaults.variable = [test_var,test_var]
distance_function._instantiate_value(context)
fct_msg = 'Function'
if (isinstance(distance_function, Distance)
and distance_function.metric == COSINE
and any([len(v)==1 for v in test_var])):
warnings.warn(f"{self.__class__.__name__} is using {distance_function} with metric=COSINE and has "
f"at least one memory field that is a scalar (i.e., size=1), which will always produce "
f"a distance of 0 (the angle of scalars is not defined).")
field_wts_homog = np.full(len(test_var),1).tolist()
field_wts_heterog = np.full(len(test_var),range(0,len(test_var))).tolist()
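# Test distance_function with both homogeneous and heterogeneous field weights,
# for both the 'full_entry' and 'per_field' granularities used by _get_distance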
for granularity, field_weights in product(['full_entry', 'per_field'],[field_wts_homog, field_wts_heterog]):
try:
distance_result = self._get_distance(test_var, test_var, field_weights, granularity, context=context)
except:
raise FunctionError(f"{fct_msg} specified for {repr(DISTANCE_FUNCTION)} arg of "
f"{self.__class__.__name__} ({distance_function}) must accept an array "
f"with two lists or 1d arrays, or a 2d array, as its argument.")
if granularity == 'full_entry' and not np.isscalar(distance_result):
raise FunctionError(f"Value returned by {repr(DISTANCE_FUNCTION)} "
f"({distance_function.__class__.__name__}) specified for "
f"{self.__class__.__name__} must return a scalar if "
f"{repr(DISTANCE_FIELD_WEIGHTS)} is not specified or is homogenous "
f"(i.e., all elements are the same).")
if granularity == 'per_field' and not len(distance_result)==len(field_weights):
raise FunctionError(f"Value returned by {repr(DISTANCE_FUNCTION)} "
f"({distance_function.__class__.__name__}) specified for "
f"{self.__class__.__name__} must return an array "
f"if {repr(DISTANCE_FIELD_WEIGHTS)} is a non-homogenous list or array "
f"(i.e., not all elements are the same).")
# FIX: 4/5/21 SHOULD VALIDATE NOISE AND RATE HERE AS WELL?
# Default to full memory
selection_function = self.selection_function
test_var = np.asfarray([distance_result if i==0
else np.zeros_like(distance_result)
for i in range(self._get_current_parameter_value('max_entries', context))])
if isinstance(selection_function, type):
selection_function = selection_function(default_variable=test_var, context=context)
fct_string = 'Function type'
else:
selection_function.defaults.variable = test_var
selection_function._instantiate_value(context)
fct_string = 'Function'
try:
result = np.asarray(selection_function(test_var, context=context))
except Exception:
raise FunctionError(f'{fct_string} specified for {repr(SELECTION_FUNCTION)} arg of {self.__class__} '
f'({selection_function}) must accept a 1d array as its argument')
if result.shape != test_var.shape:
raise FunctionError(f'Value returned by {repr(SELECTION_FUNCTION)} specified for {self.__class__} '
f'({result}) must return an array of the same length it receives')
@handle_external_context()
def _update_default_variable(self, new_default_variable, context=None):
"""Override method on parent (StatefulFunction) since it can't handle arbitrarily-shaped fields"""
if not self.parameters.initializer._user_specified and self.parameters.variable._user_specified:
new_default_variable = self.parameters.variable.default_value
super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context)
def _initialize_previous_value(self, initializer, context=None):
"""Ensure that initializer is appropriate for assignment as memory attribute and assign as previous_value
If specified and it is the first entry:
- set memory_num_fields and memory_field_shapes based on initializer
- use to set previous_value (and return previous_value)
(must be done here rather than in validate_params as it is needed to initialize previous_value
"""
if initializer is None or convert_all_elements_to_np_array(initializer).size == 0:
return None
# FIX: HOW DOES THIS RELATE TO WHAT IS DONE IN __init__()?
# Set memory fields shapes if this is the first entry
self.parameters.memory_num_fields.set(initializer.shape[1],
context=context, override=True)
self.parameters.memory_field_shapes.set([item.shape for item in initializer[0]],
context=context, override=True)
self.parameters.previous_value.set(None, context, override=True)
for entry in initializer:
# Store each item, which also validates it by call to _validate_entry()
if not self._store_memory(entry, context):
warnings.warn(f"Attempt to initialize memory of {self.__class__.__name__} with an entry ({entry}) "
f"that is identical to an existing one while 'duplicate_entries_allowed'==False; "
f"that entry has been skipped")
previous_value = self._memory
self.parameters.previous_value.set(previous_value, context, override=True)
return previous_value
def _instantiate_attributes_before_function(self, function=None, context=None):
self._initialize_previous_value(self.parameters.initializer._get(context), context)
if isinstance(self.distance_function, type):
self.distance_function = self.distance_function(context=context)
if isinstance(self.selection_function, type):
self.selection_function = self.selection_function(context=context)
@handle_external_context(fallback_most_recent=True)
def reset(self, new_value=None, context=None):
"""
reset(<new_dictionary> default={})
Clears the memory in `previous_value <ContentAddressableMemory.previous_value>`.
If **new_value** is passed into reset or if the `initializer <ContentAddressableMemory.initializer>`
attribute contains a value besides [], then that value is used to start the new memory in `previous_value
<ContentAddressableMemory.previous_value>`. Otherwise, the new `previous_value
<ContentAddressableMemory.previous_value>` memory starts out as None.
`value <ContentAddressableMemory.value>` takes on the same value as
`previous_value <ContentAddressableMemory.previous_value>`.
"""
if new_value is not None:
value = self._initialize_previous_value(ContentAddressableMemory._enforce_memory_shape(new_value),
context=context)
else:
# no arguments were passed in -- use current values of initializer attributes
initializer = self._get_current_parameter_value("initializer", context)
if initializer is not None:
# set previous_value to initializer and get value
value = self._initialize_previous_value(initializer, context=context)
else:
# no initializer, so clear previous_value and set value to None
self.parameters.previous_value._get(context).clear()
value = None
self.parameters.value.set(value, context, override=True)
return value
def _function(self,
variable:Optional[Union[list, np.ndarray]]=None,
context=None,
params=None,
) -> list:
"""
Return entry in `memory <ContentAddressableMemory.memory>` that best matches `variable
<ContentAddressableMemory.variable>`, then add `variable <ContentAddressableMemory.variable>` to `memory
<ContentAddressableMemory.memory>` (see `above <ContentAddressableMemory_Execution>` for additional details).
Arguments
---------
variable : list or 2d array : default class_defaults.variable
used to retrieve an entry from `memory <ContentAddressableMemory.memory>`, and then stored there.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
value of entry that best matches `variable <ContentAddressableMemory.variable>` : 1d array
"""
# Enforce variable to be shape of an entry (1d for ragged fields or 2d for regular ones)
# - note: this allows entries with a single field to be specified as a 1d regular array
# (i.e., without enclosing it in an outer list or array), which are converted to a 2d array
variable = convert_all_elements_to_np_array(variable)
if variable.dtype != object and variable.ndim==1:
variable = np.expand_dims(variable, axis=0)
retrieval_prob = np.array(self._get_current_parameter_value(RETRIEVAL_PROB, context)).astype(float)
storage_prob = np.array(self._get_current_parameter_value(STORAGE_PROB, context)).astype(float)
# get random state
random_state = self._get_current_parameter_value('random_state', context)
# get memory field weights (which are modulable)
distance_field_weights = self._get_current_parameter_value('distance_field_weights', context)
# If this is an initialization run, leave memory empty (don't want to count it as an execution step),
# but set entry size and then return current value (variable[1]) for validation.
if self.is_initializing:
return variable
# Set memory fields sizes and total size if this is the first entry
if self.parameters.previous_value._get(context) is None:
self.parameters.memory_num_fields.set(len(variable), context=context, override=True)
self.parameters.memory_field_shapes.set([item.shape for item in variable], context=context, override=True)
# Retrieve entry from memory that best matches variable
if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()):
entry = self.get_memory(variable, distance_field_weights, context).copy()
else:
# QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)?
# CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT
# SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW
entry = self.uniform_entry(0, context)
# Store variable in memory
if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()):
self._store_memory(variable, context)
return entry
def _validate_entry(self, entry:Union[list, np.ndarray], context) -> None:
field_shapes = self.parameters.memory_field_shapes.get(context)
num_fields = self.parameters.memory_num_fields.get(context)
if not entry.ndim:
# IMPLEMENTATION NOTE: Remove this if/when >2d arrays are supported more generally in PsyNeuLink
raise FunctionError(f"Attempt to store and/or retrieve an entry in {self.__class__.__name__} that has "
f"no dimensions ({entry}); must be a list or 1d or 2d array.")
if entry.ndim >2:
# IMPLEMENTATION NOTE: Remove this if/when >2d arrays are supported more generally in PsyNeuLink
raise FunctionError(f"Attempt to store and/or retrieve an entry in {self.__class__.__name__} ({entry}) "
f"that has more than 2 dimensions ({entry.ndim}); try flattening innermost ones.")
if not len(entry) == num_fields:
raise FunctionError(f"Attempt to store and/or retrieve entry in {self.__class__.__name__} ({entry}) "
f"that has an incorrect number of fields ({len(entry)}; should be {num_fields}).")
owner_name = f'of {self.owner.name}' if self.owner else ''
for i, field in enumerate(entry):
field = np.array(field)
# IMPLEMENTATION NOTE: Remove requirement field.ndim==1 if/when >2d arrays are supported more generally
if field.ndim != 1 or field.shape != field_shapes[i]:
raise FunctionError(f"Field {i} of entry ({entry}) has incorrect shape ({field.shape}) "
f"for memory of '{self.name}{owner_name}'; should be: {field_shapes[i]}.")
def uniform_entry(self, value:Union[int, float], context) -> np.ndarray:
return [np.full(i,value) for i in self.parameters.memory_field_shapes._get(context)]
@handle_external_context()
def get_memory(self, cue:Union[list, np.ndarray], field_weights=None, context=None) -> np.ndarray:
"""get_memory(query_key, context=None)
Retrieve entry from `memory <ContentAddressableMemory.memory>` based on `distance_function
<ContentAddressableMemory.distance_function>` and `selection_function
<ContentAddressableMemory.selection_function>`.
Arguments
---------
cue : list or 2d array
must have same number and shapes of fields as existing entries in `memory <ContentAddressableMemory.memory>`.
Returns
-------
entry retrieved : 2d array
if no retrieval occurs, returns appropriately shaped zero-valued array.
"""
# QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)?
# ALSO, SHOULD PROBABILISTIC SUPPRESSION OF RETRIEVAL BE HANDLED HERE OR function (AS IT IS NOW).
# FIX: RETRIEVE BASED ON SIMILARITY WITHIN EACH FIELD WEIGHTED BY distance_field_weights
_memory = self.parameters.previous_value._get(context)
# if no entries in memory, return the zero vector
if _memory is None:
return self.uniform_entry(0, context)
cue = convert_all_elements_to_np_array(cue)
self._validate_entry(cue, context)
# Get mean of field-wise distances between cue and each entry in memory
distances_to_entries = []
for entry in _memory:
distances_to_entries.append(self._get_distance(cue, entry, field_weights, 'full_entry', context))
# Get the best-match(es) in memory based on selection_function and return as non-zero value(s) in an array
selection_array = self.selection_function(distances_to_entries, context=context)
indices_of_selected_items = np.flatnonzero(selection_array)
# Single entry identified
if len(indices_of_selected_items)==1:
index_of_selected_item = int(np.flatnonzero(selection_array))
# More than one entry identified
else:
# Check for any duplicate entries in matches and, if they are not allowed, return zeros
if (not self.duplicate_entries_allowed
and any(self._is_duplicate(_memory[i],_memory[j], field_weights, context)
for i, j in combinations(indices_of_selected_items, 2))):
warnings.warn(f"More than one entry matched cue ({cue}) in memory for {self.name}"
f"{'of ' + self.owner.name if self.owner else ''} even though "
f"{repr('duplicate_entries_allowed')} is False; zeros returned as retrieved item.")
return self.uniform_entry(0, context)
if self.equidistant_entries_select == RANDOM:
random_state = self._get_current_parameter_value('random_state', context)
index_of_selected_item = random_state.choice(indices_of_selected_items)
elif self.equidistant_entries_select == OLDEST:
index_of_selected_item = indices_of_selected_items[0]
elif self.equidistant_entries_select == NEWEST:
index_of_selected_item = indices_of_selected_items[-1]
else:
assert False, f"PROGRAM ERROR: bad specification ({repr(self.equidistant_entries_select)}) for " \
f"'equidistant_entries_select' parameter of {self.name}" \
f"{'for ' + self.owner.name if self.owner else ''}"
best_match = _memory[index_of_selected_item]
best_match_distances = self._get_distance(cue,best_match,field_weights, 'per_field',context)
self.parameters.distance.set(distances_to_entries[index_of_selected_item], context, override=True)
self.parameters.distances_by_field.set(best_match_distances, context, override=True)
self.parameters.distances_to_entries.set(distances_to_entries, context,override=True)
# Return entry
return best_match
def _store_memory(self, entry:Union[list, np.ndarray], context) -> bool:
"""Add an entry to `memory <ContentAddressableMemory.memory>`
Arguments
---------
entry : list or 2d array
should be a list or 2d array containing 1d arrays (fields) each of which should be list or at least a 1d
array; scalars, 1d and simple 2d arrays are allowed, and are interpreted as a single entry with a single
field, which is converted to a 3d array. If any entries already exist in `memory
<ContentAddressableMemory.memory>`, then both the number of fields and their shapes must match existing
entries (contained in the `memory_num_fields <ContentAddressableMemory.memory_num_fields>` and
`memory_field_shapes <ContentAddressableMemory.memory_field_shapes>` attributes, respectively). All
elements of all entries are converted to np.arrays.
.. technical_note::
this method supports adding entries with items in each field that are greater than 1d for potential
future use (see format_for_storage() below); however they are currently rejected in _validate_entry
as currently they may produce unexpected results (by returning entries that are greater than 2d).
"""
self._validate_entry(entry, context)
# convert all fields and entry itself to arrays
entry = convert_all_elements_to_np_array(entry)
num_fields = self.parameters.memory_num_fields._get(context)
field_weights = self.parameters.distance_field_weights._get(context)
# execute noise if it is a function
noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), entry, context=context)
if noise is not None:
try:
entry = entry + noise
except:
raise FunctionError(f"'noise' for '{self.name}' of '{self.owner.name}' "
f"not appropriate shape (single number or array of length {num_fields}.")
existing_entries = self.parameters.previous_value._get(context)
def format_for_storage(entry:np.ndarray) -> np.ndarray:
"""Format an entry to be added to memory
Returns entry formatted to match the shape of `memory <EpisodicMemoryMechanism.memory>`,
so that it can be appended (or, if it is the first, simply assigned) to memory:
- if entry is a regular array (all fields [axis 0 items] have the same shape),
returns object with ndim = entry.ndim + 1 (see `technical_note <ContentAddressableMemory_Shapes>` above)
- if the entry is a ragged array (fields [axis 0 items] have differing shapes),
returns 2d object with dtype=object.
"""
# Ragged array (i.e., fields of different shapes)
if entry.ndim == 1 and entry.dtype==object:
shape = (1, num_fields)
# Regular array (all fields have the same shapes)
elif entry.ndim >= 2:
# Note: if greater ndim>2, item in each field is >1d
shape = (1, num_fields, entry.shape[1])
else:
raise FunctionError(f"Unrecognized format for entry to be stored in {self.name}: {entry}.")
return np.atleast_3d(entry).reshape(shape)
if existing_entries is not None:
# Check for matches of entry with existing entries
matches = [m for m in existing_entries if len(m) and self._is_duplicate(entry, m, field_weights, context)]
# If duplicate entries are not allowed and entry matches any existing entries, don't store
if matches and self.duplicate_entries_allowed is False:
storage_succeeded = False
# If duplicate_entries_allowed is True or OVERWRITE, replace value for matching entry:
# FIX: SHOULD BE OVERWRITE or False
elif matches and self.duplicate_entries_allowed == OVERWRITE:
if len(matches)>1:
# If there is already more than one duplicate, raise error as it is not clear what to overwrite
raise FunctionError(f"Attempt to store item ({entry}) in {self.name} "
f"with 'duplicate_entries_allowed'='OVERWRITE' "
f"when there is more than one matching entry in its memory; "
f"'duplicate_entries_allowed' may have previously been set to 'True'")
try:
index = existing_entries.index(entry)
except AttributeError:
index = [i for i,e in enumerate(existing_entries) if np.all(e == matches[0])][0]
except ValueError:
index = existing_entries.tolist().index(entry)
existing_entries[index] = entry
storage_succeeded = True
else:
# Add to existing entries
existing_entries = np.append(existing_entries, format_for_storage(entry), axis=0)
storage_succeeded = True
else:
# No entries yet, so add new one
existing_entries = format_for_storage(entry)
storage_succeeded = True
if len(existing_entries) > self.max_entries:
existing_entries = np.delete(existing_entries,0,axis=0)
self.parameters.previous_value._set(existing_entries,context)
self._memory = existing_entries
return storage_succeeded
def _get_distance(self, cue:Union[list, np.ndarray],
candidate:Union[list, np.ndarray],
field_weights:Union[list, np.ndarray],
granularity:str,
# granularity:Literal[Union['full_entry', 'per_field']],
context) -> Union[float, np.ndarray]:
"""Get distance of cue from candidate using `distance_function <ContentAddressableMemory.distance_function>`.
- If **granularity**=='full_entry':
returns *single scalar distance* computed over full **cue** and **candidate** entries if all elements of
**field_weights** are equal (i.e., it is a homogenous array); otherwise it is used to weight
the distance computed between each field of **cue** and the corresponding one of **candidate**,
when computing their mean field-wise distances.
- if **granularity**=='per_field':
returns *array of distances* computed field-wise (hadamard) for **cue** and **candidate**,
weighted by **field_weights**.
.. note::
granularity is only used for reporting field-wise distances in `distances_by_field
<ContentAddressableMemory.distances_by_field>`, and not used to determine retrieval or storage
:returns
scalar if **granularity**=='full_entry';
array if **granularity**=='per_field'
"""
# Get distance function and params
distance_fct = self.parameters.distance_function._get(context)
num_fields = self.parameters.memory_num_fields._get(context) or len(field_weights)
if field_weights is None:
# Could be from get_memory called from COMMAND LINE without field_weights
field_weights = self._get_current_parameter_value('distance_field_weights', context)
field_weights = np.atleast_1d(field_weights)
if granularity == 'per_field':
# Note: this is just used for reporting, and not determining storage or retrieval
# Replace None's with 0 to allow multiplication
distances_by_field = np.array([distance_fct([cue[i], candidate[i]])
for i in range(num_fields)]
) * np.array([f if f is not None else 0 for f in field_weights])
# If field_weights is scalar, splay out as array of length num_fields so can iterate through all of them
if len(field_weights)==1:
field_weights = np.full(num_fields, field_weights[0])
# Replace 0's with None's for fields with None in field_weights
distances_by_field = np.array([distances_by_field[i]
if f is not None else None for i,f in enumerate(field_weights)])
return distances_by_field
elif granularity == 'full_entry':
# Use first element as scalar if it is a homogenous array (i.e., all elements are the same)
field_weights = field_weights[0] if np.all(field_weights[0]==field_weights) else field_weights
distance_by_fields = not np.isscalar(field_weights)
if distance_by_fields:
num_non_zero_fields = len([fw for fw in field_weights if fw])
# Get mean of field-wise distances between cue and candidate, weighted by field_weights
distance = np.sum([distance_fct([cue[i], candidate[i]]) * field_weights[i]
for i in range(num_fields) if field_weights[i]]) / num_non_zero_fields
else:
# Get distance between the entire cue vector and that of the candidate entry
# Note: in this case, field_weights is just a scalar coefficient
distance = distance_fct([np.hstack(cue), np.hstack(candidate)]) * field_weights
return distance
else:
assert False, f"PROGRAM ERROR: call to 'ContentAddressableMemory.get_distance()' method " \
f"with invalid 'granularity' argument ({granularity}); " \
f"should be 'full_entry' or 'per_field'."
def _parse_distance_function_variable(self, variable):
return convert_to_np_array([variable[0], variable[0]])
@classmethod
def _enforce_memory_shape(cls, memory):
# Enforce memory to be 2d for ragged fields or 3d for regular ones
# - note: this also allows memory (e.g., via initializer or reset) to be specified with a single entry
# (i.e., without enclosing it in an outer list or array)
memory = convert_all_elements_to_np_array(memory)
memory = np.atleast_2d(memory)
if memory.dtype != object and memory.ndim==2:
memory = np.expand_dims(memory, axis=0)
return memory
def _is_duplicate(self, entry1:np.ndarray, entry2:np.ndarray, field_weights:np.ndarray, context) -> bool:
"""Determines whether two entries are duplicates
Duplicates are treated as ones with a distance within the tolerance specified by duplicate_threshold.
Distances are computed using distance_field_weights.
"""
if (self._get_distance(entry1, entry2, field_weights, 'full_entry', context)
<= self.parameters.duplicate_threshold.get(context)):
return True
return False
@handle_external_context()
def add_to_memory(self, entries:Union[list, np.ndarray], context=None):
"""Add one or more entries into `memory <ContentAddressableMemory.memory>`
Arguments
---------
entries : list or array
a single entry (list or array) or list or array of entries, each of which must be a valid entry;
each must have the same number of and shapes of corresponding fields;
items are added to memory in the order listed.
"""
entries = self._parse_memories(entries, 'add_to_memory', context)
for entry in entries:
self._store_memory(entry, context)
@handle_external_context()
def delete_from_memory(self,
entries:Union[list, np.ndarray],
fields:Optional[Union[int, list]]= None,
context=None):
"""Delete one or more entries from `memory <ContentAddressableMemory.memory>`
Arguments
---------
entries : list or array
a single entry (list or 2d array) or list or array of entries,
each of which must be a valid entry (i.e. same number of fields and shapes of each
as entries already in `memory <ContentAddressableMemory.memory>`.
fields : int or list : default None
if None, delete all entries in `memory <ContentAddressableMemory.memory>` that are identical
to any of the **entries** specified; if int or list, delete all entries with the same values as those
in the field(s) specified.
"""
memories = self._parse_memories(entries, 'delete_from_memory', context)
# FIX: ??IS THIS NEEDED (IS IT JUST A HOLDOVER FROM KEYS OR NEEDED FOR LIST-T0-LIST COMPARISON BELOW?):
entries = [list(m) for m in memories]
fields = convert_to_list(fields)
existing_memory = self.parameters.previous_value._get(context)
pruned_memory = existing_memory.copy()
for entry, memory in product(entries, existing_memory):
if (np.all(entry == memory)
or fields and all(entry[f] == memory[f] for f in fields)):
pruned_memory = np.delete(pruned_memory, pruned_memory.tolist().index(memory.tolist()), axis=0)
self._memory = convert_all_elements_to_np_array(pruned_memory)
self.parameters.previous_value._set(self._memory, context)
def _parse_memories(self, entries, method, context=None):
"""Parse passing of single vs. multiple memories, validate memories, and return ndarray
Used by add_to_memory and delete_from_memory
"""
memories = convert_all_elements_to_np_array(entries)
if not 1 <= memories.ndim <= 3:
was_str = f'(was {memories.ndim}d)' if memories.ndim else '(was scalar)'
raise FunctionError(f"The 'memories' arg for the {method} method "
f"must be a list or array containing 1d or 2d arrays {was_str}.")
# if (memories.ndim == 2 and memories.dtype != object) or (memories.ndim == 1 and memories.dtype == object):
if (memories.ndim == 2 and memories.dtype != object) or (memories.ndim == 1):
memories = np.expand_dims(memories,axis=0)
for entry in memories:
self._validate_entry(entry, context)
return memories
@property
def memory(self):
"""Return entries in self._memory as lists in an outer np.array;
use np.array for multi-line printout
"""
try:
return self._memory
except AttributeError:
return np.array([])
@property
def memory_num_entries(self):
"""Return number of entries in self._memory.
"""
return len(self._memory)
KEYS = 0
VALS = 1
class DictionaryMemory(MemoryFunction): # ---------------------------------------------------------------------
"""
DictionaryMemory( \
default_variable=None, \
retrieval_prob=1.0 \
storage_prob=1.0 \
rate=None, \
noise=0.0, \
initializer=None, \
distance_function=Distance(metric=COSINE), \
selection_function=OneHot(mode=MIN_VAL), \
equidistant_keys_select=RANDOM, \
duplicate_keys=False, \
max_entries=None, \
params=None, \
owner=None, \
prefs=None, \
)
.. _DictionaryMemory:
Implement a configurable, dictionary-style storage and retrieval of key-value pairs, in which storage
is determined by `storage_prob <DictionaryMemory.storage_prob>`, and retrieval of items is
determined by `distance_function <DictionaryMemory.distance_function>`, `selection_function
<DictionaryMemory.selection_function>`, and `retrieval_prob <DictionaryMemory.retrieval_prob>`.
Keys and values may have different lengths, and values may vary in length from entry to entry, but all keys
must be the same length. Duplicate keys can be allowed, disallowed, or overwritten using `duplicate_keys
<DictionaryMemory.duplicate_keys>`), and how selection is made among duplicate keys or ones
indistinguishable by the `distance_function <DictionaryMemory.distance_function>` can be specified
using `equidistant_keys_select <DictionaryMemory.equidistant_keys_select>`.
The class also provides methods for directly retrieving an entry (`get_memory
<DictionaryMemory.get_memory>`), and adding (`add_to_memory <DictionaryMemory.add_to_memory>`)
and deleting (`delete_from_memory <DictionaryMemory.delete_from_memory>`) one or more entries.
.. _DictionaryMemory_Structure:
Structure
---------
An item is stored and retrieved as a 2d array containing a key-value pair ([[key][value]]). A 3d array of such
pairs can be used to initialize the contents of memory by providing it in the **initializer** argument of the
DictionaryMemory's constructor, or in a call to its `reset <DictionaryMemory.reset>`
method. The current contents of the memory can be inspected using the `memory <DictionaryMemory.memory>`
attribute, which returns a list containing the current entries, each as a 2 item list containing a key-value pair.
.. _DictionaryMemory_Execution:
Execution
---------
When `function <DictionaryMemory.function>` is executed, it first retrieves the
item in `memory <DictionaryMemory.memory>` with the key that most closely matches the key of the item
(key-value pair) in the call, stores the latter in memory, and returns the retrieved item (key-value pair).
If the key of the pair in the call is an exact match of a key in memory and `duplicate_keys
<DictionaryMemory.duplicate_keys>` is False, then the matching item is returned, but the
pair in the call is not stored. These steps are described in more detail below:
* First, with probability `retrieval_prob <DictionaryMemory.retrieval_prob>`, an entry is retrieved from
`memory <DictionaryMemory.memory>` that has a key that is closest to the one in the call (first item of
`variable <DictionaryMemory.variable>`), as determined by the `distance_function
<DictionaryMemory.distance_function>` and `selection_function
<DictionaryMemory.selection_function>`. The `distance_function
<DictionaryMemory.distance_function>` generates a list of distances of each key in memory from the
one in the call; the `selection_function <DictionaryMemory.selection_function>` then determines which
ones to select for consideration. If more than one entry from memory is identified, `equidistant_keys_select
<DictionaryMemory.equidistant_keys_select>` is used to determine which to retrieve. If no retrieval
occurs, an appropriately shaped zero-valued array is assigned as the retrieved memory (and returned by the
`function <DictionaryMemory.function>`).
..
* After retrieval, the key-value pair in the call (`variable <DictionaryMemory.variable>`) is stored in
`memory <DictionaryMemory.memory>` with probability `storage_prob <DictionaryMemory.storage_prob>`.
If the key (`variable <DictionaryMemory.variable>`\\[0]) is identical to one already in `memory
<DictionaryMemory.memory>` and `duplicate_keys <DictionaryMemory.duplicate_keys>`
is set to False, storage is skipped; if it is set to *OVERWRITE*, the value of the key in memory is replaced
with the one in the call. If **rate** and/or **noise** arguments are specified in the
constructor, it is applied to the key before storing, as follows:
.. math::
variable[1] * rate + noise
If the number of entries exceeds `max_entries <DictionaryMemory.max_entries>`, the first (oldest) item in
memory is deleted.
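For example, the following sketch (illustrative only; the expected retrieval is noted in a comment rather than
shown as verified doctest output) initializes a DictionaryMemory with two key-value pairs and then retrieves
the entry whose key is closest to the cue::
    d = DictionaryMemory(initializer=[[[1, 2], [4, 5, 6]],
                                      [[7, 8], [10, 11, 12]]])
    d([[1, 2.1], [0, 0, 0]])   # retrieves [[1, 2], [4, 5, 6]] (closest key) and stores the new pair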
Arguments
---------
default_variable : list or 2d array : default class_defaults.variable
specifies a template for the key and value entries of the dictionary; list must have two entries, each
of which is a list or array; first item is used as key, and second as value entry of dictionary.
retrieval_prob : float in interval [0,1] : default 1.0
specifies probability of retrieving a key from `memory <DictionaryMemory.memory>`.
storage_prob : float in interval [0,1] : default 1.0
specifies probability of adding `variable <DictionaryMemory.variable>` to `memory
<DictionaryMemory.memory>`.
rate : float, list, or array : default 1.0
specifies a value used to multiply key (first item of `variable <DictionaryMemory.variable>`) before
storing in `memory <DictionaryMemory.memory>` (see `rate <DictionaryMemory.rate>` for details).
noise : float, list, array, or Function : default 0.0
specifies a random value added to key (first item of `variable <DictionaryMemory.variable>`) before
storing in `memory <DictionaryMemory.memory>` (see `noise <DictionaryMemory.noise>` for details).
initializer : 3d array or list : default None
specifies an initial set of entries for `memory <DictionaryMemory.memory>`. It must be of the following
form: [[[key],[value]], [[key],[value]], ...], such that each item in the outer dimension (axis 0)
is a 2d array or list containing a key and a value pair for that entry. All of the keys must be 1d arrays or
lists of the same length.
distance_function : Distance or function : default Distance(metric=COSINE)
specifies the function used during retrieval to compare the first item in `variable
<DictionaryMemory.variable>` with keys in `memory <DictionaryMemory.memory>`.
selection_function : OneHot or function : default OneHot(mode=MIN_VAL)
specifies the function used during retrieval to evaluate the distances returned by `distance_function
<DictionaryMemory.distance_function>` and select the item to return.
equidistant_keys_select: RANDOM | OLDEST | NEWEST : default RANDOM
specifies which item is chosen for retrieval if two or more keys have the same distance from the first item of
`variable <DictionaryMemory.variable>`.
duplicate_keys : bool | OVERWRITE : default False
specifies whether entries with duplicate keys are allowed in `memory <DictionaryMemory.memory>`
(see `duplicate_keys <DictionaryMemory.duplicate_keys>` for additional details).
max_entries : int : default None
specifies the maximum number of entries allowed in `memory <DictionaryMemory.memory>`
(see `max_entries <DictionaryMemory.max_entries>` for additional details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : 2d array
1st item (variable[0]) is the key used to retrieve an entry from `memory <DictionaryMemory.memory>`,
and 2nd item (variable[1]) is the value of the entry, paired with key and added to the `memory
<DictionaryMemory.memory>`.
key_size : int
length of keys in `memory <DictionaryMemory.memory>`.
val_size : int
length of values in `memory <DictionaryMemory.memory>`.
retrieval_prob : float in interval [0,1]
probability of retrieving a value from `memory <DictionaryMemory.memory>`.
storage_prob : float in interval [0,1]
probability of adding `variable <DictionaryMemory.variable>` to `memory
<DictionaryMemory.memory>`;
rate : float or 1d array
value applied multiplicatively to key (first item of `variable <DictionaryMemory.variable>`) before
storing in `memory <DictionaryMemory.memory>` (see `rate <Stateful_Rate>` for additional details).
noise : float, 1d array or Function
value added to key (first item of `variable <DictionaryMemory.variable>`) before storing in
`memory <DictionaryMemory.memory>` (see `noise <Stateful_Noise>` for additional details).
initializer : 3d array
initial set of entries for `memory <DictionaryMemory.memory>`; each is a 2d array with a key-value pair.
memory : list
list of key-value pairs containing entries in DictionaryMemory:
[[[key 1], [value 1]], [[key 2], value 2]]...]
distance_function : Distance or function : default Distance(metric=COSINE)
function used during retrieval to compare the first item in `variable <DictionaryMemory.variable>`
with keys in `memory <DictionaryMemory.memory>`.
selection_function : OneHot or function : default OneHot(mode=MIN_VAL)
function used during retrieval to evaluate the distances returned by `distance_function
<DictionaryMemory.distance_function>` and select the item(s) to return.
previous_value : 1d array
state of the `memory <DictionaryMemory.memory>` prior to storing `variable
<DictionaryMemory.variable>` in the current call.
duplicate_keys : bool | OVERWRITE
determines whether entries with duplicate keys are allowed in `memory <DictionaryMemory.memory>`.
If True, items with keys that are the same as ones in memory can be stored; on retrieval, a
single one is selected based on `equidistant_keys_select <DictionaryMemory.equidistant_keys_select>`.
If False, then an attempt to store an item with a key that is already in `memory
<DictionaryMemory.memory>` is ignored, and the entry already in memory with that key is retrieved.
If a duplicate key is identified during retrieval (e.g., **duplicate_keys** is changed from True to
False), a warning is issued and zeros are returned. If *OVERWRITE*, then storing an entry with a key
identical to one already in memory causes the value of that entry to be overwritten with the new value.
equidistant_keys_select: RANDOM | OLDEST | NEWEST
determines which entry is retrieved when duplicate keys are identified or are indistinguishable by the
`distance_function <DictionaryMemory.distance_function>`.
max_entries : int
maximum number of entries allowed in `memory <DictionaryMemory.memory>`; if storing a memory
exceeds the number, the oldest memory is deleted.
random_state : numpy.RandomState
private pseudorandom number generator
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
Returns
-------
value and key of entry that best matches first item of `variable <DictionaryMemory.variable>` : 2d array
if no retrieval occurs, an appropriately shaped zero-valued array is returned.
"""
componentName = DictionaryMemory_FUNCTION
class Parameters(StatefulFunction.Parameters):
"""
Attributes
----------
variable
see `variable <DictionaryMemory.variable>`
:default value: [[0], [0]]
:type: ``list``
distance_function
see `distance_function <DictionaryMemory.distance_function>`
:default value: `Distance`(metric=cosine)
:type: `Function`
duplicate_keys
see `duplicate_keys <DictionaryMemory.duplicate_keys>`
:default value: False
:type: ``bool``
equidistant_keys_select
see `equidistant_keys_select <DictionaryMemory.equidistant_keys_select>`
:default value: `RANDOM`
:type: ``str``
key_size
see `key_size <DictionaryMemory.key_size>`
:default value: 1
:type: ``int``
max_entries
see `max_entries <DictionaryMemory.max_entries>`
:default value: 1000
:type: ``int``
noise
see `noise <DictionaryMemory.noise>`
:default value: 0.0
:type: ``float``
random_state
see `random_state <DictionaryMemory.random_state>`
:default value: None
:type: ``numpy.random.RandomState``
rate
see `rate <DictionaryMemory.rate>`
:default value: 1.0
:type: ``float``
retrieval_prob
see `retrieval_prob <DictionaryMemory.retrieval_prob>`
:default value: 1.0
:type: ``float``
selection_function
see `selection_function <DictionaryMemory.selection_function>`
:default value: `OneHot`(mode=MIN_INDICATOR)
:type: `Function`
storage_prob
see `storage_prob <DictionaryMemory.storage_prob>`
:default value: 1.0
:type: ``float``
val_size
see `val_size <DictionaryMemory.val_size>`
:default value: 1
:type: ``int``
"""
variable = Parameter([[0],[0]], pnl_internal=True, constructor_argument='default_variable')
retrieval_prob = Parameter(1.0, modulable=True)
storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
key_size = Parameter(1, stateful=True)
val_size = Parameter(1, stateful=True)
duplicate_keys = Parameter(False)
equidistant_keys_select = Parameter(RANDOM)
rate = Parameter(1.0, modulable=True)
noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
max_entries = Parameter(1000)
random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed')
seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter)
distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False)
selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False)
@tc.typecheck
def __init__(self,
default_variable=None,
retrieval_prob: tc.optional(tc.any(int, float))=None,
storage_prob: tc.optional(tc.any(int, float))=None,
noise: tc.optional(tc.any(int, float, list, np.ndarray, callable))=None,
rate: tc.optional(tc.any(int, float, list, np.ndarray))=None,
initializer=None,
distance_function:tc.optional(tc.any(Distance, is_function_type))=None,
selection_function:tc.optional(tc.any(OneHot, is_function_type))=None,
duplicate_keys:tc.optional(tc.any(bool, tc.enum(OVERWRITE)))=None,
equidistant_keys_select:tc.optional(tc.enum(RANDOM, OLDEST, NEWEST))=None,
max_entries=None,
seed=None,
params: tc.optional(tc.optional(tc.any(list, np.ndarray))) = None,
owner=None,
prefs: tc.optional(is_pref_set) = None):
if initializer is None:
initializer = []
self._memory = []
super().__init__(
default_variable=default_variable,
retrieval_prob=retrieval_prob,
storage_prob=storage_prob,
initializer=initializer,
duplicate_keys=duplicate_keys,
equidistant_keys_select=equidistant_keys_select,
rate=rate,
noise=noise,
max_entries=max_entries,
seed=seed,
params=params,
owner=owner,
prefs=prefs,
)
if self.previous_value.size != 0:
self.parameters.key_size.set(len(self.previous_value[KEYS][0]))
self.parameters.val_size.set(len(self.previous_value[VALS][0]))
def _parse_distance_function_variable(self, variable):
# actual used variable in execution (get_memory) checks distance
# between key and key, not key and val as implied in _validate
return convert_to_np_array([variable[KEYS], variable[KEYS]])
def _parse_selection_function_variable(self, variable, context=None):
# this should be replaced in the future with the variable
# argument when function ordering (and so ordering of parsers)
# is made explicit
distance_result = self.distance_function.parameters.value._get(context)
# TEST PRINT:
# print(distance_result, self.distance_function.defaults.value)
return np.asfarray([
distance_result if i == 0 else np.zeros_like(distance_result)
for i in range(self.defaults.max_entries)
])
def _get_state_ids(self):
return super()._get_state_ids() + ["ring_memory"]
def _get_state_struct_type(self, ctx):
# Construct a ring buffer
max_entries = self.parameters.max_entries.get()
key_type = ctx.convert_python_struct_to_llvm_ir(self.defaults.variable[0])
keys_struct = pnlvm.ir.ArrayType(key_type, max_entries)
val_type = ctx.convert_python_struct_to_llvm_ir(self.defaults.variable[1])
vals_struct = pnlvm.ir.ArrayType(val_type, max_entries)
ring_buffer_struct = pnlvm.ir.LiteralStructType((
keys_struct, vals_struct, ctx.int32_ty, ctx.int32_ty))
generic_struct = ctx.get_state_struct_type(super())
return pnlvm.ir.LiteralStructType((*generic_struct,
ring_buffer_struct))
def _get_state_initializer(self, context):
memory = self.parameters.previous_value._get(context)
mem_init = pnlvm._tupleize([memory[0], memory[1], 0, 0])
return (*super()._get_state_initializer(context), mem_init)
def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
# PRNG
rand_struct = ctx.get_random_state_ptr(builder, self, state, params)
uniform_f = ctx.get_uniform_dist_function_by_state(rand_struct)
# Ring buffer
buffer_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "ring_memory")
keys_ptr = builder.gep(buffer_ptr, [ctx.int32_ty(0), ctx.int32_ty(0)])
vals_ptr = builder.gep(buffer_ptr, [ctx.int32_ty(0), ctx.int32_ty(1)])
count_ptr = builder.gep(buffer_ptr, [ctx.int32_ty(0), ctx.int32_ty(2)])
wr_ptr = builder.gep(buffer_ptr, [ctx.int32_ty(0), ctx.int32_ty(3)])
# Input
var_key_ptr = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)])
var_val_ptr = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)])
# Zero output
builder.store(arg_out.type.pointee(None), arg_out)
out_key_ptr = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)])
out_val_ptr = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(1)])
# Check retrieval probability
retr_ptr = builder.alloca(ctx.bool_ty)
builder.store(retr_ptr.type.pointee(1), retr_ptr)
retr_prob_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, RETRIEVAL_PROB)
# Prob can be [x] if we are part of a mechanism
retr_prob = pnlvm.helpers.load_extract_scalar_array_one(builder, retr_prob_ptr)
retr_rand = builder.fcmp_ordered('<', retr_prob, retr_prob.type(1.0))
max_entries = len(vals_ptr.type.pointee)
entries = builder.load(count_ptr)
entries = pnlvm.helpers.uint_min(builder, entries, max_entries)
# The call to random function needs to be after check to match python
with builder.if_then(retr_rand):
rand_ptr = builder.alloca(ctx.float_ty)
builder.call(uniform_f, [rand_struct, rand_ptr])
rand = builder.load(rand_ptr)
passed = builder.fcmp_ordered('<', rand, retr_prob)
builder.store(passed, retr_ptr)
# Retrieve
retr = builder.load(retr_ptr)
with builder.if_then(retr, likely=True):
# Determine distances
distance_f = ctx.import_llvm_function(self.distance_function)
distance_params = pnlvm.helpers.get_param_ptr(builder, self, params, "distance_function")
distance_state = pnlvm.helpers.get_state_ptr(builder, self, state, "distance_function")
distance_arg_in = builder.alloca(distance_f.args[2].type.pointee)
builder.store(builder.load(var_key_ptr),
builder.gep(distance_arg_in, [ctx.int32_ty(0),
ctx.int32_ty(0)]))
selection_arg_in = builder.alloca(pnlvm.ir.ArrayType(distance_f.args[3].type.pointee, max_entries))
with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b, idx):
compare_ptr = b.gep(keys_ptr, [ctx.int32_ty(0), idx])
b.store(b.load(compare_ptr),
b.gep(distance_arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)]))
distance_arg_out = b.gep(selection_arg_in, [ctx.int32_ty(0), idx])
b.call(distance_f, [distance_params, distance_state,
distance_arg_in, distance_arg_out])
selection_f = ctx.import_llvm_function(self.selection_function)
selection_params = pnlvm.helpers.get_param_ptr(builder, self, params, "selection_function")
selection_state = pnlvm.helpers.get_state_ptr(builder, self, state, "selection_function")
selection_arg_out = builder.alloca(selection_f.args[3].type.pointee)
builder.call(selection_f, [selection_params, selection_state,
selection_arg_in, selection_arg_out])
# Find the selected index
selected_idx_ptr = builder.alloca(ctx.int32_ty)
builder.store(ctx.int32_ty(0), selected_idx_ptr)
with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b,idx):
selection_val = b.load(b.gep(selection_arg_out, [ctx.int32_ty(0), idx]))
non_zero = b.fcmp_ordered('!=', selection_val, selection_val.type(0))
with b.if_then(non_zero):
b.store(idx, selected_idx_ptr)
selected_idx = builder.load(selected_idx_ptr)
selected_key = builder.load(builder.gep(keys_ptr, [ctx.int32_ty(0),
selected_idx]))
selected_val = builder.load(builder.gep(vals_ptr, [ctx.int32_ty(0),
selected_idx]))
builder.store(selected_key, out_key_ptr)
builder.store(selected_val, out_val_ptr)
# Check storage probability
store_ptr = builder.alloca(ctx.bool_ty)
builder.store(store_ptr.type.pointee(1), store_ptr)
store_prob_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, STORAGE_PROB)
# Prob can be [x] if we are part of a mechanism
store_prob = pnlvm.helpers.load_extract_scalar_array_one(builder, store_prob_ptr)
store_rand = builder.fcmp_ordered('<', store_prob, store_prob.type(1.0))
# The call to random function needs to be behind jump to match python
# code
with builder.if_then(store_rand):
rand_ptr = builder.alloca(ctx.float_ty)
builder.call(uniform_f, [rand_struct, rand_ptr])
rand = builder.load(rand_ptr)
passed = builder.fcmp_ordered('<', rand, store_prob)
builder.store(passed, store_ptr)
# Store
store = builder.load(store_ptr)
with builder.if_then(store, likely=True):
# Check if such key already exists
is_new_key_ptr = builder.alloca(ctx.bool_ty)
builder.store(is_new_key_ptr.type.pointee(1), is_new_key_ptr)
with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b,idx):
cmp_key_ptr = b.gep(keys_ptr, [ctx.int32_ty(0), idx])
# Vector compare
# TODO: move this to helpers
key_differs_ptr = b.alloca(ctx.bool_ty)
b.store(key_differs_ptr.type.pointee(0), key_differs_ptr)
with pnlvm.helpers.array_ptr_loop(b, cmp_key_ptr, "key_compare") as (b2, idx2):
var_key_element = b2.gep(var_key_ptr, [ctx.int32_ty(0), idx2])
cmp_key_element = b2.gep(cmp_key_ptr, [ctx.int32_ty(0), idx2])
element_differs = b.fcmp_unordered('!=',
b.load(var_key_element),
b.load(cmp_key_element))
key_differs = b2.load(key_differs_ptr)
key_differs = b2.or_(key_differs, element_differs)
b2.store(key_differs, key_differs_ptr)
key_differs = b.load(key_differs_ptr)
is_new_key = b.load(is_new_key_ptr)
is_new_key = b.and_(is_new_key, key_differs)
b.store(is_new_key, is_new_key_ptr)
# Add new key + val if does not exist yet
is_new_key = builder.load(is_new_key_ptr)
with builder.if_then(is_new_key):
write_idx = builder.load(wr_ptr)
store_key_ptr = builder.gep(keys_ptr, [ctx.int32_ty(0), write_idx])
store_val_ptr = builder.gep(vals_ptr, [ctx.int32_ty(0), write_idx])
builder.store(builder.load(var_key_ptr), store_key_ptr)
builder.store(builder.load(var_val_ptr), store_val_ptr)
# Update counters
write_idx = builder.add(write_idx, write_idx.type(1))
write_idx = builder.urem(write_idx, write_idx.type(max_entries))
builder.store(write_idx, wr_ptr)
count = builder.load(count_ptr)
count = builder.add(count, count.type(1))
builder.store(count, count_ptr)
return builder
def _validate_params(self, request_set, target_set=None, context=None):
super()._validate_params(request_set=request_set, target_set=target_set, context=context)
if RETRIEVAL_PROB in request_set and request_set[RETRIEVAL_PROB] is not None:
retrieval_prob = request_set[RETRIEVAL_PROB]
if not all_within_range(retrieval_prob, 0, 1):
raise FunctionError("{} arg of {} ({}) must be a float in the interval [0,1]".
format(repr(RETRIEVAL_PROB), self.__class__.__name__, retrieval_prob))
if STORAGE_PROB in request_set and request_set[STORAGE_PROB] is not None:
storage_prob = request_set[STORAGE_PROB]
if not all_within_range(storage_prob, 0, 1):
raise FunctionError("{} arg of {} ({}) must be a float in the interval [0,1]".
format(repr(STORAGE_PROB), self.__class__.__name__, storage_prob))
def _validate(self, context=None):
"""Validate distance_function, selection_function and memory store"""
distance_function = self.distance_function
test_var = [self.defaults.variable[KEYS], self.defaults.variable[VALS]]
if isinstance(distance_function, type):
distance_function = distance_function(default_variable=test_var)
fct_msg = 'Function type'
else:
distance_function.defaults.variable = test_var
distance_function._instantiate_value(context)
fct_msg = 'Function'
try:
distance_result = distance_function(test_var, context=context)
if not np.isscalar(distance_result):
raise FunctionError("Value returned by {} specified for {} ({}) must be a scalar".
format(repr(DISTANCE_FUNCTION), self.__class__.__name__, distance_result))
except:
raise FunctionError("{} specified for {} arg of {} ({}) "
"must accept a list with two 1d arrays or a 2d array as its argument".
format(fct_msg, repr(DISTANCE_FUNCTION), self.__class__,
distance_function))
# Default to full memory
selection_function = self.selection_function
test_var = np.asfarray([distance_result if i==0
else np.zeros_like(distance_result)
for i in range(self._get_current_parameter_value('max_entries', context))])
if isinstance(selection_function, type):
selection_function = selection_function(default_variable=test_var, context=context)
fct_string = 'Function type'
else:
selection_function.defaults.variable = test_var
selection_function._instantiate_value(context)
fct_string = 'Function'
try:
result = np.asarray(selection_function(test_var, context=context))
except Exception:
raise FunctionError(f'{fct_string} specified for {repr(SELECTION_FUNCTION)} arg of {self.__class__} '
f'({selection_function}) must accept a 1d array as its argument')
if result.shape != test_var.shape:
raise FunctionError(f'Value returned by {repr(SELECTION_FUNCTION)} specified for {self.__class__} '
f'({result}) must return an array of the same length it receives')
def _initialize_previous_value(self, initializer, context=None):
"""Ensure that initializer is appropriate for assignment as memory attribute and assign as previous_value
- Validate, if initializer is specified, it is a 3d array
(must be done here rather than in validate_params as it is needed to initialize previous_value)
- Ensure that each entry has exactly 2 items in the outer dimension (axis 0)
and that all items in each of those two items are all arrays
"""
# vals = [[k for k in initializer.keys()], [v for v in initializer.values()]]
previous_value = np.ndarray(shape=(2, 0))
if len(initializer) == 0:
return previous_value
else:
# Set key_size and val_size if this is the first entry
self.parameters.previous_value.set(previous_value, context, override=True)
self.parameters.key_size.set(len(initializer[0][KEYS]), context)
self.parameters.val_size.set(len(initializer[0][VALS]), context)
for entry in initializer:
if not self._store_memory(np.array(entry), context):
warnings.warn(f"Attempt to initialize memory of {self.__class__.__name__} with an entry ({entry}) "
f"that has the same key as a previous one, while 'duplicate_keys'==False; "
f"that entry has been skipped")
return convert_to_np_array(self._memory)
def _instantiate_attributes_before_function(self, function=None, context=None):
self.parameters.previous_value._set(
self._initialize_previous_value(
self.parameters.initializer._get(context),
context
),
context
)
if isinstance(self.distance_function, type):
self.distance_function = self.distance_function(context=context)
if isinstance(self.selection_function, type):
self.selection_function = self.selection_function(context=context)
@handle_external_context(fallback_most_recent=True)
def reset(self, previous_value=None, context=None):
"""
reset(<new_dictionary> default={})
Clears the memory in `previous_value <DictionaryMemory.previous_value>`.
If an argument is passed into reset or if the `initializer <DictionaryMemory.initializer>`
attribute contains a value besides [], then that value is used to start the new memory in `previous_value
<DictionaryMemory.previous_value>`. Otherwise, the new `previous_value
<DictionaryMemory.previous_value>` memory starts out empty.
`value <DictionaryMemory.value>` takes on the same value as
`previous_value <DictionaryMemory.previous_value>`.
"""
# no arguments were passed in -- use current values of initializer attributes
if previous_value is None:
previous_value = self._get_current_parameter_value("initializer", context)
if previous_value == []:
self.parameters.previous_value._get(context).clear()
value = np.ndarray(shape=(2, 0, len(self.defaults.variable[0])))
else:
value = self._initialize_previous_value(previous_value, context=context)
self.parameters.value.set(value, context, override=True)
return value
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Return the entry in `memory <DictionaryMemory.memory>` the key of which best matches the first item of
`variable <DictionaryMemory.variable>` (query key), then add `variable
<DictionaryMemory.variable>` to `memory <DictionaryMemory.memory>` (see `above
<DictionaryMemory_Execution>` for additional details).
Arguments
---------
variable : list or 2d array : default class_defaults.variable
first item (variable[0]) is treated as the key for retrieval; second item (variable[1]), paired
with key, is added to `memory <DictionaryMemory.memory>`.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
value of entry that best matches first item of `variable <DictionaryMemory.variable>` : 1d array
"""
key = variable[KEYS]
# if len(variable)==2:
val = variable[VALS]
retrieval_prob = np.array(self._get_current_parameter_value(RETRIEVAL_PROB, context)).astype(float)
storage_prob = np.array(self._get_current_parameter_value(STORAGE_PROB, context)).astype(float)
# execute noise if it is a function
noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), variable, context=context)
# get random state
random_state = self._get_current_parameter_value('random_state', context)
# If this is an initialization run, leave memory empty (don't want to count it as an execution step),
# but set key and value size and then return current value (variable[1]) for validation.
if self.is_initializing:
return variable
# Set key_size and val_size if this is the first entry
if len(self.parameters.previous_value._get(context)[KEYS]) == 0:
self.parameters.key_size._set(len(key), context)
self.parameters.val_size._set(len(val), context)
# Retrieve value from current dict with key that best matches key
if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()):
memory = self.get_memory(key, context)
else:
# QUESTION: SHOULD IT RETURN 0's VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE & OutputPort FROM LAST TRIAL)?
# CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT
# SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW
memory = [[0]* self.parameters.key_size._get(context), [0]* self.parameters.val_size._get(context)]
# Store variable to dict:
if noise is not None:
key = np.asarray(key, dtype=float)
if isinstance(noise, numbers.Number):
key += noise
else:
# assume array with same shape as variable
# TODO: does val need noise?
key += noise[KEYS]
if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()):
self._store_memory(variable, context)
# Return 3d array with keys and vals as lists
# IMPLEMENTATION NOTE: if we try to create an np.ndarray directly and keys and vals have the same length,
# we end up with an array of arrays rather than an array of lists
ret_val = convert_to_np_array([list(memory[0]),[]])
ret_val[1] = list(memory[1])
return ret_val
@tc.typecheck
def _validate_memory(self, memory:tc.any(list, np.ndarray), context):
# memory must be list or 2d array with 2 items
if len(memory) != 2 and not all(np.array(i).ndim == 1 for i in memory):
raise FunctionError(f"Attempt to store memory in {self.__class__.__name__} ({memory}) "
f"that is not a 2d array with two items ([[key],[value]])")
self._validate_key(memory[KEYS], context)
@tc.typecheck
def _validate_key(self, key:tc.any(list, np.ndarray), context):
# Length of key must be same as that of existing entries (so it can be matched on retrieval)
if len(key) != self.parameters.key_size._get(context):
raise FunctionError(f"Length of 'key' ({key}) to store in {self.__class__.__name__} ({len(key)}) "
f"must be same as others in the dict ({self.parameters.key_size._get(context)})")
@tc.typecheck
@handle_external_context()
def get_memory(self, query_key:tc.any(list, np.ndarray), context=None):
"""get_memory(query_key, context=None)
Retrieve memory from `memory <DictionaryMemory.memory>` based on `distance_function
<DictionaryMemory.distance_function>` and `selection_function
<DictionaryMemory.selection_function>`.
Arguments
---------
query_key : list or 1d array
must be same length as key(s) of any existing entries in `memory <DictionaryMemory.memory>`.
Returns
-------
value and key for item retrieved : 2d array as list
if no retrieval occurs, returns appropriately shaped zero-valued array.
"""
# QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)?
# ALSO, SHOULD PROBABILISTIC SUPPRESSION OF RETRIEVAL BE HANDLED HERE OR function (AS IT IS NOW).
self._validate_key(query_key, context)
_memory = self.parameters.previous_value._get(context)
# if no memory, return the zero vector
if len(_memory[KEYS]) == 0:
# zeros_key = [0] * self.parameters.key_size.get(context)
# zeros_val = [0] * self.parameters.val_size.get(context)
zeros_key = [0] * self.parameters.key_size.get(context)
zeros_val = [0] * self.parameters.val_size.get(context)
return [zeros_key, zeros_val]
# Get distances between query_key and all keys in memory
distances = [self.distance_function([query_key, list(m)]) for m in _memory[KEYS]]
# Get the best-match(es) in memory based on selection_function and return as non-zero value(s) in an array
selection_array = self.selection_function(distances, context=context)
indices_of_selected_items = np.flatnonzero(selection_array)
# Single key identified
if len(indices_of_selected_items)==1:
index_of_selected_item = int(np.flatnonzero(selection_array))
# More than one key identified
else:
selected_keys = _memory[KEYS]
# Check for any duplicate keys in matches and, if they are not allowed, return zeros
if (not self.duplicate_keys
and any(list(selected_keys[indices_of_selected_items[0]])==list(selected_keys[other])
for other in indices_of_selected_items[1:])):
warnings.warn(f'More than one item matched key ({query_key}) in memory for {self.name} of '
f'{self.owner.name} even though {repr("duplicate_keys")} is False')
return [[0]* self.parameters.key_size._get(context),
[0]* self.parameters.val_size._get(context)]
if self.equidistant_keys_select == RANDOM:
random_state = self._get_current_parameter_value('random_state', context)
index_of_selected_item = random_state.choice(indices_of_selected_items)
elif self.equidistant_keys_select == OLDEST:
index_of_selected_item = indices_of_selected_items[0]
elif self.equidistant_keys_select == NEWEST:
index_of_selected_item = indices_of_selected_items[-1]
else:
assert False, f'PROGRAM ERROR: bad specification ({self.equidistant_keys_select}) for ' \
f'\'equidistant_keys_select\' parameter of {self.name} for {self.owner.name}'
best_match_key = _memory[KEYS][index_of_selected_item]
best_match_val = _memory[VALS][index_of_selected_item]
# Return as list of lists
return [list(best_match_key), list(best_match_val)]
@tc.typecheck
def _store_memory(self, memory:tc.any(list, np.ndarray), context):
"""Save a key-value pair to `memory <DictionaryMemory.memory>`
Arguments
---------
memory : list or 2d array
must be two items, a key and a value, each of which must be a list of numbers or a 1d array;
the key must be the same length as key(s) of any existing entries in `dict <DictionaryMemory.dict>`.
"""
self._validate_memory(memory, context)
key = list(memory[KEYS])
val = list(memory[VALS])
d = self.parameters.previous_value._get(context)
matches = [k for k in d[KEYS] if key==list(k)]
# If duplicate keys are not allowed and key matches any existing keys, don't store
if matches and self.duplicate_keys is False:
storage_succeeded = False
# If duplicate_keys is specified as OVERWRITE, replace value for matching key:
elif matches and self.duplicate_keys == OVERWRITE:
if len(matches)>1:
raise FunctionError(f"Attempt to store item ({memory}) in {self.name} "
f"with 'duplicate_keys'='OVERWRITE' "
f"when there is more than one matching key in its memory; "
f"'duplicate_keys' may have previously been set to 'True'")
try:
index = d[KEYS].index(key)
except AttributeError:
index = d[KEYS].tolist().index(key)
except ValueError:
index = np.array(d[KEYS]).tolist().index(key)
d[VALS][index] = val
storage_succeeded = True
else:
# Append new key and value to their respective lists
keys = list(d[KEYS])
keys.append(key)
values = list(d[VALS])
values.append(val)
# Return 3d array with keys and vals as lists
d = [keys, values]
storage_succeeded = True
if len(d[KEYS]) > self.max_entries:
d = np.delete(d, [KEYS], axis=1)
self.parameters.previous_value._set(d,context)
self._memory = d
return storage_succeeded
@tc.typecheck
@handle_external_context()
def add_to_memory(self, memories:tc.any(list, np.ndarray), context=None):
"""Add one or more key-value pairs into `memory <DictionaryMemory.memory>`
Arguments
---------
memories : list or array
a single memory (list or 2d array) or a list or array of memories, each of which must be a valid entry
consisting of two items (e.g., [[key],[value]] or [[[key1],[value1]],[[key2],[value2]]]).
The keys must all be the same length and equal to the length of the key(s) of any existing entries in `dict
<DictionaryMemory.dict>`. Items are added to memory in the order listed.
"""
memories = self._parse_memories(memories, 'add_to_memory', context)
for memory in memories:
self._store_memory(memory, context)
@tc.typecheck
@handle_external_context()
def delete_from_memory(self, memories:tc.any(list, np.ndarray), key_only:bool= True, context=None):
"""Delete one or more key-value pairs from `memory <DictionaryMemory.memory>`
Arguments
---------
memories : list or array
a single memory (list or 2d array) or a list or array of memories, each of which must be a valid entry
consisting of two items (e.g., [[key],[value]] or [[[key1],[value1]],[[key2],[value2]]]).
key_only : bool : default True
if True, delete all memories with the same keys as those listed in **memories**; if False,
delete only memories that have the same key *and* value as those listed in **memories**.
"""
memories = self._parse_memories(memories, 'delete_from_memory', context)
keys = [list(m[KEYS]) for m in memories]
vals = [list(m[VALS]) for m in memories]
for i, key in enumerate(keys):
for j, stored_key in enumerate(self._memory[KEYS]):
if key == list(stored_key):
if key_only or vals[i] == list(self._memory[VALS][j]):
memory_keys = np.delete(self._memory[KEYS],j,axis=0)
memory_vals = np.delete(self._memory[VALS],j,axis=0)
self._memory = np.array([list(memory_keys), list(memory_vals)])
self.parameters.previous_value._set(self._memory, context)
def _parse_memories(self, memories, method, context=None):
"""Parse passing of single vs. multiple memories, validate memories, and return ndarray"""
memories = convert_to_np_array(memories)
if not 1 <= memories.ndim <= 3:
raise FunctionError(f"'memories' arg for {method} method of {self.__class__.__name__} "
f"must be a 2-item list or 2d array, or a list or 3d array containing those")
if (memories.ndim == 2 and memories.dtype != object) or (memories.ndim == 1 and memories.dtype == object):
memories = np.expand_dims(memories,axis=0)
for memory in memories:
self._validate_memory(memory, context)
return memories
@property
def memory(self):
try:
# Return 3d array with keys and vals as lists
# IMPLEMENTATION NOTE: array is used for multi-line printout
return np.array(list(zip(self._memory[KEYS],self._memory[VALS])))
except:
return np.array([])
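# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library source). It assumes
# DictionaryMemory is importable from this module and walks through the
# store/retrieve cycle described in the docstrings above:
#
#     em = DictionaryMemory(duplicate_keys=False)
#     em([[1, 2], [5]])             # stores key [1, 2] with value [5]
#     em([[3, 4], [7]])             # stores a second entry
#     result = em([[1, 2], [0]])    # retrieves the entry whose key best matches [1, 2]
#     print(em.memory)              # 3d array of the stored [key, value] pairs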
import tensorflow as tf
import numpy as np
np.set_printoptions(precision=2, linewidth=200)
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 12:28:56 2020
@author: <NAME>
"""
import pandas as pd
import numpy as np
def targetcheck(data):
""" Checks if the dataset is of classification model or regression model"""
target = data.iloc[:,-1]
unique_values = target.nunique()
if(unique_values/len(target)>0.65):
return "Regression"
else:
return("Classification")
def missing_values(df):
cols = df.columns
num_cols = list(df._get_numeric_data().columns)
char_cols= list(set(cols) - set(num_cols))
for cols in char_cols:
df[cols] = df[cols].fillna(value=df[cols].mode()[0])
for cols in num_cols:
df[cols] = df[cols].fillna(value=df[cols].mean())
return df,char_cols
def remove_outliers(df,cat_cols):
num_cols = list(set(df.columns) - set(cat_cols))
for col in num_cols:
if col != df.columns[-1]:
feature = df[col]
sorted(feature)
q1,q3 = feature.quantile([0.25,0.75])
iqr = q3-q1
upper_limit = q3 + (1.5 * iqr)
lower_limit = q1 - (1.5 * iqr)
df[col] = np.where(df[col]>upper_limit,upper_limit,df[col])
df[col] = np.where(df[col]<lower_limit,lower_limit,df[col])
return df
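# Example driver (illustrative): the CSV path is hypothetical and, as the
# functions above assume, the last column is expected to hold the target.
#
#     data = pd.read_csv("dataset.csv")
#     print(targetcheck(data))                  # "Regression" or "Classification"
#     data, char_cols = missing_values(data)    # impute mode for categoricals, mean for numerics
#     data = remove_outliers(data, char_cols)   # clamp numeric columns to the 1.5*IQR fence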
# encoding: utf-8
from datetime import datetime
import numpy as np
import tensorflow as tf
import dataset
import metrics_tf
from dataset import DataSet
import os
import time
import tensorflow.contrib.layers as layers
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
import losses
current_time = time.strftime("%Y-%m-%d--%H-%M-%S", time.gmtime())
# these weights are from resnet: https://github.com/ry/tensorflow-resnet/blob/master/resnet.py
BN_DECAY = 0.9997
BN_EPSILON = 1e-3
CONV_WEIGHT_DECAY = 4e-5
CONV_WEIGHT_STDDEV = 0.1
MAX_EPOCHS = int(1e6)
LOG_DEVICE_PLACEMENT = False
# BATCH_SIZE = 8
BATCH_SIZE = 4 # batch size 8 does not fit to Nvidia GTX 1080 Ti. Hopefully batch size 4 will fit
# TRAIN_FILE = "train.csv"
# TEST_FILE = "test.csv"
# TRAIN_FILE = "train-small.csv"
# TEST_FILE = "train-small.csv"
# TRAIN_FILE = "train-nyu.csv"
# TEST_FILE = "test-nyu.csv"
# TRAIN_FILE = "train-depth-gta.csv"
# TEST_FILE = "test-depth-gta.csv"
# for voxelmap
TRAIN_FILE = "train-voxel-gta.csv"
TEST_FILE = "test-voxel-gta.csv"
# for trying to overfit
# TRAIN_FILE = "train-gta-small.csv"
# TEST_FILE = "train-gta-small.csv"
PREDICT_DIR = os.path.join('predict', current_time)
CHECKPOINT_DIR = os.path.join('checkpoint', current_time) # Directory name to save the checkpoints
LOGS_DIR = 'logs'
# GPU_IDX can be either integer, array or None. If None, only CPU is used
GPU_IDX = [0]
# GPU_IDX = None
# WEIGHTS_REGULARIZER = slim.l2_regularizer(CONV_WEIGHT_DECAY)
WEIGHTS_REGULARIZER = None
IS_VOXELMAP = True
USE_SOFTMAX_OUTPUT = False
ITERATIONS_PER_DECAY = 30000
INITIAL_LEARNING_RATE = 1e-4
LEARNING_RATE_DECAY_FACTOR = 0.1 # dividing by 10 every decay
class Network(object):
def __init__(self):
self.sess = None
self.saver = None
self.x = None # input images
self.y = None # desired output depth bins
self.y_image_orig = None # desired output depth images original, not used for voxelmap
self.y_image = None # desired output depth images (synthetized from depths)
self.y_image_rank4 = None # desired output depth images in rank4
self.voxelmaps = None # images
self.voxelmaps_test = None
self.images = None # images
self.images_test = None
self.depths = None # depth images
self.depths_test = None
self.depth_bins = None # depth bins
self.depth_bins_test = None
self.depth_reconst = None # depth images, reconstructed from bins (correct depth range...)
self.depth_reconst_test = None
# GPU settings
if type(GPU_IDX) not in [type(None), list, int]:
raise Exception('Wrong GPU_IDX type, must be None, list or int, but is {}'.format(type(GPU_IDX)))
if GPU_IDX is None:
self.config = tf.ConfigProto(device_count={'GPU': 0})
else:
self.config = tf.ConfigProto(log_device_placement=LOG_DEVICE_PLACEMENT)
self.config.gpu_options.allow_growth = False
self.config.gpu_options.allocator_type = 'BFC'
devices_environ_var = 'CUDA_VISIBLE_DEVICES'
if devices_environ_var in os.environ:
available_devices = os.environ[devices_environ_var].split(',')
if len(available_devices):
if isinstance(GPU_IDX, list):
os.environ[devices_environ_var] = ', '.join([available_devices[gpu] for gpu in GPU_IDX])
else:
gpu = GPU_IDX
os.environ[devices_environ_var] = available_devices[gpu]
def resize_layer(self, scope_name, inputs, small_size, big_size, stride=1, rate=1):
with arg_scope([layers.conv2d], rate=rate):
with tf.variable_scope(scope_name) as scope:
conv1 = slim.conv2d(inputs, num_outputs=small_size, scope='conv2', kernel_size=1, stride=stride,
activation_fn=tf.nn.relu,
)
conv1 = slim.conv2d(conv1, num_outputs=small_size, scope='conv3', kernel_size=3, stride=1,
activation_fn=tf.nn.relu,
)
conv1 = slim.conv2d(conv1, num_outputs=big_size, scope='conv4', kernel_size=1, stride=1,
activation_fn=None,
)
conv1b = slim.conv2d(inputs, num_outputs=big_size, scope='conv5', kernel_size=1, stride=stride,
activation_fn=None,
)
# concat
conv1 = conv1 + conv1b
conv1 = tf.nn.relu(conv1, 'relu')
return conv1
def non_resize_layer(self, scope_name, inputs, small_size, big_size, rate=1):
with arg_scope([layers.conv2d], rate=rate):
with tf.variable_scope(scope_name) as scope:
conv1 = slim.conv2d(inputs, num_outputs=small_size, scope='conv2', kernel_size=1, stride=1,
activation_fn=tf.nn.relu,
)
conv1 = slim.conv2d(conv1, num_outputs=small_size, scope='conv3', kernel_size=3, stride=1,
activation_fn=tf.nn.relu,
)
conv1 = slim.conv2d(conv1, num_outputs=big_size, scope='conv4', kernel_size=1, stride=1,
activation_fn=None,
)
# concat
conv1 = conv1 + inputs
conv1 = tf.nn.relu(conv1, 'relu')
return conv1
def initialize_by_resnet(self):
# I initialize only trainable variables, not others. Now is unified saving and restoring
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='network'))
# saver.restore(self.sess, 'init-weights/resnet')
# saver.restore(self.sess, 'init-weights-2/resnet') # initialization with new deconv layer(5,1),(8-4),depth=50
# saver.restore(self.sess, 'init-weights-3/resnet') # initialization with new deconv layer(2,2),(8-2),depth=50
saver.restore(self.sess, 'init-weights-4/resnet') # initialization with new deconv layer(2,2),(8-2),depth=200
print('weights initialized')
def initialize_by_last_version(self):
# I initialize only trainable variables, not others. Now is unified saving and restoring
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='network'))
saver.restore(self.sess, tf.train.latest_checkpoint('checkpoint/2018-05-04--22-57-49'))
print('weights initialized')
def inference(self):
batch_norm_params = {
'decay': BN_DECAY, # also known as momentum, they are the same
'updates_collections': None,
'epsilon': BN_EPSILON,
'scale': True,
'scope': 'batch_norm',
}
with arg_scope([layers.conv2d, layers.conv2d_transpose],
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
weights_initializer=layers.xavier_initializer(uniform=False),
biases_initializer=tf.constant_initializer(0.1),
weights_regularizer=WEIGHTS_REGULARIZER
):
with tf.variable_scope('network') as scope:
self.x = tf.placeholder(tf.float32, shape=[None, dataset.IMAGE_HEIGHT, dataset.IMAGE_WIDTH, 3],
name='x')
conv = slim.conv2d(self.x, num_outputs=64, scope='conv1', kernel_size=7, stride=2,
activation_fn=tf.nn.relu)
print("conv1")
print(conv)
max1 = slim.max_pool2d(conv, kernel_size=3, stride=2, scope='maxpool1')
conv = self.resize_layer("resize1", max1, small_size=64, big_size=256)
print("conv2")
print(conv)
for i in range(2):
conv = self.non_resize_layer("resize2-" + str(i), conv, small_size=64, big_size=256)
conv = self.resize_layer("resize3", conv, small_size=128, big_size=512, stride=2)
l1concat = conv
print("l1concat")
print(l1concat)
for i in range(7):
conv = self.non_resize_layer("resize4-" + str(i), conv, small_size=128, big_size=512)
l2concat = conv
print("l2concat")
print(l2concat)
conv = self.resize_layer("resize5", conv, small_size=256, big_size=1024, rate=2)
l3concat = conv
print("l3concat")
print(l3concat)
for i in range(35):
conv = self.non_resize_layer("resize6-" + str(i), conv, small_size=256, big_size=1024, rate=2)
l4concat = conv
print("l4concat")
print(l4concat)
conv = self.resize_layer("resize7", conv, small_size=512, big_size=2048, rate=4)
l5concat = conv
print("l5concat")
print(l5concat)
for i in range(2):
conv = self.non_resize_layer("resize8-" + str(i), conv, small_size=512, big_size=2048, rate=4)
l6concat = conv
print("l6concat")
print(l6concat)
conv = tf.concat([l1concat, l2concat, l3concat, l4concat, l5concat, l6concat], axis=3)
conv = tf.layers.dropout(conv, rate=0.5)
if IS_VOXELMAP:
conv = slim.conv2d(conv, num_outputs=dataset.DEPTH_DIM, scope='convFinal', kernel_size=3, stride=1,
normalizer_fn=None, activation_fn=None)
print('shape before deconvs: ', conv.shape)
# experimentally adding one more layer
# conv = slim.conv2d_transpose(conv, num_outputs=int(dataset.DEPTH_DIM * 2), kernel_size=2, stride=2, # try these later
# conv = slim.conv2d_transpose(conv, num_outputs=int(dataset.DEPTH_DIM / 2), kernel_size=5, stride=1,
# normalizer_fn=None, activation_fn=tf.nn.leaky_relu, scope='deconv-prefinal')
print('shape before last deconv: ', conv.shape)
# conv = slim.conv2d_transpose(conv, num_outputs=dataset.DEPTH_DIM, kernel_size=8, stride=2,
conv = slim.conv2d_transpose(conv, num_outputs=dataset.DEPTH_DIM, kernel_size=8, stride=4,
normalizer_fn=None, activation_fn=None, scope='deconvFinal')
print('shape after last deconv: ', conv.shape)
else:
conv = slim.conv2d(conv, num_outputs=dataset.DEPTH_DIM + 1, scope='convFinal', kernel_size=3,
stride=1,
normalizer_fn=None, activation_fn=None)
conv = slim.conv2d_transpose(conv, num_outputs=dataset.DEPTH_DIM + 1, kernel_size=8, stride=4,
normalizer_fn=None, activation_fn=None, scope='deconvFinal')
if USE_SOFTMAX_OUTPUT:
probs = slim.softmax(conv, 'softmaxFinal')
else:
probs = conv
probs = tf.identity(probs, 'inference')
conv = tf.identity(conv, 'logits')
print('conv.shape', conv.shape)
return probs, conv
def loss(self, logits):
H = dataset.TARGET_HEIGHT
W = dataset.TARGET_WIDTH
# size is depth dim + 1, because 1 layer is for too distant points, outside of desired area
if IS_VOXELMAP:
self.y = tf.placeholder(tf.float32, shape=[None, H, W, dataset.DEPTH_DIM], name='y')
else:
self.y = tf.placeholder(tf.float32, shape=[None, H, W, dataset.DEPTH_DIM + 1], name='y')
self.y_image = tf.placeholder(tf.float32, shape=[None, H, W], name='y_image')
self.y_image_rank4 = tf.expand_dims(self.y_image, 3)
self.y_image_orig = tf.placeholder(tf.float32, shape=[None, H, W, 1], name='y_orig')
print('labels shape:', self.y.shape)
print('logits shape:', logits.shape)
# depth losses
# cost = self.softmax_loss(labels=self.y, logits=logits)
# cost = losses.information_gain_loss(labels=self.y, logits=logits)
# cost = losses.information_gain_loss_with_undefined(labels=self.y, logits=logits)
# voxelwise losses
cost = losses.logistic_voxelwise_loss_with_undefined(labels=self.y, predicted=logits)
# cost = losses.softmax_voxelwise_loss_with_undefined(labels=self.y, predicted=logits)
# cost = losses.l2_voxelwise_loss_with_undefined(labels=self.y, logits=logits)
tf.summary.scalar("cost", cost)
return cost
def train_op(self, total_loss, global_step, batch_size):
# decay_steps = ITERATIONS_PER_DECAY
# lr = tf.train.exponential_decay(
# INITIAL_LEARNING_RATE,
# global_step,
# decay_steps,
# LEARNING_RATE_DECAY_FACTOR,
# staircase=True)
lr = tf.constant(INITIAL_LEARNING_RATE)
tf.summary.scalar('learning_rate', lr)
# opt = tf.train.AdamOptimizer(lr, epsilon=1e-5)
opt = tf.contrib.opt.NadamOptimizer(lr, epsilon=1e-8)
# opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=True)
return opt.minimize(total_loss, global_step=global_step)
def metrics(self, estimated_depths):
if IS_VOXELMAP:
fpr, tpr, iou, softmax, l1 = self.create_metrics(estimated_depths)
tf.summary.scalar("false positive rate", fpr)
tf.summary.scalar("true positive rate", tpr)
tf.summary.scalar("iou", iou)
tf.summary.scalar("softmax metric", softmax)
tf.summary.scalar("l1 dist on known", l1)
else:
treshold, mre, rms, rmls = self.create_metrics(estimated_depths)
tf.summary.scalar("under treshold 1.25", treshold)
tf.summary.scalar("mean relative error", mre)
tf.summary.scalar("root mean square error", rms)
tf.summary.scalar("root mean log square error", rmls)
def create_metrics(self, estimated_depths):
if IS_VOXELMAP:
voxelmap_pred = estimated_depths
print('self.y shape:', self.y.shape)
print('voxelmap_pred shape:', voxelmap_pred.shape)
fpr = metrics_tf.voxel_false_positive_error(self.y, voxelmap_pred)
tpr = metrics_tf.voxel_true_positive_error(self.y, voxelmap_pred)
iou = metrics_tf.voxel_iou_error(self.y, voxelmap_pred)
softmax = losses.softmax_voxelwise_loss_with_undefined(self.y, voxelmap_pred)
l1 = metrics_tf.voxel_l1_dist_with_unknown(self.y, voxelmap_pred)
return fpr, tpr, iou, softmax, l1
else:
estimated_depths_images = self.bins_to_depth(estimated_depths)
print('self.y_image_rank4 shape:', self.y_image_rank4.shape)
print('estimated_depths_images shape:', estimated_depths_images.shape)
treshold = metrics_tf.depth_accuracy_under_treshold(self.y_image_rank4, estimated_depths_images, 1.25)
mre = metrics_tf.depth_mean_relative_error(self.y_image_rank4, estimated_depths_images)
rms = metrics_tf.depth_root_mean_squared_error(self.y_image_rank4, estimated_depths_images)
rmls = metrics_tf.depth_root_mean_squared_log_error(self.y_image_rank4, estimated_depths_images)
return treshold, mre, rms, rmls
def test_metrics(self, cost, estimated_depths):
if IS_VOXELMAP:
fpr, tpr, iou, softmax, l1 = self.create_metrics(estimated_depths)
sum1 = tf.summary.scalar("test-cost", cost)
sum2 = tf.summary.scalar("test-false positive rate", fpr)
sum3 = tf.summary.scalar("test-true positive rate", tpr)
sum4 = tf.summary.scalar("test-iou", iou)
sum5 = tf.summary.scalar("test-softmax metric", softmax)
sum6 = tf.summary.scalar("test-l1 dist on known", l1)
return tf.summary.merge([sum1, sum2, sum3, sum4, sum5, sum6])
else:
treshold, mre, rms, rmls = self.create_metrics(estimated_depths)
sum1 = tf.summary.scalar("test-cost", cost)
sum2 = tf.summary.scalar("test-under treshold 1.25", treshold)
sum3 = tf.summary.scalar("test-mean relative error", mre)
sum4 = tf.summary.scalar("test-root mean square error", rms)
sum5 = tf.summary.scalar("test-root mean log square error", rmls)
sum6 = tf.summary.image("test-predicted_depths", tf.expand_dims(estimated_depths, 3))
return tf.summary.merge([sum1, sum2, sum3, sum4, sum5, sum6])
@staticmethod
def bins_to_depth(depth_bins):
weights = np.array(range(dataset.DEPTH_DIM)) * dataset.Q + np.log(dataset.D_MIN)
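# `weights` holds the log-depth bin centres (log(D_MIN) + i*Q). A plausible way to
# finish the conversion, assuming the usual softmax-weighted sum in log-depth space
# (an assumption, not verified against the rest of this file), would be:
#     sm = tf.nn.softmax(depth_bins, axis=3)
#     depth = tf.exp(tf.reduce_sum(sm * weights, axis=3, keepdims=True))
#     return depth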
import MDSplus as mds
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.widgets import Slider
from scipy.ndimage import median_filter
import sys
import numpy as np
mask_y = np.arange(195, 384, 4)
mask_x = np.arange(3, 512, 8)
import os,time
import numpy as np
import rawpy
import glob, scipy.io
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from model import SeeInDark
from net_canny import Net
from pytorch_msssim import MSSSIM
saveimg = True
ps = 512 #patch size for training
device = torch.device('cuda') #'cuda:'+os.environ['CUDA']#torch.device('cuda') #if torch.cuda.is_available() else 'cpu')
DIR = '/home/cse/ug/15074014/'
if os.environ["USER"] == "ketankr9":
DIR = '/home/ketankr9/workspace/mtp/codes/'
device = torch.device('cpu')
ps = 256
input_dir = DIR + 'Sony/short/'
gt_dir = DIR + 'Sony/long/'
result_dir = model_dir = DIR + 'Sony/result_Sony_edge_psnr_ssim/'
os.system('mkdir -p '+result_dir)
chpkdir = model_dir+'checkpoint_sony_resume.pth'
writer = SummaryWriter(result_dir+'log')
print(device)
#get train and test IDs
train_fns = glob.glob(gt_dir + '0*.ARW')
train_ids = []
for i in range(len(train_fns)):
_, train_fn = os.path.split(train_fns[i])
train_ids.append(int(train_fn[0:5]))
test_fns = glob.glob(gt_dir + '/1*.ARW')
test_ids = []
for i in range(len(test_fns)):
_, test_fn = os.path.split(test_fns[i])
test_ids.append(int(test_fn[0:5]))
save_freq = 100
DEBUG = 0
if DEBUG == 1:
save_freq = 100
train_ids = train_ids[0:5]
test_ids = test_ids[0:5]
def pack_raw(raw):
#pack Bayer image to 4 channels
im = raw.raw_image_visible.astype(np.float32)
im = np.maximum(im - 512,0)/ (16383 - 512) #subtract the black level
im = np.expand_dims(im,axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
out = np.concatenate((im[0:H:2,0:W:2,:],
im[0:H:2,1:W:2,:],
im[1:H:2,1:W:2,:],
im[1:H:2,0:W:2,:]), axis=2)
return out
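# Shape sketch (illustrative, the file name is hypothetical): a Sony ARW Bayer frame
# of H x W samples becomes an (H/2, W/2, 4) float array scaled to [0, 1]:
#     raw = rawpy.imread('./Sony/short/00001_00_0.1s.ARW')
#     packed = pack_raw(raw)    # roughly (1424, 2128, 4) for the Sony subset used here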
# LOSS REDUCED MEAN
mseclass = nn.MSELoss(reduction='mean')
def reduce_mean(out_im, gt_im):
return mseclass(out_im, gt_im)
# LOSS VGG
from utils import VGGLoss, GaussianSmoothing
vggloss = VGGLoss(device=device)
gaussianSmoothing = GaussianSmoothing(3, 5, 1, device=device)
# LOSS COLORLOSS
def colorloss(out, gt):
out = gaussianSmoothing(out)
gt = gaussianSmoothing(gt)
return torch.abs(out-gt).mean()
# MSSSIM
msssim = MSSSIM().to(device)
# LOSS CANNY
canny = Net(threshold=3.0, device=device).to(device)
canny.eval()
def canny_loss(out_im, gt_im, ch=""):
blurred_img1, grad_mag1, grad_orientation1, thin_edges1, thresholded1, early_threshold1 = canny(gt_im)
blurred_img2, grad_mag2, grad_orientation2, thin_edges2, thresholded2, early_threshold2 = canny(out_im)
if ch == '1':
return mseclass(thresholded1, thresholded2)
elif ch == '1bool':
return mseclass((thresholded1 != 0).float(), (thresholded2 != 0).float())
elif ch == '2':
return mseclass(early_threshold1, early_threshold2)
elif ch == '2bool':
return mseclass((early_threshold1 != 0).float(), (early_threshold2 != 0).float())
elif ch == '3':
return mseclass(thresholded1, thresholded2) + mseclass(early_threshold1, early_threshold2)
elif ch == '3bool':
return mseclass((thresholded1 != 0).float(), (thresholded2 != 0).float()) + mseclass((early_threshold1 != 0).float(), (early_threshold2 != 0).float())
return mseclass(thresholded1/(thresholded1.max()+1), thresholded2/(thresholded2.max()+1))
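# Illustrative composition of the losses defined above; the weights and the
# out_img/gt_img names are placeholders, not the author's values. It mirrors the
# 'MSE', 'CANNY' and 'MSSSIM' bookkeeping used in the training loop below:
#     loss_mse = reduce_mean(out_img, gt_img)
#     loss_canny = canny_loss(out_img, gt_img)
#     loss_ssim = 1 - msssim(out_img, gt_img)
#     loss = loss_mse + 0.1 * loss_canny + 0.1 * loss_ssim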
#Raw data takes long time to load. Keep them in memory after loaded.
gt_images=[None]*6000
input_images = {}
input_images['300'] = [None]*len(train_ids)
input_images['250'] = [None]*len(train_ids)
input_images['100'] = [None]*len(train_ids)
g_loss = np.zeros((5000,1))
learning_rate = 1e-4
model = SeeInDark().to(device)
opt = optim.Adam(model.parameters(), lr = learning_rate)
#load last saved model weights
if os.path.isfile(chpkdir):
checkpoint = torch.load(chpkdir)
model.load_state_dict(checkpoint['model'])
opt.load_state_dict(checkpoint['optimizer'])
lastepoch = checkpoint['epoch'] + 1
else:
lastepoch = 0
model._initialize_weights()
print("*****lastepoch***** ", lastepoch)
for epoch in range(lastepoch,4001):
cnt=0
if epoch > 2000:
for g in opt.param_groups:
g['lr'] = 1e-5
E_loss = {'CANNY':0, 'MSE':0, 'MSSSIM':0, 'total':0}
for ind in np.random.permutation(len(train_ids)):
# get the path from image id
train_id = train_ids[ind]
in_files = glob.glob(input_dir + '%05d_00*.ARW'%train_id)
in_path = in_files[np.random.random_integers(0,len(in_files)-1)]
_, in_fn = os.path.split(in_path)
gt_files = glob.glob(gt_dir + '%05d_00*.ARW'%train_id)
gt_path = gt_files[0]
_, gt_fn = os.path.split(gt_path)
in_exposure = float(in_fn[9:-5])
gt_exposure = float(gt_fn[9:-5])
ratio = min(gt_exposure/in_exposure,300)
st=time.time()
cnt+=1
if input_images[str(ratio)[0:3]][ind] is None:
raw = rawpy.imread(in_path)
input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw),axis=0) *ratio
gt_raw = rawpy.imread(gt_path)
im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_images[ind] = np.expand_dims(np.float32(im/65535.0),axis = 0)
#crop
H = input_images[str(ratio)[0:3]][ind].shape[1]
W = input_images[str(ratio)[0:3]][ind].shape[2]
xx = np.random.randint(0,W-ps)
yy = np.random.randint(0,H-ps)
input_patch = input_images[str(ratio)[0:3]][ind][:,yy:yy+ps,xx:xx+ps,:]
gt_patch = gt_images[ind][:,yy*2:yy*2+ps*2,xx*2:xx*2+ps*2,:]
if np.random.randint(2,size=1)[0] == 1: # random flip
input_patch = np.flip(input_patch, axis=1)
gt_patch = np.flip(gt_patch, axis=1)
if np.random.randint(2,size=1)[0] == 1: # random flip along the width axis
input_patch = np.flip(input_patch, axis=2)
gt_patch = np.flip(gt_patch, axis=2)
if np.random.randint(2,size=1)[0] == 1: # random transpose
input_patch = np.transpose(input_patch, (0,2,1,3))
gt_patch = np.transpose(gt_patch, (0,2,1,3))
import json
import numpy as np
from numba import vectorize, bool_, float64
# These are the lower level clipping functions.
def square_clip(points, bounds):
"""
Clips a square from a tuple describing the position of the square.
:param points: A structured array (or DataFrame) of point coordinates with named "x" and "y" fields.
:param bounds: A tuple of length 4: the minimum x, minimum y, maximum x and maximum y coordinates of the square.
:return: A boolean mask, true is within the square, false is outside of the square.
"""
# Extract x y coordinates from cloud
xy = points[["x", "y"]]
# Create masks for each axis
x_in = (xy["x"] >= bounds[0]) & (xy["x"] <= bounds[2])
y_in = (xy["y"] >= bounds[1]) & (xy["y"] <= bounds[3])
stack = np.stack((x_in, y_in), axis=1)
in_clip = np.all(stack, axis=1)
return(in_clip)
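# Quick check (illustrative): `points` is assumed to be a structured array with
# named "x" and "y" fields, and `bounds` is (min_x, min_y, max_x, max_y):
#     pts = np.zeros(3, dtype=[("x", float), ("y", float)])
#     pts["x"] = [0.5, 2.0, 9.0]; pts["y"] = [0.5, 2.0, 9.0]
#     square_clip(pts, (0, 0, 5, 5))    # -> array([ True,  True, False])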
def ray_trace(x, y, poly):
"""
Determines for some set of x and y coordinates, which of those coordinates is within `poly`. Ray trace is \
generally called as an internal function, see :func:`.poly_clip`
:param x: A 1D numpy array of x coordinates.
:param y: A 1D numpy array of y coordinates.
:param poly: The coordinates of a polygon as a numpy array (i.e. from geo_json['coordinates']
:return: A 1D boolean numpy array, true values are those points that are within `poly`.
"""
@vectorize([bool_(float64, float64)])
def ray(x, y):
# where xy is a coordinate
n = len(poly)
inside = False
p2x = 0.0
p2y = 0.0
xints = 0.0
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x, p1y = p2x, p2y
return inside
return(ray(x, y))
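# Point-in-polygon sanity check (illustrative), using a unit right triangle:
#     tri = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     ray_trace(np.array([0.1, 0.9]), np.array([0.1, 0.9]), tri)
#     # -> array([ True, False]) : (0.1, 0.1) lies inside, (0.9, 0.9) outside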
def poly_clip(points, poly):
"""
Returns the indices of `points` that are within a given polygon. This differs from :func:`.ray_trace` \
in that it enforces a small "pre-clip" optimization by first clipping to the polygon bounding box. This function \
is directly called by :meth:`.Cloud.clip`.
:param points: A structured array of point coordinates with named "x" and "y" fields, as in :func:`.square_clip`.
:param poly: A shapely Polygon, with coordinates in the same CRS as the point cloud.
:return: A 1D numpy array of indices corresponding to points within the given polygon.
"""
# Clip to bounding box
bbox = poly.bounds
pre_clip_mask = square_clip(points, bbox)
pre_clip = points[["x", "y"]].iloc[pre_clip_mask].values
# Store old indices
pre_clip_inds = np.where(pre_clip_mask)
#freqle.py
import trace
import numpy as np
from numpy import random as rnd
from time import time
from termcolor import colored
import matplotlib.pyplot as plt
class bosonGrid():
# This class generates a boson grid mass
def __init__(self,
n_mus = 300,
mu_min = 2.15e-13,
mu_max = 1.27e-12,
scale = 'lin', # can be log or lin (for linearly or logarithmically spaced points)
verbose = False,
v = False):
self.n_mus = n_mus
self.mu_min = mu_min
self.mu_max = mu_max
self.scale = scale
self.boson_grid = None
self.mass_grid_built = False
self.verbose = verbose
self.v = v
def build_mass_grid(self):
if self.verbose or self.v:
print("Building mass grid...")
if self.scale == 'log':
self.boson_grid = np.geomspace(self.mu_min, self.mu_max, self.n_mus)
if self.verbose or self.v:
print("=> Mass Grid was built, a logarithmic scale was used")
elif self.scale == 'lin':
self.boson_grid = np.linspace(self.mu_min, self.mu_max, self.n_mus)
if self.verbose or self.v:
print("=> Mass Grid was built, a linear scale was used")
self.mass_grid_built = True
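# Illustrative standalone use of the mass grid (values mirror the class defaults;
# the masses are interpreted in eV, as the 1e-13 to 1e-12 range suggests):
#     grid = bosonGrid(n_mus=100, scale='log', v=True)
#     grid.build_mass_grid()
#     grid.boson_grid    # 100 logarithmically spaced boson masses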
# ==============================================================================
# ==============================================================================
class cluster(bosonGrid):
def __init__(self,
obs_distance = 200, # distance [kpc] from detector
cluster_eta = 1e3, # system age [yr]
Nbh = 20000, # Number of generated BH systems
Nbins = 36,
mass_dis = 'kroupa', # BH mass distribution (currently, uniform ('unif'), exponential ('exp'), Kroupa ('kroupa') and Triangular ('triang') are supported)
mass_range = [5, 25],
spin_dis = 'gauss', # BH spin distribution (currently uniform ('unif') and gaussian ('gauss') are supported)
spin_range = [0.2, 0.9], # in case of uniform distr
spin_mean = 0.4, # Mean and sigma are used for gaussian distr
spin_sigma = 0.2,
multiple_runs = True, # one has the possibility of performing multiple runs
Nrun = 10,
# Boson Grid Parameters
n_mus = 300,
mu_min = 2.15e-13,
mu_max = 1.27e-12,
scale = 'lin', # can be log or lin (for uniformly or logarithmically spaced points)
verbose = False,
v = False
):
# Check for input errors
if spin_dis not in ['unif', 'gauss']:
raise Exception("The spin distribution must be uniform ('unif') or Gaussian ('gauss')")
if mass_dis not in ['unif', 'exp', 'kroupa', 'triang']:
raise Exception("Supported distributions for BH masses are: \nuniform ('unif')\nexponential ('exp')\nkroupa ('kroupa')\ntriang('triang')")
if scale not in ['lin', 'log']:
raise Exception("The points on the boson mass grid can be distributed logaritmically ('log') or uniformly ('lin')")
# ==================== INITIALIZATION START ============================
# Cosmological Parameters
self.obs_distance = obs_distance
self.cluster_eta = cluster_eta
self.cluster_eta_sec = cluster_eta*365*86400
self.Nbh = Nbh
# Multiple runs
self.Nbins = Nbins
self.Nrun = Nrun
# BH Masses
self.mass_dis = mass_dis
self.Mbh_min = mass_range[0]
self.Mbh_max = mass_range[1]
# BH Spins
self.spin_dis = spin_dis
self.Sbh_min = spin_range[0]
self.Sbh_max = spin_range[1]
self.Sbh_mean = spin_mean
self.Sbh_sigma = spin_sigma
#Characteristic times
self.tau_inst = 0
self.tau_gw = 0
#Mass and Spin storage
self.Bhs = None
# Unmasked mass array (May be useful)
self.massesUM = 0
self.saved_freqs = None
self.saved_amps = None
self.saved_hists_counts = None
self.saved_hists_bins = None
self.saved_freqs_variance = None
self.saved_top_bins = None
self.saved_rsigma_freq = None
self.saved_lsigma_freq = None
'''Here I instantiate a boson mass grid inside the cluster class,
        so that I have a list of masses inside this class'''
bosonGrid.__init__(self,
n_mus = n_mus,
mu_min = mu_min,
mu_max = mu_max,
scale = scale,
verbose = verbose,
v = v
)
        ''' ATTENTION!!!
        The mass grid still has to be built (via build_mass_grid); if this is not
        done correctly no error will occur here, but the mass grid inside the
        cluster will simply be an array of zeros'''
self.cluster_populated = False
self.wave_emitted = False
self.freq_distr_calculated = False
# ======================= INITIALIZATION END ===============================
# Defining constants
G = 6.67e-11
c = 299792458
Om0 = 2*np.pi/86400 # 1/day
R0 = 5.5e6 # Rotational radius at Livingston (lower latitude)
hbar = 1.054571e-34
onev = 1.60217653e-19
fint = 1e30 # Small interaction regime.
duty = 0.681 # Detectors duty cycle (approximate)
Tobs=365*86400*duty # Here we should use the exact fraction of non-zero data,
def populate(self, export_BH_data = False, verbose = False, v = False):
""" Populating the BH array with randomly extracted masses """
if self.mass_dis == 'unif':
            masses = rnd.uniform(self.Mbh_min, self.Mbh_max, self.Nbh)
            Mbh_ave = np.mean(masses)
if verbose or v:
print(f"Black Holes born correctly")
print(f"=> You now own a population of {self.Nbh} Black holes")
print(f"=> Masses are uniformly distributed from {self.Mbh_min} to {self.Mbh_max} solar masses")
elif self.mass_dis == 'exp':
Mbh_ave = self.Mbh_max - self.Mbh_min
R1=1-np.exp(-self.Mbh_min/Mbh_ave)
R2=1-np.exp(-self.Mbh_max/Mbh_ave)
R=rnd.uniform(R1, R2, self.Nbh)
masses=-Mbh_ave*np.log(1-R)
if verbose or v:
print(f"Black Holes born correctly")
print(f"=> You now own a population of {self.Nbh} Black holes")
print(f"=> Masses are exponentially distributed from {self.Mbh_min} to {self.Mbh_max} solar masses")
elif self.mass_dis == 'kroupa':
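            # Inverse-transform sampling of a power-law mass function p(M) ~ M**(-a)
            # (Kroupa-like slope a = 2.3): K normalises the PDF on [Mbh_min, Mbh_max]
            # and Y maps the uniform draws through the inverse CDF.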
a=2.3
Mbh_unif = rnd.uniform(0, 1, self.Nbh)
K = (1-a)/(self.Mbh_max**(1-a)-self.Mbh_min**(1-a))
Y = ((1-a)/K*Mbh_unif + self.Mbh_min**(1-a))**(1/(1-a))
            jj = (Y > self.Mbh_min) & (Y < self.Mbh_max)
            masses = Y[jj]
if verbose or v:
print(f"Black Holes born correctly")
print(f"=> You now own a population of {self.Nbh} Black holes")
print(f"=> Masses are distributed with kroupa method from {self.Mbh_min} to {self.Mbh_max} solar masses")
elif self.mass_dis == 'triang':
masses = rnd.triangular(self.Mbh_min, self.Mbh_max - self.Mbh_min, self.Mbh_max, self.Nbh)
Mbh_ave = self.Mbh_max - self.Mbh_min
if verbose or v:
print(f"Black Holes born correctly")
print(f"=> You now own a population of {self.Nbh} Black holes")
print(f"=> Masses are triangularly distributed from {self.Mbh_min} to {self.Mbh_max} solar masses")
# Populating the BH array with randomly extracted spins
if self.spin_dis == 'unif':
spins = rnd.uniform(self.Sbh_min, self.Sbh_max, self.Nbh)
if verbose or v:
print(f"=> Your Black Holes now have random spin uniformly distributed from {self.Sbh_min} to {self.Sbh_max}.\n")
elif self.spin_dis == "gauss":
"""
            Attention: simply drawing spins from a Gaussian with numpy can produce
            values outside the allowed range, so we prebuild an array of admissible
            spin values and randomly draw from it instead
"""
step = (self.Sbh_max - self.Sbh_min)/self.Nbh
            spin_values = np.arange(self.Sbh_min, self.Sbh_max, step)
gaussian = rnd.normal(self.Sbh_mean, self.Sbh_sigma, int(1e6))
h, bin_edges = np.histogram(gaussian, bins = self.Nbh, density = True)
p = h * np.diff(bin_edges)
            spins = rnd.choice(spin_values, size = self.Nbh, p = p)
if verbose or v:
print(f"=> Your Black Holes now have random spin with mean value {self.Sbh_mean}, a Gaussian distribution was used.\n")
self.Bhs = np.array([masses, spins])
self.massesUM = masses
self.cluster_populated = True
if export_BH_data:
# Returns an array of black holes masses and spin: k-th BH is Bhs[k][mass, spin]
return self.Bhs
def emit_GW(self, remove_undetectable = True, verbose = False, v = False,
minimum_det_freq = 20, maximum_det_freq = 610, tau_inst_mult = 10, use_old = True):
#tracemalloc.start()
if self.cluster_populated & self.mass_grid_built:
if verbose or v:
print("\nEmission of Gravitational Waves...")
start = time()
Mbhs = self.Bhs[0, :]
Sbhs = self.Bhs[1, :]
mus = self.boson_grid
c_3 = self.c*self.c*self.c
alpha = self.G/(c_3*self.hbar)*2e30*Mbhs*mus[:, np.newaxis]*self.onev
            # raise (alpha/0.1) to the 9th power by repeated multiplication
temp = alpha/0.1
for i in range(8):
temp = temp * alpha/0.1
tau_inst = 27*86400/10.*Mbhs*(1/temp)/Sbhs
            # raise (alpha/0.1) to the 15th power
temp = alpha/0.1
for i in range(14):
temp = temp * alpha/0.1
tau_gw = 3*6.5e4*365*86400*Mbhs/10*(1/temp)/Sbhs
del temp
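            # GW frequency of the boson-cloud (annihilation) signal encoded below:
            #   f_gw ~ 483 Hz * (mu / 1e-12 eV) * [1 - (0.0056/8) * (Mbh / 10 Msun)**2 * (mu / 1e-12 eV)**2]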
freq = 483*(1-0.0056/8*(Mbhs/10.)**2*(mus[:, np.newaxis]/1e-12)**2)*(mus[:, np.newaxis]/1e-12)
if verbose or v:
print(f"=> {freq.shape[0] * freq.shape[1]:.0E} Frequencies calculated")
end = time()
print(f"Total time for frequency calculation: {end - start:.2f} s")
print(f"Seconds per freq: {(end-start)/(freq.shape[0] * freq.shape[1]):.2E} s\n")
freq_max = c_3/(2*np.pi*self.G*2e30*Mbhs)*Sbhs/(1+np.sqrt(1-Sbhs**2)) # ------ Maximum allowed GW frequency
            # raise (alpha/0.1) to the 17th power
temp = alpha/0.1
for i in range(16):
temp = temp * alpha/0.1
mus_2 = mus * mus / 1e-24
fdot = 7e-15 * mus_2[:, np.newaxis] * temp # ----------------------------- Spin-up term due to boson annihilations
fdot2 = 1e-10*(10**17/self.fint)**4*mus_2[:, np.newaxis] * temp# ----------- Spin-up term due to boson emission
fdot = fdot + fdot2
del temp
del fdot2
# This is still to be checked
if use_old:
freq_now = freq + fdot * (self.cluster_eta_sec-tau_inst)
else:
emission_stop = np.minimum(self.cluster_eta_sec, tau_gw)
freq_now = freq + fdot * (emission_stop-tau_inst)
dfdot = self.Om0*np.sqrt(2*np.ceil(freq_now/10)*10*self.R0/self.c)/(2*self.Tobs/self.duty)
if verbose or v:
print("\nCalculating wave amplitudes...")
emission_start = time()
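            # chi_c is the critical dimensionless BH spin below which the superradiant
            # instability cannot develop at this alpha (cf. the Sbhs > chi_c cut below)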
chi_c = 4 * alpha/( 1 + 4. * alpha * alpha)
temp = alpha/0.1
for i in range(6):
temp = temp * alpha/0.1
h0 = 1/np.sqrt(3)*3.0e-24/10*Mbhs*temp*(Sbhs-chi_c)/0.5# --- GW peak amplitude at d=1 kpc
del temp
h0 = h0/self.obs_distance
timefactor = (1+(self.cluster_eta_sec-tau_inst)/tau_gw) # --------------------------- Time-dependent reduction factor
h0 = h0/timefactor
del timefactor
'''
conditions to be met in order to have a potentially detectable signal
            (there may be some redundancy)
o tau_inst < t0s : superradiance time scale must be shorter than system age
o freq < freq_max : condition for the development of the instability
            o 10*tau_inst < tau_gw : we want the instability to be fully completed
o chi_i > chi_c : condition for the development of the instability
o (freq>20) & (freq<610) : GW frequency in the search band
o dfdot > fdot : signal spin-up within half bin
'''
self.wave_emitted = True
if verbose or v:
emission_done = time()
print(f"=> Gravitational Waves emitted, elapsed time: {emission_done - emission_start:.2f} s")
if remove_undetectable:
if verbose or v:
print("\nSelecting detectable Waves...")
start_selection = time()
cond = np.array(
(tau_inst < self.cluster_eta_sec) &
(freq > minimum_det_freq) &
(freq < maximum_det_freq) &
(freq < freq_max) &
(tau_inst_mult*tau_inst < tau_gw) &
(Sbhs > chi_c) &
(dfdot > fdot) &
(freq_now < maximum_det_freq)
)
# Applying conditions
Mbhs = Mbhs * cond[:] # This is now a matrix
Sbhs = Sbhs * cond[:] # This is now a matrix
freq_now = freq_now * cond
h0 = h0 * cond
# Removing boson mass that didn't produce any wave
parser = np.any(Mbhs, axis = 1)
Mbhs = Mbhs[parser]
Sbhs = Sbhs[parser]
freq_now = freq_now[parser]
h0 = h0[parser]
self.tau_gw = tau_gw
self.tau_inst = tau_inst
if verbose or v:
print(f"=> {self.n_mus - Mbhs.shape[0]} points were removed from the grid")
self.boson_grid = self.boson_grid[parser]
if verbose or v:
selection_end = time()
print(f"=> Grid Updated - elapsed time: {selection_end - start_selection:.2f} s")
# Updating stored data
if verbose or v:
print("\nSaving data ...")
'''
            The code above leaves a 0 in place of BHs that didn't produce observable
            waves; by masking the arrays, those values will not be considered in
            calculations. It is the fastest way to remove those data.
'''
self.Bhs = np.array([Mbhs, Sbhs])
self.saved_freqs = freq_now
self.saved_amps = h0
if verbose or v:
print("=> Data saved\n\n")
elif not self.cluster_populated:
print(colored("================== WARNING! ==================", "red"))
print(colored("No GW was emitted, cluster not populated.", 'red'))
print(colored("Run custer.populate() before trying to emit GWs.", 'red'))
print(colored("================================================", 'red'))
elif not self.mass_grid_built:
print(colored("===================== WARNING! =====================", "red"))
print(colored("No GW was emitted, mass grid not generated.", 'red'))
print(colored("Run custer.build_mass_grid() before trying to emit GWs", 'red'))
print(colored("======================================================", 'red'))
# Some functions to extract data from the cluster
def get_masses(self):
if self.cluster_populated:
# returns a 2D array, every row is the array of masses that produced
# detectable waves
return np.ma.masked_equal(self.Bhs[0], 0)
else:
raise Exception("Cluster was not populated, run cl.populate() before")
def get_spins(self):
if self.cluster_populated:
return np.ma.masked_equal(self.Bhs[1], 0)
else:
raise Exception("Cluster was not populated, run cl.populate() before")
def get_freqs(self):
if self.wave_emitted:
return np.ma.masked_equal(self.saved_freqs, 0)
else:
raise Exception("Cluster was not populated, run cl.emit_GW() before")
def get_amplitudes(self):
if self.wave_emitted:
return np.ma.masked_equal(self.saved_amps, 0)
else:
raise Exception("Cluster was not populated, run cl.emit_GW() before")
def get_freq_variance(self):
if self.freq_distr_calculated:
return self.saved_freqs_variance
else:
raise Exception("Frequency distribution was not calculated, run cl.calc_freq_distr() before.")
# ==========================================================================
'''
    TODO: add a helper that counts lengths while automatically skipping masked values
'''
# ==========================================================================
def calc_freq_distr(self, nsigma = 1, nbins = 32, norm_distr = True, remove_outliers = True, verbose = False, v = False):
if verbose or v:
print("Calculating the frequency fluctuations...\n")
sigma = { # -------------------------------------------------------------------- Convert sigma to probability range
1 : .68,
2 : .95,
3 : .997
}
sel_sigma = sigma[nsigma]
freqs = np.sort(self.saved_freqs, kind = 'mergesort') # sorting candidates
masked_freqs = np.ma.masked_equal(freqs, 0)
em_BHs = np.count_nonzero(freqs, axis = 1)
n_sel_BHs = np.around(em_BHs * sel_sigma).astype(int)
# Creating the histogram of frequencies
if verbose or v:
print("Making the histograms...")
'''
//// Function for histograms
'''
bins, counts, binsizes = hist_by_row(masked_freqs, nbins=nbins, normalize=norm_distr)
del masked_freqs # Saving Memory space
if verbose or v:
print("=> Saving histograms in:\n=> - cl.saved_hists_counts\n=> - cl.saved_hists_bins")
self.saved_hists_counts = counts
self.saved_hists_bins = bins
# Finding the most occurring frequency
bin_mids = bins[:, :-1] + binsizes[:] / 2
if verbose or v:
print("=> Looking for peaks")
max_freqs_idx = np.argmax(counts, axis = 1)
max_freqs = bin_mids[range(bin_mids.shape[0]), max_freqs_idx]
        # Find the emitted frequency closest to the peak bin
delta_freq = np.abs(freqs[:] - max_freqs[:, np.newaxis])
closest_freq_to_bin_idx = np.argmin(delta_freq, axis = 1)
closest_freq_to_bin = freqs[range(freqs.shape[0]), closest_freq_to_bin_idx]
        # Calculating indices of freqs in sigma
if verbose or v:
print(f"=> Selecting frequencies in {sel_sigma * 100:.0f}% range of peak")
max_idx = np.minimum(freqs.shape[1], closest_freq_to_bin_idx + np.around(n_sel_BHs / 2))
max_idx = max_idx.astype(int) + 1
r = np.arange(freqs.shape[1])
        mask = (closest_freq_to_bin_idx[:, None] <= r) & (r < max_idx[:, None])  # keep indices from the peak up to max_idx
r_freq_in_sigma = | np.full((freqs.shape[0], freqs.shape[1]), -1e15) | numpy.full |
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Test suite for the util.py module.
The tests must be linked with a function space class object in the setUp method:
to run the tests use:
from esys.bruce import Brick
class Test_utilOnBruce(Test_util_no_tagged_data):
def setUp(self):
self.domain = Brick(10,10,13)
self.functionspace = ContinuousFunction(self.domain)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_utilOnBruce))
unittest.TextTestRunner(verbosity=2).run(suite)
This test assumes that samples with x_0 coordinate 0 are tagged with 1 and all samples tagged with 1 have x_0
coordinate 0.
:note: at this stage this test will not pass as it tests for functionality that has not been implemented yet. It also
does not test the full functionality of util.py yet.
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base, Test_util_values
from test_util_reduction_new import Test_util_reduction_new
from test_util_unary_new import Test_util_unary_new
from test_util_binary_new import Test_util_binary_new
from test_util_binary_leftover import Test_util_binary_leftover
## these aspects are test in the _new tests
#from test_util_overloaded_binary_no_tagged_data import Test_util_overloaded_binary_no_tagged_data
#from test_util_overloaded_binary_with_tagged_data import Test_util_overloaded_binary_with_tagged_data
#from test_util_unary_no_tagged_data import Test_util_unary_no_tagged_data
#from test_util_unary_with_tagged_data import Test_util_unary_with_tagged_data
#from test_util_binary_no_tagged_data import Test_util_binary_no_tagged_data
#from test_util_binary_with_tagged_data import Test_util_binary_with_tagged_data
from test_util_spatial_functions1 import Test_Util_SpatialFunctions_noGradOnBoundary_noContact
from test_util_spatial_functions2 import Test_Util_SpatialFunctions_noGradOnBoundary
from test_util_spatial_functions3 import Test_Util_SpatialFunctions
from test_util_slicing_no_tagged_data import Test_util_slicing_no_tagged_data
from test_util_slicing_with_tagged_data import Test_util_slicing_with_tagged_data
class Test_util_reduction(Test_util_reduction_new):
""" test for reduction operation Lsup,sup,inf for all data types"""
pass
class Test_util_unary(Test_util_unary_new):
""" all unary tests """
pass
class Test_util_binary(Test_util_binary_new, Test_util_binary_leftover):
"""
test for all binary operation
"""
pass
## Testing of these ops is now in Test_util_binary
#class Test_util_overloaded_binary(Test_util_overloaded_binary_no_tagged_data,Test_util_overloaded_binary_with_tagged_data):
#"""test for all overloaded operation"""
#pass
class Test_util(Test_util_unary_new,Test_util_reduction_new, Test_util_binary):
"""all tests"""
pass
class Test_util_overloaded_binary_still_failing(Test_util_base):
"""
these overloaded operations still fail!
- wrong return value of Data binaries (Mantis 0000054)
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.93686078973,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.51662736235119944, 2.8171396846123073])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.4202334273802917, -2.1197211051191838]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(-2.22764991169,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[2.0746979587719538, 0.99992890307042437, -2.3128078094931848, -4.0103712739722654,
4.8853529531011013],
[0.09856857946648212, 0.73520899085847624, -3.6585265509750844, 3.0095320582437939, 3.4125902906059444],
[1.4894150898632059,
-1.4124339049368793, 1.5397397961722188, 4.8841402613336111, 1.1241155288598881], [2.8283598865494408,
1.5980765295723476,
-1.0022373011497274, -2.0622178471715067, 4.9699555072046042]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.15295195292152819, -1.2277210086230577, -4.5404577211866668, -6.2380211856657475,
2.6577030414076193],
[-2.1290813322269999, -1.4924409208350058, -5.8861764626685664, 0.78188214655031185, 1.1849403789124624],
[-0.73823482183027611,
-3.6400838166303613, -0.68791011552126324, 2.6564903496401291, -1.103534382833594], [0.60070997485595878,
-0.62957338212113445,
-3.2298872128432095, -4.2898677588649887, 2.7423055955111222]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.67318656609,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.9409337165894076, 1.6101568824796857], [1.2441782896909706, 1.2872758759353298]],
[[4.022494973005406,
-2.758155583474049], [1.8311643900357311, 4.0940647266277157]], [[2.5378127449303243, 0.063283784588161751],
[4.5495644157820809,
2.8673770080506742]], [[-0.93484143473477577, 4.914438575705228], [-1.951066895455166, -1.2021165219313259]],
[[-0.4220608661301819, -4.9682501775464418], [0.98338081352961559, 3.4054674805751066]], [[3.9967556325744127,
-4.7659141789100659],
[0.34265275409881024, -0.25226631819007572]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.73225284950136693, -3.0630296836110888], [-3.429008276399804, -3.3859106901554448]],
[[-0.6506915930853685, -7.4313421495648235], [-2.8420221760550435, -0.57912183946305884]],
[[-2.1353738211604503,
-4.6099027815026128], [-0.12362215030869361, -1.8058095580401003]], [[-5.6080280008255503,
0.24125200961445348],
[-6.6242534615459405, -5.8753030880221004]], [[-5.0952474322209564, -9.6414367436372164],
[-3.6898057525611589,
-1.2677190855156679]], [[-0.67643093351636185, -9.4391007450008395], [-4.3305338119919643,
-4.9254528842808503]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(4.16645075056,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.5917180025121436, -0.50082927718401749, 0.71261274386013618, 2.4216324938382936],
[2.5988764746053095,
0.15985324844397741, -2.1952754277135025, -2.1102730593254035], [4.7816092243808672, -3.1240954141765496,
4.0831220997721331, 2.4301203557965216]], [[3.4691826046114969, -2.4961081730013177, -4.9623977358253111,
2.2652744558918698],
[0.41830032681767193, -3.2186897293959649, -4.1590967541108324, -1.7789994379155196], [-0.17901184206486764,
-0.85223673399918809, 1.2515459884606104, -4.530305999148645]]], [[[-4.9028671865135838, 3.9106181278983012,
0.69716765577825246, 4.8537569187159395], [-2.8912890367657318, -4.8177854256421764, -4.3303142092509415,
-0.99481907472179198], [-1.2640734452454305, 4.8028129765204639, -2.5491771511234962, 3.2550469051981921]],
[[2.0572417475748761, 3.7392706991121187, 4.5778678295843704, 3.6658188498258486], [-2.7069743698567206,
-2.684769111460461, -3.0941141983763156, -2.1180719361316589], [-1.4744678905986119, 1.926687036555828,
2.2206999030392947, 0.72956973127168734]]], [[[-2.8290294475300151, -3.1467788245496631, 3.6471044178360348,
3.5237454065241209], [-1.6165850845596652, 1.2437746199742081, -2.8022357261752004, -1.9652183524467781],
[-2.3842126490032092, 3.7068998814751613, -1.389546865398994, -1.7153758702474589]], [[-1.0746517242894815,
-4.3575382718398723, 0.93160793707280121, 1.4002531109392731], [-1.5745690740270168, -3.4394046042905124,
4.2641517580348793, -1.7620679696550843], [-4.2559205627171135, 2.1912319337278863, 1.1987265764805723,
-3.2957352772592809]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.7581687530761378, 3.6656214733799768, 4.8790634944241305, 6.5880832444022879],
[6.7653272251693037, 4.3263039990079717, 1.9711753228504918, 2.0561776912385907], [8.9480599749448615,
1.0423553363874447, 8.2495728503361274, 6.5965711063605159]], [[7.6356333551754911, 1.6703425775626766,
-0.7959469852613168, 6.4317252064558641], [4.5847510773816662, 0.94776102116802941, 0.0073539964531619262,
2.3874513126484747], [3.9874389084991266, 3.3142140165648062, 5.4179967390246047, -0.36385524858465068]]],
[[[-0.7364164359495895, 8.0770688784622955, 4.8636184063422467, 9.0202076692799338], [1.2751617137982625,
-0.6513346750781821, -0.16386345868694718, 3.1716316758422023], [2.9023773053185637, 8.9692637270844582,
1.6172735994404981, 7.4214976557621863]], [[6.2236924981388704, 7.905721449676113, 8.7443185801483647,
7.8322696003898429], [1.4594763807072737, 1.4816816391035332, 1.0723365521876786, 2.0483788144323354],
[2.6919828599653823, 6.0931377871198222, 6.3871506536032889, 4.8960204818356816]]], [[[1.3374213030339792,
1.0196719260143312, 7.8135551684000291, 7.6901961570881152], [2.5498656660043291, 5.4102253705382024,
1.3642150243887938, 2.2012323981172162], [1.7822381015607851, 7.8733506320391555, 2.7769038851650003,
2.4510748803165354]], [[3.0917990262745128, -0.19108752127587803, 5.0980586876367955, 5.5667038615032673],
[2.5918816765369774, 0.72704614627348185, 8.4306025085988736, 2.40438278090891], [-0.089469812153119221,
6.3576826842918805, 5.3651773270445666, 0.87071547330471333]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.8454947431609945, 3.4801848055393254]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.181985677208)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([4.0274804203691783, 3.6621704827475092]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.6719646801005306, 4.0262173014652003]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.7355891147806837, -3.0309968912239551])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([6.4075537948812142, 0.99522041024124519]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.209887477038702, 2.087043312051243, 3.7254247294014622,
-3.7510652436671732, 0.70343608099575317], [4.1654611738215745, 1.5418518980850271,
2.7730022594684423, 3.386030420596251, 1.2758288509710365], [2.2174938185138764,
-1.244837837360393, 2.2331288285078887, -1.1442348969501834, 1.9394801392868004],
[0.68612447219195705, 0.7127527031233436, -3.6346644102130776, 2.0671128943191714,
3.7445028703597156]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.82316401579)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0330514928326018, 6.9102073278451428, 8.5485887451953619,
1.0720987721267266, 5.5266000967896529], [8.9886251896154743, 6.3650159138789268,
7.596166275262342, 8.2091944363901508, 6.0989928667649362], [7.0406578343077761,
3.5783261784335068, 7.0562928443017885, 3.6789291188437163, 6.7626441550807002],
[5.5092884879858568, 5.5359167189172434, 1.1884996055808221, 6.8902769101130712,
8.5676668861536154]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-3.62961836797558, 4.0323249470469893, -2.4833229912823516,
-0.0081902035785272886, -0.26448613257378906], [2.0867535529248489, 0.049446344294963751,
4.4906317789174501, 2.6121865600043499, 1.3687146632565392], [4.2509170325103511,
2.9845191554148567, -0.9329820582137387, -0.58236994049271118, -3.4448732067194388],
[-2.3231599587033402, 1.6550934434842866, -4.5990521452319584, -2.1470268566500152,
-3.9698084155531008]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[3.3234017918244003, 3.3386199217996175, -2.5928786077225316,
-4.1429140632213803, 0.42204291369978719], [3.4123580113357495, -3.9076190537235664,
1.8779298531672159, 0.98377543853039562, -4.9365820051249267], [4.5252395032935961,
-4.8193051910732096, 1.060979071451845, -3.2927325266544871, -3.3828356655691971],
[-4.6411804903406182, -0.42921544747540707, -2.4541073523344323, -0.70845691989162329,
-1.2357505826155588]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.3062165761511797, 7.3709448688466068, -5.0762015990048832,
-4.1511042667999076, 0.15755678112599814], [5.4991115642605983, -3.8581727094286027,
6.3685616320846661, 3.5959619985347455, -3.5678673418683875], [8.7761565358039473,
-1.834786035658353, 0.12799701323810631, -3.8751024671471983, -6.8277088722886354],
[-6.9643404490439584, 1.2258779960088795, -7.0531594975663907, -2.8554837765416385,
-5.2055589981686596]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.0819775543023136, 4.4438294149957258], [1.203494127071604,
1.3934659764012478]], [[-1.7207192546012995, 1.128687542370864], [1.013953229943537,
2.0535582502969056]], [[-1.8482126685735398, 0.64499519705235819],
[-4.1200947648310313, 3.8041018736261574]], [[-0.12876390427677542, -0.26859118353213773],
[-2.8945993824974847, -3.3476923883525944]], [[3.1332107854705562, -4.6334666373330595],
[3.0499420638074994, -2.7959034777693104]], [[4.726734207260332, -1.3724501610660034],
[3.3499737674080023, -2.515294322458935]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.860178486532)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.2217990677700952, 5.3040079015279442], [2.0636726136038224,
2.2536444629334662]], [[-0.86054076806908109, 1.9888660289030824], [1.8741317164757554,
2.913736736829124]], [[-0.98803418204132143, 1.5051736835845766], [-3.2599162782988129,
4.6642803601583758]], [[0.73141458225544298, 0.59158730300008067], [-2.0344208959652663,
-2.487513901820376]], [[3.9933892720027746, -3.7732881508008411], [3.9101205503397178,
-1.935724991237092]], [[5.5869126937925504, -0.51227167453378497], [4.2101522539402207,
-1.6551158359267166]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-1.849788129717993, 0.64693319038907493], [3.0379670344950327,
0.80277076526299229]], [[2.4995340022105639, -4.3955703049125949], [0.58331276679079203,
0.044119077451267863]], [[2.2979922792046947, 1.6054844683234073], [0.50524258350986084,
-3.5539312710422779]], [[-1.1980433912188793, -2.6450000406046001], [-2.4128326188310121,
0.80678465051263526]], [[-2.9963692865064209, -1.0152803020104519], [-0.21931259441936035,
-1.153119362615751]], [[-4.2927186206837717, 0.4561872009236847], [3.0860876046130041,
-0.78568544768378068]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.4985389035935222, 1.8888458641158987], [-4.2891085749380489,
2.8296217607019845]], [[-0.8200921678141917, 4.4359194831012676],
[-4.6185751325042244, 0.16520675598470014]], [[-2.801157092531934, 3.6231020804204928],
[1.5439760747845899, 2.0378140868272894]], [[0.99864930993784018, 3.369884315459073],
[4.399815205976239, -4.9546136700941936]], [[1.6240932313892289, -3.4517363344048615],
[2.8668483027947236, 1.1624090061600336]], [[2.6364367974081624, 2.628371373764919],
[-2.5877409052653833, -1.29236451403668]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-5.3483270333115147, 2.5357790545049737], [-1.2511415404430162,
3.6323925259649767]], [[1.6794418343963722, 0.040349178188672674],
[-4.0352623657134323, 0.209325833435968]], [[-0.50316481332723928, 5.2285865487439001],
[2.0492186582944507, -1.5161171842149885]], [[-0.19939408128103908, 0.72488427485447282],
[1.9869825871452269, -4.1478290195815584]], [[-1.372276055117192, -4.4670166364153134],
[2.6475357083753632, 0.0092896435442826331]], [[-1.6562818232756094,
3.0845585746886037], [0.49834669934762088, -2.0780499617204606]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.026017904532606551, -0.80192450547405958,
0.93785799257835656, -4.4900007911078319], [-1.8444162073720949,
1.2059856695600812, 1.8326324480310756, 3.3745782356451564],
[3.0929324433706693, -0.94197156488767142, -2.3469684397851207,
-4.8976052662192613]], [[1.2658444546015346, 3.0389250549456399,
-2.567254770133963, 3.7513728753285314], [-0.10225306211433605,
-0.34121316520335299, -2.8745573331597321, -0.73976781968982142],
[4.6114590072566681, 3.5325642767850063, 2.1587079910040661,
3.8644723652636905]]], [[[-2.5953113243103623, 0.6437882672443429,
4.5677362343759853, 3.4108524985046262], [2.9904338528780352,
0.73113299006492127, 2.4253724263400445, 3.8646536702562031],
[-1.2545053686514152, -4.2675706218911706, -3.6576679389702105,
-0.29502287354943402]], [[0.9550527228483654, 2.9537233833481267,
-2.6904009310953283, 1.5998857010519698], [-3.7171702199982004,
-1.1578306702024044, 1.764070139728485, -1.1506068782808967],
[1.5727320181060982, 0.18468074769418674, 3.3262967055395372,
-1.2208265816075849]]], [[[-0.25003967903418278, -2.603663543909648,
4.6824047463125531, 1.0968919539473987], [1.3471700099604398,
-3.8321880437450218, -4.2809409903460676, 1.2933005361204906],
[-2.857251250328674, 3.6768205829450178, -2.7999953058490643,
2.1117422072666692]], [[-2.1994223710236427, 3.7669030216280923,
-3.5232105054852991, -3.7071480752824462], [-0.35952695279389246,
2.5451704526750873, -4.2842310996736144, -1.3813503044378783],
[-2.5647173415905145, 4.7437501634141572, -4.2234318870342245,
2.1862042652792866]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.33323555487)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.30721765033724147, -0.46868895060421156,
1.2710935474482046, -4.1567652362379839], [-1.5111806525022469,
1.5392212244299293, 2.1658680029009236, 3.7078137905150044],
[3.4261679982405173, -0.6087360100178234, -2.0137328849152727,
-4.5643697113494133]], [[1.5990800094713826, 3.3721606098154879,
-2.234019215264115, 4.0846084301983794], [0.23098249275551197,
-0.0079776103335049697, -2.541321778289884, -0.4065322648199734],
[4.9446945621265161, 3.8657998316548543, 2.4919435458739141,
4.1977079201335386]]], [[[-2.2620757694405143, 0.97702382211419092,
4.9009717892458333, 3.7440880533744743], [3.3236694077478832,
1.0643685449347693, 2.7586079812098925, 4.1978892251260511],
[-0.92126981378156714, -3.9343350670213226, -3.3244323841003625,
0.038212681320413999]], [[1.2882882777182134, 3.2869589382179747,
-2.3571653762254803, 1.9331212559218178], [-3.3839346651283524,
-0.82459511533255636, 2.097305694598333, -0.81737132341104868],
[1.9059675729759462, 0.51791630256403476, 3.6595322604093852,
-0.88759102673773693]]], [[[0.083195875835665234, -2.2704279890398,
5.0156403011824011, 1.4301275088172467], [1.6804055648302878,
-3.4989524888751737, -3.9477054354762195, 1.6265360909903386],
[-2.524015695458826, 4.0100561378148658, -2.4667597509792163,
2.4449777621365172]], [[-1.8661868161537947, 4.1001385764979403,
-3.1899749506154511, -3.3739125204125981], [-0.026291397924044446,
2.8784060075449354, -3.9509955448037664, -1.0481147495680303],
[-2.2314817867206664, 5.0769857182840052, -3.8901963321643764,
2.5194398201491346]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[1.6204760394819004, -0.95393695229398112, -1.221681223499369, 2.6618713903411937],
[-1.5387523541807724, 4.6220978399651482, -2.1795716817360713, -3.776821154104939], [1.4330066566763016,
3.7880327985429378, -0.65902727001966976, -4.29506128665055]], [[-4.0199255222547103, -3.644811287300751,
3.6508998060332054, -3.569704984460552], [-3.8429890733645489, -2.9119635791576437, 2.3183698092323652,
1.3643661323778851], [2.9328022056563725, -0.080129403375118535, 0.15566128013433289, 2.344258136058456]]],
[[[3.03272210358924, 2.8841814084596393, -4.059068204445289, -0.091640986980607408], [-4.2591024547151859,
-0.36305436045316863, 0.19284537915686428, 4.5041324479849649], [1.2988816365062537, -1.6778808169453416,
-3.5496975707176146, 4.314356820196215]], [[-1.4533462849506518, -1.003910808707118, 3.8948057966291092,
1.266066103629278], [-4.4119138102620346, -2.1246183047037603, -2.4610566322999161, -3.5862383252945271],
[2.9290698526446066, -0.26093763373887136, 0.87809331627623344, -0.47993365832407076]]], [[[2.1717793325666745,
0.83592896851733212, -2.2538107669063279, 1.6303402530881517], [-0.53207705017646578, -4.5214994998308979,
-3.6999121226789988, 3.5355643886671686], [3.3936340080223193, -2.1140030580705247, 1.821327452830638,
-1.6123768640462668]], [[2.3105165926895497, -3.0414367260786292, -1.5788704194425076, 1.0377969965556915],
[1.3575822980511116, 4.3465002873169833, 0.55678010189701688, 4.99079375906609], [4.2819911907361128,
4.9615031124625322, 2.7964852390480104, 0.029646894001982282]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.779495003239937, -4.7840877608643506, 2.651273004571375, -2.179381582597685],
[-0.27370078331190673, -3.6151069379138887, -0.6880481455894909, 4.4373993248198644], [-1.6276288613086387,
-1.6376839670015721, -3.1138607609774835, -2.7809800576738719]], [[0.85446276622548556, -4.3676040003341114,
-4.0083595770538496, -3.915065868011578], [1.6989039436984452, 3.5347026474299419, -1.8748410832866327,
-4.6526613314583045], [1.9480513434936046, 4.7386182205273322, -1.2001630607496541, 1.8094726084650006]]],
[[[4.9996435011863589, 0.60285036470010045, 1.457536438507919, 2.7443970579013879], [4.131864622110669,
0.20996245110639133, 3.3652305004680549, 3.1437873739212119], [-3.0818670302029405, -2.461603163946088,
-0.56609916674720218, -4.1186964404844861]], [[-2.7183232427482262, -2.1509712746053999, -2.281087666097271,
-2.4094567126275344], [-3.4723848022755091, -1.563218902128277, -4.7598832341275878, 1.8751725484288029],
[-4.0474621098792882, 0.59894943914858167, 1.0736279895120182, 4.5015525072725033]]], [[[-3.0082200796749703,
0.23283074563588535, 2.5230303985659734, 4.8262414779000231], [3.3772486493634837, 1.8234317033464915,
-1.7905158376185746, -2.9990918311449244], [-3.6765085717620041, 2.0057610304617572, -2.1487273241068525,
-4.1965541804451352]], [[0.26210933249566715, -2.9167787158271663, -0.89589477578380539, -0.41427249402553912],
[-3.1708181836677332, 4.3890602408555726, -1.1754542095914857, 4.8422639037274919], [-3.0044937138520034,
-4.1626528668210083, 0.20385989364778467, -0.016309737359709864]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.3999710427218375, -5.7380247131583317, 1.429591781072006, 0.48248980774350869],
[-1.8124531374926791, 1.0069909020512595, -2.8676198273255622, 0.66057817071492542], [-0.19462220463233715,
2.1503488315413657, -3.7728880309971533, -7.0760413443244214]], [[-3.1654627560292248, -8.0124152876348624,
-0.35745977102064419, -7.4847708524721295], [-2.1440851296661037, 0.62273906827229819, 0.44352872594573256,
-3.2882951990804195], [4.8808535491499772, 4.6584888171522136, -1.0445017806153212, 4.1537307445234566]]],
[[[8.0323656047755989, 3.4870317731597398, -2.60153176593737, 2.6527560709207805], [-0.12723783260451693,
-0.1530919093467773, 3.5580758796249192, 7.6479198219061768], [-1.7829853936966868, -4.1394839808914297,
-4.1157967374648168, 0.19566037971172889]], [[-4.171669527698878, -3.154882083312518, 1.6137181305318382,
-1.1433906089982564], [-7.8842986125375436, -3.6878372068320373, -7.2209398664275035, -1.7110657768657243],
[-1.1183922572346816, 0.33801180540971032, 1.9517213057882516, 4.0216188489484326]]], [[[-0.83644074710829575,
1.0687597141532175, 0.26921963165964558, 6.4565817309881748], [2.8451715991870179, -2.6980677964844064,
-5.4904279602975734, 0.53647255752224421], [-0.28287456373968478, -0.10824202760876744, -0.3273998712762145,
-5.808931044491402]], [[2.5726259251852168, -5.9582154419057956, -2.474765195226313, 0.62352450253015235],
[-1.8132358856166215, 8.7355605281725559, -0.61867410769446884, 9.833057662793582], [1.2774974768841094,
0.79885024564152385, 3.0003451326957951, 0.013337156642272419]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.50668349593,self.functionspace)
arg0.setTaggedValue(1,-3.09146650776)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.32369560802)
sub=res.substitute({arg1:s1})
ref=Data(-0.81701211209,self.functionspace)
ref.setTaggedValue(1,-7.41516211578)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(3.83444600418,self.functionspace)
arg0.setTaggedValue(1,-0.266863397142)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.6938635924807581, -2.3199399928130826])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.5283095966592981, 1.5145060113654574]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4270001953384694, -2.5868033899553713]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(-2.85642807584,self.functionspace)
arg0.setTaggedValue(1,-0.357260114938)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[4.4124412590911621, 1.732298167196193, 1.8228166076040306, -3.9853565905277355,
3.3793508288079881], [-1.5339512663354116, -2.8915144317379058, -3.6493591659102464, 1.4243106283527815,
-0.6931246781623841], [4.7714119110273394, 0.45700055229079606, 1.2539528503924027, -1.4029360809413403,
2.8915917074007416], [4.2546657221847255, 3.2639891865967527, -0.4712967898993945, -3.9077971138749112,
-3.5655383189938084]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.5560131832472779, -1.1241299086476912, -1.0336114682398536, -6.8417846663716197,
0.52292275296410384], [-4.3903793421792958, -5.74794250758179, -6.5057872417541311, -1.4321174474911027,
-3.5495527540062684], [1.9149838351834552, -2.3994275235530882, -1.6024752254514816, -4.2593641567852245,
0.035163631556857311], [1.3982376463408412, 0.40756111075286849, -3.3277248657432787, -6.7642251897187951,
-6.4219663948376926]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[4.0551811441529519, 1.3750380522579828, 1.4655564926658204,
-4.3426167054659457, 3.0220907138697779], [-1.8912113812736218, -3.248774546676116, -4.0066192808484562,
1.0670505134145714, -1.0503847931005943], [4.4141517960891292, 0.099740437352585865, 0.89669273545419248,
-1.7601961958795505, 2.5343315924625314], [3.8974056072465153, 2.9067290716585426, -0.82855690483760469,
-4.2650572288131219, -3.9227984339320185]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-2.98759917871,self.functionspace)
arg0.setTaggedValue(1,-4.26584239637)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[0.65736935684204045, 1.4685807994312459], [0.99740155640158257, -2.8001282911414127]],
[[-0.80947613326718226, -4.0270117786915378], [1.1564198209626229, -4.917538904347448]], [[-1.0488230155998202,
4.0958534641909754], [-4.9502522108275002, -0.19486641488505008]], [[-4.507307254914509, -0.98539101308887389],
[-4.5909807035957675, 2.4265853650826985]], [[-4.252924691613126, 0.42394291278212481], [3.4198717705842103,
-4.6000003047031024]], [[4.9609535782609235, 3.1625779529060711], [0.26834958946896492, 3.0941570460788874]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-2.3302298218695272, -1.5190183792803218], [-1.9901976223099851, -5.7877274698529799]],
[[-3.7970753119787499, -7.0146109574031055], [-1.8311793577489448, -7.9051380830590157]], [[-4.0364221943113883,
1.1082542854794077], [-7.9378513895390679, -3.1824655935966177]], [[-7.4949064336260767, -3.9729901918004416],
[-7.5785798823073351, -0.56101381362886915]], [[-7.2405238703246937, -2.5636562659294428], [0.43227259187264266,
-7.5875994834146701]], [[1.9733543995493559, 0.17497877419450347], [-2.7192495892426027,
0.10655786736731976]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.6084730395261495, -2.7972615969369441], [-3.2684408399666074,
-7.0659706875096031]], [[-5.0753185296353722, -8.2928541750597269], [-3.1094225754055671, -9.183381300715638]],
[[-5.3146654119680097, -0.16998893217721456], [-9.2160946071956893, -4.46070881125324]], [[-8.773149651282699,
-5.2512334094570638], [-8.8568230999639574, -1.8392570312854915]], [[-8.5187670879813169, -3.8418994835860651],
[-0.84597062578397964, -8.8658427010712924]], [[0.69511118189273358, -1.1032644434621188], [-3.997492806899225,
-1.1716853502893025]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-3.36894529378,self.functionspace)
arg0.setTaggedValue(1,-4.62956527999)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-4.6824549992604805, 0.17860523484039881, -3.9939994980255102, -0.36579022311332743],
[-2.0003582573358858, 3.3436256968249793, -1.5671485178714373, 3.9554829351801821], [4.0499415739210693,
-3.1796189569360358, 0.28181611699077536, 1.4851321313182684]], [[4.9608073066477267, 2.1353944107091136,
3.2103965744924743, 0.36273874746876089], [0.33193515801312934, -1.8768462949087295, -3.5968753845201462,
-1.9342255010038101], [-0.98845968068423407, -2.6505467151645048, -3.9269883741621214, -1.2671783073823359]]],
[[[4.0296290320262234, 0.094183089334959114, -1.6548527114390654, 1.1815006848827636], [4.4205350333429578,
1.0602877007979998, -2.7207610093848364, 2.5749353581909009], [2.368743673752042, 0.36879117257479166,
3.1294699111463196, 3.8766421343643209]], [[-4.2994052301352443, -4.4665347726615128, -4.9654257982784813,
1.4010627781386145], [-0.49010647980719568, 1.1149343027340697, 3.8533389980231654, -1.4762647122950145],
[-2.4078638813490985, 4.4431147205208923, 3.0392301612263246, -2.3032611338556377]]], [[[1.1388924488325571,
4.4978561941078308, -3.3123851704811691, 1.3453478111463726], [4.1779635175178385, 3.1786527767023234,
-2.8109803623964669, 4.7217176158252876], [0.26914741902392958, -1.6630169842885789, -3.6267544687045641,
-4.7016327677304943]], [[0.44478691577550755, 2.9451130426961889, -1.0836274217802466, -4.8754431681482586],
[1.6457024072282014, -1.106310648992209, -3.2732924796145912, 4.7940609535301668], [-4.2482158844391957,
2.2391243759174451, 4.6408645091714327, 4.1449515947243611]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-8.0514002930449351, -3.1903400589440558, -7.3629447918099649, -3.734735516897782],
[-5.3693035511203409, -0.025319596959475277, -4.9360938116558923, 0.5865376413957275], [0.68099628013661473,
-6.5485642507204904, -3.0871291767936793, -1.8838131624661862]], [[1.5918620128632721, -1.233550883075341,
-0.15854871929198033, -3.0062065463156937], [-3.0370101357713253, -5.2457915886931836, -6.9658206783046008,
-5.3031707947882651], [-4.3574049744686887, -6.0194920089489594, -7.2959336679465761, -4.6361236011667906]]],
[[[0.66068373824176874, -3.2747622044494955, -5.0237980052235205, -2.187444608901691], [1.0515897395585032,
-2.3086575929864548, -6.0897063031692911, -0.79400993559355371], [-1.0002016200324126, -3.000154121209663,
-0.23947538263813506, 0.5076968405798663]], [[-7.668350523919699, -7.8354800664459674, -8.3343710920629359,
-1.9678825156458402], [-3.8590517735916503, -2.2540109910503849, 0.48439370423871075, -4.8452100060794692],
[-5.7768091751335531, 1.0741694267364377, -0.32971513255813001, -5.6722064276400923]]], [[[-2.2300528449518975,
1.1289109003233762, -6.6813304642656242, -2.023597482638082], [0.80901822373338383, -0.19029251708213124,
-6.1799256561809219, 1.352772322040833], [-3.099797874760525, -5.0319622780730331, -6.9956997624890187,
-8.0705780615149489]], [[-2.9241583780089471, -0.42383225108826572, -4.4525727155647008, -8.2443884619327132],
[-1.7232428865562532, -4.4752559427766636, -6.6422377733990459, 1.4251156597457122], [-7.6171611782236504,
-1.1298209178670096, 1.2719192153869781, 0.77600630093990652]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-9.3120202792456048, -4.4509600451447255, -8.6235647780106355,
-4.9953555030984518], [-6.6299235373210106, -1.285939583160145, -6.1967137978565621, -0.67408234480494222],
[-0.579623706064055, -7.8091842369211601, -4.347749162994349, -3.144433148666856]], [[0.33124202666260238,
-2.4941708692760107, -1.4191687054926501, -4.2668265325163635], [-4.297630121971995, -6.5064115748938534,
-8.2264406645052706, -6.5637907809889349], [-5.6180249606693584, -7.2801119951496291, -8.5565536541472458,
-5.8967435873674603]]], [[[-0.59993624795890099, -4.5353821906501652, -6.2844179914241902, -3.4480645951023607],
[-0.20903024664216652, -3.5692775791871245, -7.3503262893699608, -2.0546299217942234], [-2.2608216062330824,
-4.2607741074103327, -1.5000953688388048, -0.75292314562080342]], [[-8.9289705101203687, -9.0961000526466371,
-9.5949910782636056, -3.2285025018465099], [-5.11967175979232, -3.5146309772510547, -0.77622628196195897,
-6.1058299922801389], [-7.0374291613342228, -0.18645055946423206, -1.5903351187587997, -6.932826413840762]]],
[[[-3.4906728311525672, -0.13170908587729357, -7.9419504504662939, -3.2842174688387518], [-0.45160176246728589,
-1.450912503282801, -7.4405456423815917, 0.092152335840163246], [-4.3604178609611948, -6.2925822642737028,
-8.2563197486896875, -9.3311980477156187]], [[-4.1847783642096168, -1.6844522372889355, -5.7131927017653705,
-9.505008448133383], [-2.983862872756923, -5.7358759289773333, -7.9028577595997156, 0.16449567354504246],
[-8.8777811644243201, -2.3904409040676793, 0.011299229186308324, -0.48461368526076321]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-4.9434811071655114, 1.7588416724781917]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.0524482361043965, -0.58828792238396233]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.86003727467)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-9.8035183818403411, -3.1011956021966389]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.8075890385704341, -5.4483251970587929]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.47124983588436109, 3.3842142103059487]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([4.4506172428158504, -1.5976912605342894]))
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([2.7380372395241483, -1.2414970456241372])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([3.2092870754085094, 2.1427171646818115]),self.functionspace)
ref.setTaggedValue(1,numpy.array([7.1886544823399987, -2.8391883061584267]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[3.7123556177495072, -1.2322724929891438, -4.3196981967098704, 4.5149190397092358,
-3.4294461596271342], [-0.32526237821140569, 4.906418518064358, 1.6782843293160443, -4.5452294423093242,
-3.4252951962126454], [4.7623389482797158, 4.8957853100883888, 2.4605965522735644, -3.3235939770772349,
-3.6622677868193731], [3.7849671492059009, -3.7965523255405484, -0.98706292680421903, -2.9575953641431996,
3.7235194699440495]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[3.846235478086534, -2.9152984736534773, 2.1299170235868692,
1.4194093106373815, -1.9728564928751369], [0.12730504885223404, -2.4537968289763077, 1.8352652361138375,
-1.1054616749639532, -0.67553225283567997], [-4.6542627767136047, 0.014905560429250286, 0.84138572626791408,
-1.4074784720342515, -3.3322631066777983], [-0.64893500421415951, 4.4524265176475826, -3.5204114624144456,
3.5239615703390363, 2.3718443568961201]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.4845259086)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.1968815263516515, 2.2522534156130005, -0.83517228810772615, 7.9994449483113801,
0.055079748975010112], [3.1592635303907386, 8.3909444266665023, 5.1628102379181886, -1.06070353370718,
0.059230712389498841], [8.2468648568818601, 8.3803112186905331, 5.9451224608757087, 0.16093193152490937,
-0.17774187821722887], [7.2694930578080452, -0.31202641693840416, 2.4974629817979253, 0.52693054445894472,
7.2080453785461938]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[7.3307613866886783, 0.56922743494866701, 5.6144429321890135,
4.9039352192395258, 1.5116694157270074], [3.6118309574543783, 1.0307290796258366, 5.3197911447159818,
2.3790642336381911, 2.8089936557664643], [-1.1697368681114604, 3.4994314690313946, 4.3259116348700584,
2.0770474365678928, 0.15226280192434594], [2.8355909043879848, 7.9369524262497269, -0.035885553812301296,
7.0084874789411806, 5.8563702654982643]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[2.7675952117994296, 0.98431175880226363, -1.8309000840442566, 2.0351166910383416,
2.1718600084175153], [0.64718493825654111, 3.0274641310077364, 4.6031246235215555, -0.072830522019846633,
-3.436466903373192], [-2.7989895712459734, 3.2804563231391093, 3.1416998470123456, 0.25702028842752966,
-3.1553411419958821], [-4.5620989116806543, -0.23300222673645532, -2.3978689464069101, 0.41391436589174457,
-3.7252639362836382]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-2.1509506437818238, -2.5007519800405218, 0.30616207266744233,
-0.46790716227581797, 0.6454558125610621], [1.9589653025955753, -4.9059174981425437, -4.7107956989445992,
2.6150016745692826, -3.3329567586885211], [1.1850451086308738, 3.8781029980110997, -4.7104324292639133,
-4.8362413881812492, 4.9066980390674555], [-1.2440311634968171, -1.6429522113717008, 4.0547225056117124,
-0.33314796054153195, -2.6143781039708855]]))
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-0.0104190624259477, 3.439083370835446, -1.7585221913131677, 3.8784501968475897,
0.08088556648108991], [0.53276272310770789, -1.3171951284400176, -0.841014288686317, 2.4350359443944622,
0.55796159262639922], [-3.3985580423616479, 0.73804937880111687, 0.84641655693241269, -2.0376479444757822,
-0.094456394031885438], [0.8829252865168975, 0.84170422580042903, -1.9539396350167637, -4.8054718599517194,
-0.37594711864698205]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.7571761493734819, 4.4233951296377096, -3.5894222753574243, 5.9135668878859313,
2.2527455748986052], [1.179947661364249, 1.7102690025677187, 3.7621103348352385, 2.3622054223746156,
-2.8785053107467928], [-6.1975476136076217, 4.0185057019402262, 3.9881164039447583, -1.7806276560482526,
-3.2497975360277676], [-3.6791736251637568, 0.60870199906397371, -4.3518085814236738, -4.3915574940599749,
-4.1012110549306202]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-2.1613697062077715, 0.93833139079492422, -1.4523601186457253,
3.4105430345717718, 0.72634137904215201], [2.4917280257032832, -6.2231126265825614, -5.5518099876309162,
5.0500376189637448, -2.7749951660621219], [-2.2135129337307742, 4.6161523768122166, -3.8640158723315006,
-6.8738893326570309, 4.8122416450355701], [-0.36110587697991958, -0.80124798557127175, 2.1007828705949487,
-5.1386198204932514, -2.9903252226178676]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[1.6094791339338048, 4.27222307751477], [4.9486531857239697, -4.5552975586923292]],
[[-0.12032729123703056, -4.1413061177629231], [-2.7473350985925316, 4.7319188820310991]], [[0.13107637034429231,
-3.2138415379490204], [-3.9942457581718696, 1.3262496008026838]], [[2.56850905863657, 1.8321753808437329],
[4.5176482730823331, 4.4664637318837137]], [[0.50860355331966556, 0.55279434819439199], [3.1688695988617859,
-2.6740526298455016]], [[4.4977965557520072, 3.6422271944652209], [3.7948343945899445,
-3.0377990068633332]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-2.9548694146760557, 3.1101651017467038], [-0.31006440672923752,
0.74616091042484989]], [[-3.1016477433464864, 2.9532816390640111], [-2.0494474684559894, -1.1448583599993354]],
[[4.2052724347365604, -1.8157003708847643], [4.8073133555422327, -2.7045312989764492]], [[-2.3803833325202763,
0.19928505008920272], [-2.8622812030202094, 3.9488692362256081]], [[-4.1266217915470236, 4.8461083576413735],
[-3.1895474177762351, 4.4625154514412237]], [[-0.65350755924337811, 2.8015786665738105], [0.94103003425367859,
0.27556367440023166]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.49324308458)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.1027222185118468, 8.765466162092812], [9.4418962703020117, -0.062054474114287217]],
[[4.3729157933410114, 0.35193696681511888], [1.7459079859855104, 9.2251619666091411]], [[4.6243194549223343,
1.2794015466290216], [0.49899732640617245, 5.8194926853807258]], [[7.061752143214612, 6.3254184654217749],
[9.0108913576603751, 8.9597068164617557]], [[5.0018466378977076, 5.046037432772434], [7.6621126834398279,
1.8191904547325404]], [[8.9910396403300492, 8.1354702790432629], [8.2880774791679865,
1.4554440777147089]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.5383736699019863, 7.6034081863247458], [4.1831786778488045,
5.2394039950028919]], [[1.3915953412315556, 7.4465247236420531], [2.4437956161220526, 3.3483847245787066]],
[[8.6985155193146024, 2.6775427136932777], [9.3005564401202747, 1.7887117856015928]], [[2.1128597520577657,
4.6925281346672447], [1.6309618815578326, 8.4421123208036501]], [[0.36662129303101842, 9.3393514422194155],
[1.3036956668018069, 8.9557585360192657]], [[3.8397355253346639, 7.2948217511518525], [5.4342731188317206,
4.7688067589782737]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.7345315461324993, 4.5316724428402377], [-1.2207000383039999, -2.1651454481686692]],
[[-2.5222456135735638, 3.1325113872519896], [0.54140311786327011, -1.6266115642059011]], [[4.3999274072752783,
-0.64510581732829841], [-3.3878893926233533, -0.14783111107246061]], [[2.4816188811184228, 1.505965932327137],
[-2.8128544405052458, 3.2460332510852936]], [[1.5649806120186849, 1.1768584297160487], [-3.3133262672401544,
-2.5740884272652789]], [[2.936076596237732, -0.80694051724477056], [1.6382059835800931,
-0.059174653042079584]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.107948776768561, 4.79459166600315], [-0.070211802843057391,
-2.3000592273671394]], [[1.53142006950028, 0.5983353676488381], [4.2000369856633419, -3.7326077043834074]],
[[-3.6852528003303684, -0.40061815593309014], [4.849947657932514, 3.2046322763443698]], [[4.6824735127774275,
-2.3356975272114679], [-1.4284737023138216, -0.96863966970867921]], [[4.4306883649430571, 0.16250464015770305],
[4.7866411719098583, -1.6949698779239197]], [[-4.9624929004021014, -0.4120760567738655], [-3.510925072784119,
-0.26388846668772636]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.7560333190798687, 0.63030183757017788], [-3.8821224320935288, 4.3508142113739634]],
[[4.3548667192676795, -3.4709315123037445], [-0.19540447292770935, -1.1720138856956916]], [[3.7993994701980398,
-4.5475458462287497], [-0.20650310401114513, -2.7802894344079201]], [[-0.46867874332271242, 0.82685022383334505],
[-3.5357776147305264, 0.7633420403065605]], [[-0.19578164461526359, -4.1370261640670458], [-1.2073883253186946,
0.74664652191646397]], [[-0.697880661399644, -0.46932885527321488], [2.4087818009804716, -1.8245102799854829]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.0215017729473694, 5.1619742804104156], [-5.1028224703975287, 2.1856687632052942]],
[[1.8326211056941157, -0.33842012505175489], [0.34599864493556076, -2.7986254499015928]], [[8.1993268774733181,
-5.1926516635570481], [-3.5943924966344984, -2.9281205454803807]], [[2.0129401377957103, 2.3328161561604821],
[-6.3486320552357718, 4.0093752913918541]], [[1.3691989674034213, -2.9601677343509971], [-4.520714592558849,
-1.8274419053488149]], [[2.238195934838088, -1.2762693725179854], [4.0469877845605646,
-1.8836849330275625]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[7.8639820958484297, 5.4248935035733279], [-3.9523342349365862,
2.050754984006824]], [[5.8862867887679595, -2.8725961446549064], [4.0046325127356326, -4.904621590079099]],
[[0.11414666986767141, -4.9481640021618398], [4.6434445539213689, 0.42434284193644967]], [[4.2137947694547151,
-1.5088473033781229], [-4.9642513170443481, -0.20529762940211871]], [[4.2349067203277935, -3.9745215239093428],
[3.5792528465911637, -0.94832335600745576]], [[-5.6603735618017454, -0.88140491204708038], [-1.1021432718036475,
-2.0883987466732092]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[1.2403285479679145, -0.65355746314869823, 0.23507371305026048, 2.9495208917061202],
[-4.4153187452600653, -1.0271324152128747, 3.6087985228033794, 1.587633224392107], [1.5882989512534262,
-2.3766989521547401, -4.6462509853387939, 1.1425676014861166]], [[-4.8469447836806694, -1.4338245370809863,
-4.8441809139347694, 0.082128480181090424], [4.2695412477206585, -2.0376229192188622, -2.685821131586259,
-4.5654361329152717], [3.5226403567783482, -4.9633770210253347, 4.1637469549065127, -3.5898874968684167]]],
[[[2.7439089503129228, 0.81346375693975492, -2.576882111469688, 4.758878084101946], [0.098363354586225249,
-4.314913184354209, -1.1821682575010484, 4.9687115939178916], [-2.5414207769554564, 1.9836872846103208,
-1.5982744174212127, 4.5509211096426121]], [[4.759533396882766, -4.550347299113696, 4.9394743649799153,
-3.9692445921595421], [1.5755016838325195, 2.6599597206311305, -0.59545966103916648, -1.308464088815966],
[1.7018715016873482, 0.31781368103450536, -0.91184792887657995, -0.60566457689943931]]], [[[-0.365764084374395,
-0.75878286483821444, -3.1104661623240091, -3.7302303444372109], [0.58052395594970907, 0.14085590954626337,
4.6712439745076182, 0.65991412045590181], [-4.5675491076195733, -3.3042112830144132, -2.6719400309110553,
-3.8520603991598765]], [[3.4260488825099618, -1.2789319515430164, 1.8435112511824903, 1.0773214658952854],
[-4.0772283149901236, 1.0211433275718873, -2.015430043082814, 0.1376630245430368], [1.3249956905172624,
3.1987247807146968, 1.0304156332749459, 3.785256475561086]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.8774766185796605, 3.1521364883779448, -4.9233158714840091,
3.7988193665209522], [4.8244393256113263, 2.4688683468745563, -4.5044275072582254, 1.1107496985072052],
[-2.9980383766650376, -4.2922660982517158, 3.4924104659712771, -0.5135964311738892]], [[1.9573144047865201,
-2.2686101409008961, -2.907052414660404, -4.0582253229051144], [-2.0281877168409657, 1.7867206317317663,
0.018511114285918673, -4.0475974398672498], [1.3023403490307315, 1.9932255873687215, -4.6698465653310688,
-4.5630845029599421]]], [[[-1.9525649263627876, -0.72040110769848908, -3.6987029249472769, -3.3184217891099999],
[-4.0519149413902857, 4.1195877398536549, -3.8261874289376463, 3.423780007792768], [0.11768639970294359,
-1.4898880703788131, -1.1746648112150213, -0.28493737967147226]], [[-2.0138403307539932, 3.9987186392010816,
-1.0125535260055338, 0.57376641241565363], [4.213727608092972, 0.51388058678005066, -4.4106027756910908,
-1.9979423050108283], [1.5708368447511347, -1.6270284297780933, -0.55277364435139376, -1.7748804647831715]]],
[[[2.7639070541103061, 2.7303808332951629, 0.41148416591473591, -1.9337000414572802], [-2.7585163378482456,
2.2319457297797207, 3.7988668025967804, 3.6103374331669471], [-4.5925114196923271, -2.1274746711435997,
3.3094547630756779, -4.1386856959210352]], [[-2.1348423629137692, 3.539794593057783, 4.8265405725541157,
4.9426398297282788], [4.5757071915543417, -4.0433372993763399, -0.84096548582416997, 2.0567811910343226],
[4.5367596882428671, -4.9139510999364404, 1.1342166543217944, 1.4859311895053571]]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.83582066753)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.0761492154946453, 4.1822632043780326, 5.0708943805769913, 7.785341559232851],
[0.4205019222666655, 3.8086882523138561, 8.4446191903301102, 6.4234538919188378], [6.424119618780157,
2.4591217153719906, 0.18956968218793691, 5.9783882690128474]], [[-0.011124116153938601, 3.4019961304457444,
-0.008360246408038563, 4.9179491477078212], [9.1053619152473892, 2.7981977483078686, 2.1499995359404718,
0.27038453461145906], [8.358461024305079, -0.12755635349860395, 8.9995676224332435, 1.2459331706583141]]],
[[[7.5797296178396536, 5.6492844244664857, 2.2589385560570427, 9.5946987516286768], [4.934184022112956,
0.52090748317252178, 3.6536524100256824, 9.8045322614446224], [2.2943998905712744, 6.8195079521370516,
3.2375462501055181, 9.3867417771693429]], [[9.5953540644094968, 0.28547336841303483, 9.7752950325066461,
0.86657607536718873], [6.4113223513592503, 7.4957803881578613, 4.2403610064875643, 3.5273565787107648],
[6.537692169214079, 5.1536343485612361, 3.9239727386501508, 4.2301560906272915]]], [[[4.4700565831523358,
4.0770378026885163, 1.7253545052027217, 1.1055903230895199], [5.4163446234764399, 4.9766765770729942,
9.507064642034349, 5.4957347879826326], [0.26827155990715745, 1.5316093845123175, 2.1638806366156755,
0.98376026836685426]], [[8.2618695500366925, 3.5568887159837144, 6.679331918709221, 5.9131421334220162],
[0.75859235253660717, 5.8569639950986181, 2.8203906244439167, 4.9734836920697676], [6.1608163580439932,
8.0345454482414276, 5.8662363008016767, 8.6210771430878168]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.95834404894707026, 7.9879571559046756, -0.087495203957278278,
8.634640034047683], [9.6602599931380571, 7.304689014401287, 0.3313931602685054, 5.946570366033936],
[1.8377822908616932, 0.543554569275015, 8.3282311334980079, 4.3222242363528416]], [[6.7931350723132509,
2.5672105266258347, 1.9287682528663268, 0.77759534462161639], [2.8076329506857651, 6.6225412992584971,
4.8543317818126495, 0.78822322765948094], [6.1381610165574623, 6.8290462548954523, 0.165974102195662,
0.27273616456678873]]], [[[2.8832557411639432, 4.1154195598282417, 1.1371177425794539, 1.5173988784167309],
[0.78390572613644505, 8.9554084073803857, 1.0096332385890845, 8.2596006753194988], [4.9535070672296744,
3.3459325971479177, 3.6611558563117095, 4.5508832878552585]], [[2.8219803367727376, 8.8345393067278124,
3.823267141521197, 5.4095870799423844], [9.0495482756197028, 5.3497012543067815, 0.42521789183563996,
2.8378783625159025], [6.4066575122778655, 3.2087922377486375, 4.283047023175337, 3.0609402027435593]]],
[[[7.5997277216370369, 7.5662015008218937, 5.2473048334414667, 2.9021206260694505], [2.0773043296784852,
7.0677663973064515, 8.6346874701235112, 8.4461581006936779], [0.24330924783440366, 2.7083459963831311,
8.1452754306024087, 0.6971349716056956]], [[2.7009783046129616, 8.3756152605845138, 9.6623612400808465,
9.7784604972550095], [9.4115278590810725, 0.79248336815039089, 3.9948551817025608, 6.8926018585610533],
[9.3725803557695979, -0.078130432409709627, 5.9700373218485252, 6.3217518570320879]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[-3.1509236523814286, 1.680234058442708, -1.7187977550532416, 3.9846453843972913],
[-1.6754979614332322, -3.8450074807346901, -1.5740330789137689, -4.4201074343218751], [2.276529915966389,
-0.80235747833916982, 4.571247045598767, -3.4093255486695617]], [[-4.0166628667791446, -1.3933240066153738,
-1.215071574667598, -3.4706735067142258], [-3.0960303329082572, 4.3009033191704589, 4.4065883064621634,
4.8965445768019009], [-4.4443460968929758, 3.8975314333052253, -4.4153045047286144, 1.7496820405056166]]],
[[[1.634274247051799, -2.4623052709302771, 1.4279180811059975, 0.92544783745377668], [-4.4862942162658106,
-0.17080151547727951, 0.52532922395695625, -0.11419327223481623], [-1.1603038628614835, -2.5757515035829472,
1.9959550719114718, -1.7953240768392242]], [[4.9309159450812103, 3.2298165897638906, -0.075208625571880461,
-1.1899071115534432], [1.6545058865005409, -1.9426363189361773, 1.620629502101667, -4.2257681218133687],
[-0.24689686416986767, 2.1247379677905815, -0.022501917990521925, -1.9988138278359822]]], [[[-2.16170138942825,
1.2184335532362125, 1.1509535832826323, 2.2195238124001797], [2.7455643566460015, 4.6453581322389361,
-4.1082447076462643, -4.0639146315693067], [-4.96116105494092, -3.6915142795866762, -1.2186796693827917,
4.7933913234222967]], [[2.0022553772723217, -0.96891528014022654, -2.5457411370843142, -3.3574915783043058],
[0.10326637441549735, 2.2065594442944327, 3.4159550457557479, -0.71182719653128945], [-1.5473005591196651,
-1.8237704422942014, 3.7660184612895105, -2.1565964302540372]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-2.7644812297675436, 0.40931971763703956, 3.611075059192606,
0.50972765741910564], [-4.2130726841282584, -1.1190277433669751, -0.71203745760782766, -3.152956525368753],
[-1.6186056313723087, 1.1274726343098616, 4.4133392834898437, 1.5220424195160689]], [[0.16147933294385375,
2.4654462130650998, -2.2315133839410328, -4.5248215067907562], [2.2226933853289026, 3.7083490689582508,
1.6042940030913613, 0.26178935291219929], [2.4033332562872989, 2.6116613010273229, -3.5340848426974594,
-4.3871506552920767]]], [[[-2.5011422414749243, -2.9785737952530678, -4.0632268435384287, -2.9061747268645899],
[-3.4361922491984487, 0.92512310228203631, -3.7591410062368915, -0.10199113857196274], [1.4370716393838645,
0.71874746237537668, -4.5480615526025323, -3.9385610102938093]], [[-3.5039474073115562, 1.4740925776889409,
-0.06403798877318323, -3.3828440686373753], [-1.9590119108809123, -0.13446729158123816, -2.4360152863347251,
0.81375486060557112], [2.4638296949211451, 0.84554464160795018, 1.0770605717668191, 0.90311465710515648]]],
[[[-3.0365259446312756, -2.1113062138954444, 3.190598106141481, 4.7146234105400531], [4.7073713389281071,
2.0949812753843036, 1.902801485931489, -0.4384294077249864], [-4.4341512258710214, 4.114619941421422,
4.1663347911930675, -0.082374028629738305]], [[-0.58950965471106098, -1.9744112566224792, -0.0098348725084971278,
2.3871548847218813], [-1.1861224380121662, -3.8703032573387253, 0.2332725218101972, 2.7881117501797101],
[-4.3313677243610327, 2.5428749523942127, 3.9018944633638419, -0.49408732338659789]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.8433628252117984, 1.5322432245117268, 0.55363793461945665, 4.6657626927783653],
[-0.94710403494804751, 3.9800168829397649, 3.0366988370600794, 2.8875431155604332], [-1.188024345098996,
1.0665386751463011, 4.7835901054797993, 2.5969696632689807]], [[-1.99850752062535, 1.1333681341555639,
-0.49718999089842697, 1.1440753369804515], [0.26294280812698378, -3.8684363170040701, 0.061030108864615684,
-4.1179127492349608], [-4.67031644465197, 4.9054510497550492, -0.2640662442281041, 1.363134852748785]]],
[[[-1.4621905107325697, -2.8811881835070574, -2.0127263016810106, 3.9187151372775499], [4.0559843147336121,
3.8748150284806506, -4.7195991819934049, 1.6441241199343715], [1.1018797372155733, 1.5720711461020827,
-2.8718182782954003, -2.4926472889456743]], [[2.1583981297206112, -2.7029142786449709, -4.0306810999276212,
-0.041927417439557857], [2.5297094316362001, 3.2023688131127575, -0.87830172094753056, 1.5087811969314782],
[0.94040146920827272, 1.8042467131134678, 2.6306472495122346, 0.16819275341523543]]], [[[0.15798239523545377,
2.4104584738150319, 2.3850248364278386, 3.2174938931658534], [4.8575582926065533, 0.30772922316230389,
-4.4397211951638047, 0.39063821497748741], [-2.3146321369181688, -3.0703095447217885, 1.7397877979741549,
4.033153568325778]], [[-1.7935270727714037, -3.9682025038313595, -3.4065483616803141, 2.1844510922893523],
[-4.2449404804537032, 1.9572337718531996, -4.6593011375931308, 0.98236210083608633], [4.8624542464851288,
0.5657266529616205, 0.50114562982511135, -3.2736237576584317]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3075608271696302, 3.2124772829544348, -1.165159820433785, 8.6504080771756566],
[-2.6226019963812797, 0.13500940220507474, 1.4626657581463105, -1.5325643187614419], [1.088505570867393,
0.26418119680713126, 9.3548371510785664, -0.81235588540058101]], [[-6.0151703874044946, -0.25995587245980989,
-1.7122615655660249, -2.3265981697337743], [-2.8330875247812735, 0.43246700216638878, 4.4676184153267791,
0.77863182756694016], [-9.1146625415449449, 8.8029824830602745, -4.6793707489567185, 3.1128168932544016]]],
[[[0.17208373631922935, -5.3434934544373345, -0.58480822057501314, 4.8441629747313266], [-0.4303099015321985,
3.7040135130033711, -4.1942699580364486, 1.5299308476995552], [-0.058424125645910152, -1.0036803574808646,
-0.87586320638392845, -4.2879713657848981]], [[7.0893140748018215, 0.52690231111891972, -4.1058897254995017,
-1.2318345289930011], [4.184215318136741, 1.2597324941765802, 0.74232778115413645, -2.7169869248818905],
[0.69350460503840505, 3.9289846809040494, 2.6081453315217127, -1.8306210744207467]]], [[[-2.0037189941927962,
3.6288920270512444, 3.5359784197104709, 5.4370177055660331], [7.6031226492525548, 4.95308735540124,
-8.5479659028100698, -3.6732764165918192], [-7.2757931918590888, -6.7618238243084647, 0.52110812859136324,
8.8265448917480747]], [[0.20872830450091806, -4.9371177839715861, -5.9522894987646282, -1.1730404860149535],
[-4.1416741060382058, 4.1637932161476323, -1.2433460918373829, 0.27053490430479687], [3.3151536873654637,
-1.2580437893325809, 4.2671640911146218, -5.430220187912469]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.92111840455574523, 1.9415629421487663, 4.1647129938120626,
5.1754903501974709], [-5.1601767190763059, 2.8609891395727898, 2.3246613794522517, -0.26541340980831984],
[-2.8066299764713047, 2.1940113094561626, 9.1969293889696431, 4.1190120827850496]], [[-1.8370281876814962,
3.5988143472206637, -2.7287033748394598, -3.3807461698103047], [2.4856361934558864, -0.16008724804581931,
1.665324111955977, -3.8561233963227615], [-2.2669831883646712, 7.5171123507823721, -3.7981510869255635,
-3.0240158025432917]]], [[[-3.9633327522074939, -5.8597619787601252, -6.0759531452194393, 1.0125404104129601],
[0.61979206553516342, 4.7999381307626869, -8.4787401882302973, 1.5421329813624087], [2.5389513765994378,
2.2908186084774593, -7.4198798308979326, -6.4312082992394837]], [[-1.345549277590945, -1.22882170095603,
-4.0947190887008045, -3.4247714860769332], [0.57069752075528779, 3.0679015215315193, -3.3143170072822556,
2.3225360575370493], [3.4042311641294178, 2.649791354721418, 3.7077078212790537, 1.0713074105203919]]],
[[[-2.8785435493958218, 0.29915225991958749, 5.5756229425693196, 7.9321173037059065], [9.5649296315346604,
2.4027104985466075, -2.5369197092323157, -0.047791192747498989], [-6.7487833627891902, 1.0443103966996334,
5.9061225891672224, 3.9507795396960397]], [[-2.3830367274824646, -5.9426137604538383, -3.4163832341888112,
4.5716059770112336], [-5.4310629184658694, -1.9130694854855257, -4.4260286157829336, 3.7704738510157965],
[0.53108652212409613, 3.1086016053558332, 4.4030400931889533, -3.7677110810450296]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
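# Note on the expanded-Data tests below: arg0 is assembled as a spatially varying Data
# object from two constant blocks using a whereNegative mask on the first coordinate of
# the function space, so the addition is exercised on expanded (per-sample) storage.
# As above, the Symbol operand keeps the result lazy; after substitution the outcome is
# compared against a mask-built reference within RES_TOL using Lsup.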
def test_add_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.481249850026)+(1.-msk_arg0)*(-1.48465416864)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-2.65110429185)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-3.13235414188)+(1.-msk_ref)*(-4.13575846049)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(1.13411439983)+(1.-msk_arg0)*(-0.629637549331)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([-0.62992419613163175, 4.55886114005793])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.50419020369403444, 5.6929755398835962])+(1.-msk_ref)*numpy.array([-1.259561745462479,
3.9292235907270827])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.01809294358)+(1.-msk_arg0)*(0.889743657807)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-2.793178683106079, -2.6222774715493582, 1.0142792223620747, -3.0640922264732984,
-2.3554298671206055], [0.088775964219395043, 3.4441381957619619, 3.3892189758872853, 2.7423767697866088,
3.977644321141641], [1.4526982641352157, 2.2184052986969505, -3.952710218879385, -4.7169576073736375,
-0.7937042808225101], [2.2686916098744314, -1.553248315886353, -2.7367045745859819, 3.7958840729585344,
1.4548199443717298]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[0.22491426047411567, 0.39581547203083645, 4.0323721659422693, -0.045999282893103732,
0.66266307645958911], [3.1068689077995897, 6.4622311393421565, 6.4073119194674799, 5.7604697133668035,
6.9957372647218357], [4.4707912077154104, 5.2364982422771451, -0.93461727529919036, -1.6988646637934428,
2.2243886627576845], [5.2867845534546261, 1.4648446276938416, 0.28138836899421271, 6.813977016538729,
4.4729128879519244]])+(1.-msk_ref)*numpy.array([[-1.9034350252987218, -1.732533813742001, 1.9040228801694319,
-2.1743485686659412, -1.4656862093132483], [0.97851962202675224, 4.3338818535693191, 4.2789626336946425,
3.632120427593966, 4.8673879789489982], [2.3424419219425729, 3.1081489565043077, -3.0629665610720278,
-3.8272139495662802, 0.096039376984847102], [3.1584352676817886, -0.66350465807899583, -1.8469609167786247,
4.6856277307658916, 2.344563602179087]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.98444562132)+(1.-msk_arg0)*(4.30756765987)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[1.9993822405268356, -3.1230808428690615], [4.9036400439562815, -4.8838867997176525]],
[[0.42763250705520939, 1.7579324334230453], [-3.7242679708963458, 1.8833596506298056]], [[-3.5481907533254931,
0.2040318933875751], [-2.5124574767604746, -4.1576503017979416]], [[2.4187154671810562, -0.51775884222858526],
[-1.722028671225063, 4.8177194310600537]], [[3.5460779618762999, 3.7426721831596925], [-3.14876579453641,
-1.8491069265603413]], [[-2.0602497125201733, 1.8445672729830882], [2.6289048953955998, -2.1171625740448654]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-2.9850633807947604, -8.1075264641906575], [-0.080805577365314463,
-9.8683324210392485]], [[-4.5568131142663866, -3.2265131878985507], [-8.7087135922179417, -3.1010859706917904]],
[[-8.5326363746470886, -4.7804137279340209], [-7.4969030980820701, -9.1420959231195376]], [[-2.5657301541405397,
-5.5022044635501812], [-6.7064742925466589, -0.16672619026154223]], [[-1.4383676594452961, -1.2417734381619034],
[-8.1332114158580069, -6.8335525478819372]], [[-7.0446953338417693, -3.1398783483385078], [-2.3555407259259962,
-7.1016081953664614]]])+(1.-msk_ref)*numpy.array([[[6.3069499004015404, 1.1844868170056433], [9.2112077038309863,
-0.57631913984294769]], [[4.7352001669299142, 6.0655000932977501], [0.58329968897835904, 6.1909273105045104]],
[[0.75937690654921175, 4.5115995532622799], [1.7951101831142302, 0.14991735807676321]], [[6.726283127055761,
3.7898088176461195], [2.5855389886496418, 9.1252870909347585]], [[7.8536456217510047, 8.0502398430343973],
[1.1588018653382948, 2.4584607333143635]], [[2.2473179473545315, 6.152134932857793], [6.9364725552703046,
2.1904050858298394]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.9697925334)+(1.-msk_arg0)*(-4.26135335725)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.9689996783063126, 2.6024749301521517, -2.8657897182202263, 3.4523361907793202],
[1.0646468808240472, 2.2809214673673006, 1.9110441510817342, 3.6637536830808415], [-4.8161620946685977,
1.1260192950202335, -1.5444099528131283, 4.5856953227320361]], [[3.4807853259935388, 1.0632821522370133,
-1.7813251042294, 0.96803702807832348], [-2.2395880868316476, 4.8919502166960243, 3.0915081953974273,
-0.85921425228962178], [-0.24500754865585961, -3.000069805276242, -2.3285433357124861, -3.7526812827715004]]],
[[[-2.6148866735769314, -2.9426881222754986, -2.1105189060422127, -1.718323686970705], [0.38236683235255065,
4.8146833101999391, -0.69724678041282662, -3.674837501299455], [-1.1217878757973345, 1.9457797122429064,
4.3330454272287042, 1.2870165165330079]], [[0.90390350707926448, 4.0932246664578322, 4.0170833493811937,
2.3057200276883218], [-4.1149618340720506, 4.3206785552080422, 4.5478406361616468, 3.4270491303459689],
[-3.2122582790653578, -0.051138136931458078, 2.847106348954056, -2.0922906343243097]]], [[[-3.8470709835005801,
0.79389346854249432, 1.9702586564654192, -1.230993932131331], [0.52027641197917784, 4.1606002966489264,
-4.1240899145057277, 3.0855602864655047], [1.2434749670286918, 1.9421106344042691, -4.7997149299258455,
-3.1016051858236517]], [[-4.0158867307020536, -1.2810983979769732, 4.1806447574751786, 2.4159993753375488],
[3.8210591526688589, 2.9170696329659753, 0.212629682453775, -3.6791629346607402], [-0.52709663403725493,
-2.0893727810689953, -1.7473644406170976, -4.1869442335699976]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[0.99920714490574225, -0.36731760324841867, -5.8355822516207967, 0.48254365737874982],
[-1.9051456525765231, -0.68887106603326975, -1.0587483823188362, 0.69396114968027112], [-7.7859546280691685,
-1.8437732383803369, -4.5142024862136987, 1.6159027893314657]], [[0.51099279259296848, -1.9065103811635571,
-4.7511176376299709, -2.0017555053222469], [-5.2093806202322179, 1.9221576832954539, 0.12171566199685691,
-3.8290067856901921], [-3.21480008205643, -5.9698623386768119, -5.2983358691130569, -6.7224738161720712]]],
[[[-5.5846792069775013, -5.9124806556760685, -5.0803114394427826, -4.6881162203712758], [-2.5874257010480197,
1.8448907767993687, -3.667039313813397, -6.6446300347000253], [-4.0915804091979044, -1.024012821157664,
1.3632528938281339, -1.6827760168675625]], [[-2.0658890263213059, 1.1234321330572619, 1.0472908159806233,
-0.66407250571224852], [-7.0847543674726214, 1.3508860218074719, 1.5780481027610764, 0.45725659694539855],
[-6.1820508124659277, -3.0209306703320284, -0.12268618444651436, -5.0620831677248805]]], [[[-6.8168635169011509,
-2.175899064858076, -0.99953387693515117, -4.2007864655319018], [-2.4495161214213925, 1.190807763248356,
-7.0938824479062976, 0.11576775306493436], [-1.7263175663718786, -1.0276818989963012, -7.7695074633264163,
-6.0713977192242226]], [[-6.9856792641026235, -4.250890931377544, 1.2108522240746082, -0.55379315806302154],
[0.8512666192682885, -0.052722900434595044, -2.7571628509467954, -6.6489554680613105], [-3.4968891674378253,
-5.0591653144695652, -4.7171569740176675, -7.1567367669705675]]]])+(1.-msk_ref)*numpy.array([[[[-0.29235367894345909,
-1.65887842709762, -7.1271430754699985, -0.80901716647045152], [-3.1967064764257245, -1.9804318898824711,
-2.3503092061680375, -0.59759967416893023], [-9.0775154519183694, -3.1353340622295383, -5.8057633100629005,
0.32434196548226435]], [[-0.78056803125623286, -3.1980712050127584, -6.0426784614791718, -3.2933163291714482],
[-6.5009414440814197, 0.63059685944625254, -1.1698451618523444, -5.1205676095393935], [-4.5063609059056313,
-7.2614231625260137, -6.5898966929622578, -8.0140346400212721]]], [[[-6.8762400308267031, -7.2040414795252703,
-6.3718722632919844, -5.9796770442204767], [-3.8789865248972211, 0.5533299529501674, -4.9586001376625983,
-7.9361908585492262], [-5.3831412330471062, -2.3155736450068654, 0.071692069978932516, -2.9743368407167639]],
[[-3.3574498501705072, -0.16812869079193948, -0.244270007868578, -1.9556333295614499], [-8.3763151913218223,
0.059325197958270515, 0.28648727891187509, -0.83430422690380279], [-7.4736116363151295, -4.3124914941812298,
-1.4142470082957157, -6.3536439915740814]]], [[[-8.1084243407503518, -3.4674598887072774, -2.2910947007843525,
-5.4923472893811027], [-3.7410769452705939, -0.10075306060084532, -8.3854432717554985, -1.175793070784267],
[-3.01787839022108, -2.3192427228455026, -9.0610682871756172, -7.3629585430734235]], [[-8.2772400879518244,
-5.5424517552267449, -0.080708599774593104, -1.8453539819122229], [-0.44029420458091284, -1.3442837242837964,
-4.0487236747959967, -7.9405162919105123], [-4.7884499912870266, -6.350726138318767, -6.0087177978668693,
-8.4482975908197702]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([2.1945719955206853,
-3.4851810549539852])+(1.-msk_arg0)*numpy.array([-3.159460740559509, 1.0507096466806898])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.92811762582)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([5.1226896213358133,
-0.5570634291388572])+(1.-msk_ref)*numpy.array([-0.23134311474438096, 3.9788272724958178])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.9387192390641195,
-2.294788495198282])+(1.-msk_arg0)*numpy.array([-3.9950296964046816, -4.9584579002903517])
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.68148355985483988, 0.33396702170122339])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([2.6202027989189594, -1.9608214734970586])+(1.-msk_ref)*numpy.array([-3.3135461365498418,
-4.6244908785891283])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.9335525790389809, 4.8876884032830024, -3.6794048434152948, -2.9337672885330814,
0.5880232587543972], [1.2731441866942719, 4.8021715240969982, 2.9871285060348427, 4.3674026791776921,
2.3324101078324144], [3.257367767879968, 3.614481137699638, -4.0465097244122443, -3.3712543524462166,
0.83424572698980626], [-4.7734011845397317, -1.1918316514932537, -2.641576771310632, -3.7441723823507447,
2.5792398168240602]])+(1.-msk_arg0)*numpy.array([[0.51038147587387783, -3.548018657118809, 3.7494118465432393,
3.6729170048063136, -2.9522974158811746], [3.2109365766033289, -1.7347320393345091, -0.9996429948297223,
-0.75500884718678307, 1.5928790967815267], [-4.1174844249701259, 4.2030131668606234, -4.8484509001230229,
2.7032344298767921, 4.3009935101668333], [-1.4527019870327429, 3.9347061378002781, 1.21415230923688,
-3.666838308237784, -3.8400590973123858]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.22997214356)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[5.1635247225953336, 8.117660546839355, -0.44943269985894219, 0.29620485502327121,
3.8179954023107499], [4.5031163302506245, 8.0321436676533509, 6.2171006495911953, 7.5973748227340447,
5.5623822513887671], [6.4873399114363206, 6.8444532812559906, -0.81653758085589168, -0.14128220888986398,
4.0642178705461589], [-1.5434290409833791, 2.038140492063099, 0.58839537224572069, -0.51420023879439203,
5.8092119603804129]])+(1.-msk_ref)*numpy.array([[3.7403536194302305, -0.31804651356245639, 6.979383990099592,
6.9028891483626662, 0.27767472767517809], [6.4409087201596815, 1.4952401042218435, 2.2303291487266304,
2.4749632963695696, 4.8228512403378794], [-0.88751228141377325, 7.4329853104169761, -1.6184787565666703,
5.9332065734331447, 7.5309656537231859], [1.7772701565236098, 7.1646782813566308, 4.4441244527932326,
-0.43686616468143136, -0.61008695375603317]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[-0.074742989914646785, -1.8482493880577588, 1.0926262448311599, 4.5158483202643716,
-3.0805669333005561], [0.0085606966159099684, -2.9696862086974996, 3.3024460854167597, 1.5088165460119427,
-3.6452065491857266], [0.18694035412066512, -4.6738922180085147, 3.9551045875071438, 4.0084174115638724,
-0.63332177275981749], [2.5093858800842108, -0.36171911019222946, 0.19138395375626427, -3.1795621861527734,
-2.6267949144535008]])+(1.-msk_arg0)*numpy.array([[-3.5942187686631524, -3.7060821431133406, 0.9533196788857623,
-4.8840044000628744, 0.3938790125214453], [4.0652979493208985, 4.5325841421496644, -0.4281905049316661,
-1.742508580451184, 2.7120740894023898], [0.56888661640784566, -2.4569299021956068, 3.568568120069024,
-2.0793352745659766, -1.7689628659930126], [-4.8632954420706014, -2.8828667280653364, 3.4090243893802246,
3.0651732601260697, 4.6463764755640256]])
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-1.4953863183942318, -3.5127993001524969, 2.9138150805794103, -1.6144165168200519,
-0.65062618022498242], [-4.9181569250500168, -2.6971927119277908, 4.2365880197149934, -4.2036145824282496,
2.2260090531531453], [4.0868409931398002, -3.3893548967194032, 2.9012650531553019, -2.2355683566643378,
2.9627609193479501], [4.9921359000605019, 0.6569024014440803, 3.3639734573108839, 0.89356331435440595,
-4.0709626638242327]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.5701293083088785, -5.3610486882102553, 4.0064413254105702, 2.9014318034443196,
-3.7311931135255385], [-4.9095962284341068, -5.6668789206252903, 7.5390341051317531, -2.6947980364163069,
-1.4191974960325813], [4.2737813472604653, -8.0632471147279183, 6.8563696406624457, 1.7728490548995346,
2.3294391465881326], [7.5015217801447127, 0.29518329125185083, 3.5553574110671482, -2.2859988717983675,
-6.6977575782777334]])+(1.-msk_ref)*numpy.array([[-5.0896050870573841, -7.2188814432658379, 3.8671347594651726,
-6.4984209168829263, -0.25674716770353712], [-0.85285897572911828, 1.8353914302218737, 3.8083975147833273,
-5.9461231628794335, 4.9380831425555352], [4.6557276095476459, -5.8462847989150095, 6.4698331732243259,
-4.3149036312303144, 1.1937980533549375], [0.12884045798990051, -2.2259643266212561, 6.7729978466911085,
3.9587365744804757, 0.57541381173979289]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-2.1957568090391955, 0.56747277575122101], [-1.4226171578539604,
-3.1174336379255854]], [[1.9150168705353749, 0.46771483389240665], [-0.73261624542450932, 1.4533109165427449]],
[[-4.3700026677098416, -4.4121889510507675], [-4.2432470132589684, -4.6365817911825937]], [[4.3712760608754326,
0.48815678812850649], [-4.2919585871561221, 2.8753619236403747]], [[4.7410827225779482, -3.2941488290580354],
[3.5834613437014919, 0.53477849558006074]], [[-2.2697241902980902, 1.4839036193452078], [4.3514574228344109,
2.0334834769049763]]])+(1.-msk_arg0)*numpy.array([[[1.9065956016010119, 3.8011536401496766], [4.2481111431072272,
0.7657337986451509]], [[1.7488690210709832, 4.5064595133713876], [-1.261534521038973, -1.5095749568667172]],
[[1.2010203264269057, 0.055494332510111377], [4.3269730839285749, -0.54412407243328076]], [[-2.6257140205956175,
-3.4462245120816002], [1.3451771798822101, 2.462398203439907]], [[-2.5713124204289493, 1.9356323962441504],
[1.8879658089499234, 3.1212800001648091]], [[1.942043508304808, 0.80539011514164471], [-0.3765200612428643,
0.73339801844715691]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.24723235412)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.05147554507665264, 2.8147051298670691], [0.82461519626188773,
-0.87020128380973727]], [[4.162249224651223, 2.7149471880082547], [1.5146161086913388, 3.700543270658593]],
[[-2.1227703135939935, -2.1649565969349194], [-1.9960146591431203, -2.3893494370667456]], [[6.6185084149912807,
2.7353891422443546], [-2.044726233040274, 5.1225942777562228]], [[6.9883150766937963, -1.0469164749421873],
[5.83069369781734, 2.7820108496959088]], [[-0.022491836182242153, 3.7311359734610559], [6.598689776950259,
4.2807158310208244]]])+(1.-msk_ref)*numpy.array([[[4.15382795571686, 6.0483859942655247], [6.4953434972230752,
3.012966152760999]], [[3.9961013751868313, 6.7536918674872357], [0.98569783307687509, 0.73765739724913093]],
[[3.4482526805427538, 2.3027266866259595], [6.574205438044423, 1.7031082816825673]], [[-0.37848166647976944,
-1.1989921579657521], [3.5924095339980582, 4.7096305575557551]], [[-0.32408006631310116, 4.1828647503599985],
[4.1351981630657715, 5.3685123542806572]], [[4.1892758624206561, 3.0526224692574928], [1.8707122928729838,
2.980630372563005]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-3.6330041831896742, 1.9011276595647058], [4.0527837903730326, 3.7453216540822218]],
[[1.1423057067323032, -4.6191355501663702], [-0.19479401086936399, 3.6518312558771875]], [[-0.78164127432320996,
-0.0025588788834731702], [-2.5155059876978534, -2.7853664238124578]], [[-2.4557560474662496, -1.7001261418483038],
[2.2437567320884249, -4.5528490181464578]], [[3.3965240991344601, 2.7531638892344281], [-1.0182649859279858,
0.37879180372082377]], [[-2.2634040587587356, -3.6908761533687482], [-2.6652399154901509,
-2.0159814304593739]]])+(1.-msk_arg0)*numpy.array([[[4.9981907924797788, 4.277720751221235], [-4.4785446333946686,
-3.8140270519701982]], [[1.4517149340948965, 1.9122847710945834], [-1.0984824997077558, 4.9260526287710995]],
[[3.0231870187238314, -4.426803554802202], [-0.1009215503507912, -2.4226611633877337]], [[3.1439947236211125,
-2.7156096061802728], [-0.27949941006709977, 0.15562912547547469]], [[-1.6704879956646712, -0.87822202800174587],
[-4.0968204088950708, -4.8812474874399072]], [[-3.0876637956180186, 0.42808604578959475], [-0.76617423765119153,
1.4811418969805343]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.655791939954395, 1.9082625611635287], [2.0305234873740705, -3.9575879711347337]],
[[0.58883813376680294, -0.44253502109642717], [-0.50659655202841058, 4.7262250303753071]], [[2.3551049262619417,
-2.7472704728416062], [-4.2131185370897501, 1.1560716927603512]], [[-1.8521430501234626, -2.8126771236453196],
[-1.6116964851382032, 4.3144406033510982]], [[-4.4005771771028979, -3.8795508309654512], [0.95903540985898683,
-0.84559016177598512]], [[-2.6007509769442674, -0.13151235868250399], [-1.5038936232862978, -3.9733280592961249]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-7.2887961231440688, 3.8093902207282344], [6.0833072777471031, -0.21226631705251187]],
[[1.7311438404991062, -5.0616705712627974], [-0.70139056289777457, 8.3780562862524945]], [[1.5734636519387317,
-2.7498293517250794], [-6.7286245247876035, -1.6292947310521066]], [[-4.3078990975897122, -4.5128032654936234],
[0.63206024695022167, -0.23840841479535957]], [[-1.0040530779684378, -1.1263869417310231], [-0.059229576068998924,
-0.46679835805516134]], [[-4.8641550357030034, -3.8223885120512522], [-4.1691335387764488,
-5.9893094897554988]]])+(1.-msk_ref)*numpy.array([[[1.3423988525253838, 6.1859833123847636], [-2.4480211460205981,
-7.7716150231049319]], [[2.0405530678616994, 1.4697497499981562], [-1.6050790517361664, 9.6522776591464066]],
[[5.3782919449857731, -7.1740740276438082], [-4.3140400874405413, -1.2665894706273826]], [[1.29185167349765,
-5.5282867298255924], [-1.891195895205303, 4.4700697288265729]], [[-6.0710651727675691, -4.757772858967197],
[-3.137784999036084, -5.7268376492158923]], [[-5.688414772562286, 0.29657368710709076], [-2.2700678609374894,
-2.4921861623155905]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.965007128412612, 3.4584141019026564, -1.0391619896304451, 4.5542963326499351],
[-0.0016792172679549466, -2.9053441565334981, 0.025786108583792711, -0.89554847161554374], [4.4904084527351209,
-0.89553646258473307, 3.8929449623498495, -2.8715607346304415]], [[-3.727374719009604, 2.2555823384608908,
0.53380019017552272, -0.29480940480144113], [-3.6344667828862445, -4.8499559892732567, 3.5342171405331317,
1.9875915936023327], [3.0643486049591804, -2.9482947381564806, 1.257296440825332, -4.4599817600046716]]],
[[[-3.7989993001254971, 4.2006768317373879, -1.9340842456373886, 0.25295780568139836], [0.15305381262779072,
2.184447614622945, -2.0595806484522039, 1.6196719151709491], [-1.550459702477788, 2.2328097059995393,
-3.2648987061947632, -1.7698524550474004]], [[-3.1067614393264673, 3.6490340896776274, 4.2948603770463407,
-3.4382940099694084], [-1.765073080880275, 2.5928931740693892, 2.2530590640640069, 2.7653349815108443],
[-0.88766895991026384, 3.8444038125137965, 3.8283329993863564, 1.6961545196727537]]], [[[-1.6941819291782823,
-4.3507603532160344, 0.58625398426930175, -4.9534370199923137], [4.3258398610183271, 4.7398172498630355,
-0.27425006429631082, -0.80958052389792012], [0.27800145594245151, -0.70646630926925713, -1.3619199397032533,
-0.22712536683851958]], [[-3.7307177958823781, -0.17135910311966995, -1.2454260400370809, 1.8499155339141273],
[0.7652733563966283, -4.2318891899847593, 4.1390775019993704, 2.1086112655335079], [-4.4480501135282662,
4.3290513315610166, -4.1098101623830443, -2.8839598970399614]]]])+(1.-msk_arg0)*numpy.array([[[[3.9323713317642746,
4.4527426387356446, 1.8489227456459432, 2.295838413561385], [-1.5932231826477694, -0.043483214358698064,
2.6866561252017789, -1.3064680912144833], [-4.563955043071191, -4.5294274892608124, 1.1139333008427865,
-3.356095173880258]], [[-0.39784058429088365, 1.3572530126249651, 0.73921609667405086, -2.8036097598039502],
[-1.6466307808609693, -3.6730522383966999, -4.2815488732075613, -3.0943250956889665], [0.84471742986867238,
3.3304241697775492, -2.7207357502431542, -1.8257126717947059]]], [[[0.21030801293033274, 4.6379651350087698,
4.213456762528347, 4.0550184068364885], [-2.5755175539757227, 2.6713165204428986, 3.2808072440183729,
2.8475364996882107], [4.8503832880401561, -0.89396576884489498, 4.8726952699950328, 1.8570156992262419]],
[[-4.6778874236692944, 2.1109769293880465, 0.79097589510131172, -2.1112073984121893], [2.558958067688426,
2.8307096810380727, 0.012443144332241474, -3.7601222060065065], [-1.3755439053562823, 2.9800220614031678,
1.6579582033193425, 4.4427116407434362]]], [[[-0.86660146317817688, 1.3032310329697525, 3.0027070238303377,
-2.9114837729491319], [-3.4567748888099636, 3.3638086688271702, 4.1486162466002519, 2.0749122046757407],
[0.84439318528796647, -3.6592289308593697, 0.77430002321168345, 1.7927967246699836]], [[-1.1981415218608116,
2.3445312580391588, -1.5436298697897444, 1.6111465180751141], [1.6230738725320037, -1.3035089800291666,
-4.6787506207538687, 2.9155460797717678], [3.3315156088599238, -3.5200805068877128, -1.1181004173108544,
-2.2485916181204857]]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.43950171094)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[8.4045088393544027, 6.8979158128444471, 2.4003397213113455, 7.9937980435917257],
[3.4378224936738357, 0.5341575544082926, 3.4652878195255834, 2.543953239326247], [7.9299101636769116,
2.5439652483570576, 7.3324466732916402, 0.56794097631134921]], [[-0.28787300806781335, 5.6950840494026815,
3.9733019011173134, 3.1446923061403496], [-0.19496507194445378, -1.410454278331466, 6.9737188514749224,
5.4270933045441234], [6.5038503159009711, 0.49120697278531011, 4.6967981517671227, -1.0204800490628809]]],
[[[-0.35949758918370645, 7.6401785426791786, 1.5054174653044021, 3.6924595166231891], [3.5925555235695814,
5.6239493255647357, 1.3799210624895868, 5.0591736261127398], [1.8890420084640027, 5.67231141694133, 0.1746030047470275,
1.6696492558943903]], [[0.33274027161532338, 7.0885358006194181, 7.7343620879881314, 0.0012077009723823195],
[1.6744286300615157, 6.0323948850111799, 5.6925607750057976, 6.204836692452635], [2.5518327510315268,
7.2839055234555872, 7.2678347103281471, 5.1356562306145443]]], [[[1.7453197817635084, -0.91125864227424369,
4.0257556952110924, -1.513935309050523], [7.7653415719601178, 8.1793189608048262, 3.1652516466454799,
2.6299211870438706], [3.7175031668842422, 2.7330354016725336, 2.0775817712385374, 3.2123763441032711]],
[[-0.29121608494058737, 3.2681426078221207, 2.1940756709047098, 5.289417244855918], [4.204775067338419,
-0.79238747904296858, 7.5785792129411611, 5.5481129764752986], [-1.0085484025864755, 7.7685530425028073,
-0.67030845144125362, 0.55554181390182933]]]])+(1.-msk_ref)*numpy.array([[[[7.3718730427060652, 7.8922443496774353,
5.2884244565877339, 5.7353401245031757], [1.8462785282940213, 3.3960184965830926, 6.1261578361435696,
2.1330336197273074], [-1.1244533321294004, -1.0899257783190217, 4.5534350117845772, 0.083406537061532671]],
[[3.041661126650907, 4.7967547235667558, 4.1787178076158416, 0.63589195113784047], [1.7928709300808214,
-0.23355052745490923, -0.84204716226577059, 0.34517661525282417], [4.2842191408104631, 6.7699258807193399,
0.71876596069863652, 1.6137890391470848]]], [[[3.6498097238721234, 8.0774668459505605, 7.6529584734701377,
7.4945201177782792], [0.86398415696606801, 6.1108182313846893, 6.7203089549601636, 6.2870382106300013],
[8.2898849989819468, 2.5455359420968957, 8.3121969809368235, 5.2965174101680326]], [[-1.2383857127275038,
5.5504786403298372, 4.2304776060431024, 1.3282943125296014], [5.9984597786302167, 6.2702113919798634,
3.4519448552740322, -0.32062049506471579], [2.0639578055855083, 6.4195237723449585, 5.0974599142611332,
7.8822133516852269]]], [[[2.5729002477636138, 4.7427327439115432, 6.4422087347721284, 0.52801793799265884],
[-0.017273177868172951, 6.8033103797689609, 7.5881179575420425, 5.5144139156175314], [4.2838948962297572,
-0.21972721991757904, 4.2138017341534741, 5.2322984356117743]], [[2.2413601890809791, 5.7840329689809495,
1.8958718411520463, 5.0506482290169048], [5.0625755834737944, 2.1359927309126241, -1.239248909812078,
6.3550477907135585], [6.7710173198017145, -0.080578795945922099, 2.3214012936309363, 1.190910092821305]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[3.2510674404409041, 2.1171696862303406, 2.9610258759664267, -3.8373977579450456],
[0.75383244276133166, 2.4077943881602728, 3.873284406870285, 3.7937584009819574], [-4.6069898901399364,
-2.5452970249895754, 3.650830786457707, -0.56630176651201847]], [[3.6738989513815135, -1.1553536380556686,
4.303352195803182, 2.0201689947921695], [2.5110280594242029, 1.1178178456135743, 3.5722095880572251,
-3.0495901167648221], [-1.8161969765914288, -3.850369287459924, 1.8305771607495833, 3.8129356009276751]]],
[[[4.8159492177547296, -2.7259760165966638, -0.056119891503465524, 3.2320437499651025], [4.1412540490540568,
2.3145635424798332, 4.2298625240821792, -4.9326174629443722], [1.2505234798682396, 4.1728981653768358,
-1.4526511101284445, -0.73865645812869563]], [[-2.5027203270038956, -0.75821705726011146, -2.0074201432570495,
-0.20166798891695503], [1.7962444938241209, 4.9186635916785164, -3.3612255674731486, -3.1402103698143327],
[4.8100127068213077, -3.7003932729639377, -2.3809463861562454, 2.6337296431542621]]], [[[0.8461884816413443,
2.2850095300693116, 3.1039351776827235, 2.7358221987272575], [-1.331100327658973, -2.4718869003284438,
3.8392116060077814, 3.7886003252177218], [-2.740692362699221, -1.1104811343803189, 1.065443269317063,
-1.604926521206449]], [[3.1359320207935291, 2.4159415877072101, -2.9781841648177654, 0.4457695581762291],
[1.4022534028069558, 3.2181877465159641, 4.1561033889739196, -4.5314636502141923], [2.4896032954770373,
-1.6749755107952033, -4.2977752660345292, 4.3862296692093636]]]])+(1.-msk_arg0)*numpy.array([[[[3.8098232095134126,
-2.0180524002497693, 4.420784171182504, -2.4324750966542674], [2.4681882567616125, 3.0279649104786941,
2.2383665512055266, -0.091420157761364251], [4.7846856391630048, 0.45001495814867454, 2.8428137570111911,
3.6542996408716562]], [[-3.3832925941075711, -4.6684050424331947, 2.7145812310865534, 0.57489640415196952],
[3.2363298539062395, -0.28076205609599914, -2.1610563710523598, -3.9600308036480381], [4.1445091213012599,
0.23464603550937735, -4.9214532841127738, 3.7601288072640866]]], [[[4.5878923885513938, -2.7602444517968006,
-2.4823493575559641, -1.1998619544811917], [-1.0165322624110429, 4.8743114304602564, 3.0069704689379755,
2.0086372739622043], [-1.7482883016273565, 4.5233781656491008, 1.0481669308330579, 3.3780108680134457]],
[[-4.5351514069636076, -4.760484108729206, -1.7334568308716203, -4.3080131499917833], [4.0321976091043883,
-2.6576000312675063, 1.3372423488299923, -3.8949616711167625], [3.5793384711817051, 2.60693067621275,
1.8056256765125287, -3.9915454170699869]]], [[[0.39851532295995273, 2.2465287291059273, 0.64170560779626662,
-4.7331314705888738], [3.5329039709028898, -2.5311269573107662, 2.8367974744858193, -4.3457969220676684],
[-1.526677955424999, -2.5983211468943357, -1.3293797580217093, -3.1887378668078279]], [[3.1416335105809505,
0.35146012646543134, 2.428390004415637, 2.7813900205500861], [3.5228217461650111, -0.012304332300811183,
-3.1395042313107369, 4.8647351561551702], [2.2570133784920099, -1.7535240218446777, 0.38792070998653028,
-0.21839923153693785]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-0.55399336747432937, -3.6468486902030306, 2.4533567494215669, 4.8267547347789659],
[1.1480960590338416, 3.5599245920968787, -2.8247534868419724, -2.2031349101131505], [1.7520095897646017,
4.4293583295521266, -3.2046920932014888, -3.8760923163847472]], [[3.9288042477427645, 1.103593535294765,
0.62546922225950485, 2.5431633219905123], [2.5483588394973191, -0.82358610517599207, -0.47010674146441023,
2.7635563586840011], [3.5616440522317419, 2.2995934729430481, -3.501591556463012, 1.3778428754586027]]],
[[[-4.3918539920661051, 0.24976043236636869, -2.4847081470778463, 4.8636790550226792], [-4.2172400078729559,
-2.0316184192507647, -0.53464794178739794, -0.035422588600630966], [1.7049703562375615, 4.2019750499164399,
-3.7430217705554858, -3.4952387702082346]], [[-0.39925876875124189, 1.4505137462439404, -4.1941814051173072,
-1.844757872605356], [-3.4448187389632414, -3.5340944666273377, -3.178247383159305, -1.7824872241435519],
[-3.6843631882800798, -4.1186208792142187, 2.0636953370355959, -0.18717114434561122]]], [[[-2.4316812831173742,
0.39582208925882689, 1.4893695917228467, -3.1232026180567773], [2.1122901499636226, 4.9884613457151978,
-4.7793541216702149, -3.9541373136233391], [-4.8256481088328194, -0.10764491664526066, 2.9970513787255895,
-1.0443943611478437]], [[3.6491162738908258, 3.4225261399204765, -2.9600723325757849, 3.3422667802452324],
[-3.763493116056098, 4.6894908619506595, 2.532040050484988, 0.99028387045053101], [2.5962274887920085,
-0.2721955960411897, -4.7946284910477441, -0.96141278632713245]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.6970740729665748, -1.52967900397269, 5.4143826253879936, 0.98935697683392032],
[1.9019285017951733, 5.9677189802571515, 1.0485309200283126, 1.5906234908688068], [-2.8549803003753347,
1.8840613045625512, 0.44613869325621813, -4.4423940828967652]], [[7.6027031991242779, -0.051760102760903592,
4.9288214180626868, 4.5633323167826818], [5.059386898921522, 0.29423174043758227, 3.1021028465928149,
-0.28603375808082099], [1.7454470756403131, -1.550775814516876, -1.6710143957134287, 5.1907784763862779]]],
[[[0.42409522568862457, -2.4762155842302951, -2.5408280385813118, 8.0957228049877816], [-0.075985958818899135,
0.28294512322906851, 3.6952145822947813, -4.9680400515450032], [2.9554938361058012, 8.3748732152932757,
-5.1956728806839303, -4.2338952283369302]], [[-2.9019790957551375, 0.6922966889838289, -6.2016015483743567,
-2.046425861522311], [-1.6485742451391205, 1.3845691250511787, -6.5394729506324536, -4.922697593957885],
[1.1256495185412279, -7.8190141521781564, -0.3172510491206495, 2.4465584988086508]]], [[[-1.5854928014760299,
2.6808316193281385, 4.5933047694055702, -0.38738041932951983], [0.78118982230464962, 2.516574445386754,
-0.94014251566243345, -0.16553698840561726], [-7.5663404715320404, -1.2181260510255796, 4.0624946480426525,
-2.6493208823542926]], [[6.7850482946843549, 5.8384677276276866, -5.9382564973935503, 3.7880363384214615],
[-2.3612397132491423, 7.9076786084666235, 6.6881434394589077, -3.5411797797636613], [5.0858307842690458,
-1.9471711068363931, -9.0924037570822733, 3.4248168828822312]]]])+(1.-msk_ref)*numpy.array([[[[3.2558298420390832,
-5.6649010904527994, 6.8741409206040709, 2.3942796381246985], [3.6162843157954541, 6.5878895025755728,
-0.58638693563644573, -2.2945550678745148], [6.5366952289276066, 4.8793732877008011, -0.36187833619029774,
-0.22179267551309101]], [[0.54551165363519338, -3.5648115071384296, 3.3400504533460582, 3.1180597261424818],
[5.7846886934035586, -1.1043481612719912, -2.63116311251677, -1.196474444964037], [7.7061531735330018,
2.5342395084524254, -8.4230448405757858, 5.1379716827226893]]], [[[0.19603839648528876, -2.5104840194304319,
-4.9670575046338108, 3.6638171005414875], [-5.2337722702839988, 2.8426930112094917, 2.4723225271505775,
1.9732146853615733], [-0.043317945389794943, 8.7253532155655407, -2.6948548397224279, -0.11722790219478885]],
[[-4.9344101757148495, -3.3099703624852657, -5.9276382359889279, -6.1527710225971397], [0.58737887014114687,
-6.1916944978948436, -1.8410050343293127, -5.6774488952603139], [-0.10502471709837469, -1.5116902030014687,
3.8693210135481246, -4.1787165614155981]]], [[[-2.0331659601574215, 2.6423508183647542, 2.1310751995191133,
-7.8563340886456512], [5.6451941208665124, 2.4573343884044316, -1.9425566471843956, -8.2999342356910084],
[-6.3523260642578183, -2.7059660635395963, 1.6676716207038802, -4.2331322279556716]], [[6.7907497844717764,
3.7739862663859078, -0.53168232816014793, 6.1236568007953185], [-0.24067136989108695, 4.6771865296498483,
-0.60746418082574882, 5.8550190266057012], [4.8532408672840184, -2.0257196178858674, -4.4067077810612139,
-1.1798120178640703]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
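# Note on the tests below (descriptive comment, not part of the generated data):
# each test exercises the overloaded '-' operator between constant escript Data
# and a Symbol placeholder of some shape. The common pattern is
#   res = arg0 - arg1                    # symbolic expression
#   sub = res.substitute({arg1: s1})     # bind the Symbol to a concrete numpy array
# followed by three checks: res is a Symbol, res.getShape() matches the
# broadcast shape of the operands, and Lsup(sub - ref) stays within
# self.RES_TOL * Lsup(ref) against the precomputed reference Data 'ref'.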
def test_sub_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(1.30830371112,self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0412291309402)
sub=res.substitute({arg1:s1})
ref=Data(1.26707458018,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.2604726935,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.8546037299533653, -1.305392606117024])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.4058689635493371, -2.9550800873856784]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(0.902009664206,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-3.117681444740418, -3.2512793024980069, -3.7762244881344218, -0.50644943812549315,
3.066726444630655], [-2.6348956508380805, -0.90372740616696667, 0.5252271533586752, 2.0132741900533446,
2.0837322808099037], [0.088376617597372586, 0.67864487020517306, 3.7057383001711681, 1.0445042366908988,
-2.1093161712985955], [4.328915747720707, -0.73501622742024342, -0.088412628376807412, -3.0414953794209754,
1.610361274316344]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.0196911089468177, 4.1532889667044071, 4.6782341523408215, 1.4084591023318929,
-2.1647167804242553], [3.5369053150444802, 1.8057370703733664, 0.37678251084772452, -1.1112645258469449,
-1.181722616603504], [0.81363304660902713, 0.22336479400122666, -2.8037286359647684, -0.14249457248449904,
3.0113258355049952], [-3.4269060835143073, 1.6370258916266431, 0.99042229258320713, 3.9435050436273751,
-0.7083516101099443]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(4.30012329043,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-2.4328051948060772, 1.3096803933228829], [-1.9201038070201615, 2.2529209930562519]],
[[4.4911763191005498, -0.0070408039855616167], [-4.5070979412665588, 0.23394826644475319]], [[-2.0679275681214171,
4.7260141882743518], [-1.9530690972223672, 4.2165911161948344]], [[4.2340594486013217, 0.31531838157863668],
[1.2102543060708451, 4.5768051588147358]], [[4.9016533619135778, 1.0237157761801843], [-1.6198381225390657,
1.509534129406096]], [[-2.8351524725878399, -0.8712771035569391], [-1.2500793307427105, 0.52784760832550681]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.732928485237343, 2.990442897108383], [6.2202270974514278, 2.0472022973750139]],
[[-0.19105302866928398, 4.3071640944168275], [8.8072212316978238, 4.0661750239865126]], [[6.3680508585526834,
-0.42589089784308598], [6.2531923876536331, 0.083532174236431445]], [[0.066063841829944181, 3.9848049088526292],
[3.0898689843604208, -0.27668186838346998]], [[-0.60153007148231197, 3.2764075142510816], [5.9199614129703315,
2.7905891610251699]], [[7.1352757630191057, 5.1714003939882049], [5.5502026211739768,
3.772275682105759]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-3.5839426267,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-2.9729696451374421, 2.7845056200381855, 0.070436437102223692, 0.66836223796868044],
[0.40381761203578836, -1.7869220467261826, -4.3681167712065552, 1.0762008553734699], [-3.4293067325266744,
-3.8959384230092855, -4.2869773308861872, -3.5982581222849266]], [[3.8085384127848325, -4.9902013750126919,
1.7025140755302903, -1.8585391591273237], [-1.8948326373524536, 2.0874520505745666, -1.8647114753321095,
3.9665649921657007], [-2.6617432109425376, -0.043781338271665859, -4.3924469058705498, -4.6038566089651081]]],
[[[4.1612414942039617, -0.24691459950937489, 1.8801077349311939, -4.0607604598486082], [-0.48975931816079132,
4.776651055544292, 2.5892649853139229, 2.6300466396994988], [-0.6331493645323949, -4.8747858313906498,
2.5714462579440713, -0.12625615907892662]], [[1.8766405716198298, 0.97931619405259518, -1.2333119307639082,
3.632140408148242], [0.96979041799351151, -4.0819837173164526, 3.4625138677193164, -1.7431511130821575],
[-2.7530992377422381, -3.1495479306859906, 1.3466227111831488, -2.3016323722421128]]], [[[-2.8378224290103491,
-0.7230057223129247, 0.95865498114414649, 0.14297561114879365], [2.3319242484901492, 4.9972541799736234,
-1.7121650896762564, 1.6097551517446558], [2.7133813837524077, -3.1913323682416994, -0.39896207531318861,
-3.2753783571190107]], [[1.3158800827274399, -0.034075573686918936, 3.2707189112070392, -2.9118211235462041],
[4.362994678434946, -3.2771781302292515, 3.4919565479064456, 1.6061522420425254], [-1.8973785117347788,
-4.4461539342202174, -3.8132486661529263, -0.74231592463494511]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.61097298156602342, -6.368448246741651, -3.6543790638056892, -4.252304864672146],
[-3.9877602387392539, -1.7970205799772829, 0.78417414450308964, -4.6601434820769354], [-0.15463589417679113,
0.31199579630581997, 0.70303470418272163, 0.014315495581461057]], [[-7.392481039488298, 1.4062587483092264,
-5.2864567022337559, -1.7254034675761418], [-1.689109989351012, -5.6713946772780321, -1.7192311513713561,
-7.5505076188691662], [-0.9221994157609279, -3.5401612884317997, 0.80850427916708423, 1.0199139822616425]]],
[[[-7.7451841209074272, -3.3370280271940906, -5.4640503616346594, 0.4768178331451427], [-3.0941833085426742,
-8.3605936822477567, -6.1732076120173884, -6.2139892664029643], [-2.9507932621710706, 1.2908432046871843,
-6.1553888846475369, -3.4576864676245389]], [[-5.4605831983232953, -4.5632588207560607, -2.3506306959395573,
-7.2160830348517075], [-4.553733044696977, 0.49804109061298707, -7.0464564944227819, -1.840791513621308],
[-0.83084338896122745, -0.43439469601747493, -4.9305653378866143, -1.2823102544613527]]], [[[-0.74612019769311644,
-2.8609369043905408, -4.542597607847612, -3.7269182378522592], [-5.9158668751936148, -8.5811968066770881,
-1.8717775370272092, -5.1936977784481213], [-6.2973240104558732, -0.39261025846176612, -3.1849805513902769,
-0.30856426958445482]], [[-4.8998227094309055, -3.5498670530165466, -6.8546615379105047, -0.67212150315726138],
[-7.9469373051384116, -0.306764496474214, -7.0758991746099111, -5.1900948687459909], [-1.6865641149686867,
0.8622113075167519, 0.22930603944946082, -2.8416267020685204]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.6649927252905226, 0.29496968217893382]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.03366663195)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.6313260933372291, -0.73869694977435962]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.9090880537794526, -3.9706193840215942]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.7233870114697742, 0.99043840493200186])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.6324750652492268, -4.9610577889535961]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.8033126273843685, 0.51509190965393792, 3.931306976936968, -3.3823534090429486,
-2.3486719525293087], [-2.9837425664154784, -2.4457160287299686, 3.8981965382683743, -0.89609359902144714,
4.1620406111464288], [3.6868893591462246, -2.9993029597001462, 1.8283120616948665, -2.0195573949932277,
-2.1640627499057361], [-2.9723279323425489, -4.8559061533246624, -1.0130455282709172, -3.7833351321644395,
3.514692525422209]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.86937457463)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.0660619472497519, -4.3542826649801825, -0.93806759769715242, -8.2517279836770694,
-7.2180465271634286], [-7.8531171410495988, -7.315090603364089, -0.97117803636574607, -5.7654681736555675,
-0.70733396348769162], [-1.1824852154878958, -7.8686775343342665, -3.0410625129392539, -6.8889319696273486,
-7.0334373245398565], [-7.8417025069766693, -9.7252807279587827, -5.8824201029050371, -8.6527097067985608,
-1.3546820492119114]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-1.1140360715186182, -1.5235600156934481, 4.3075103934286023, 4.6800377743432158,
-3.2505150436972521], [0.39123458636258768, 0.41088806870879768, -2.9614108446790501, 1.1049238977643405,
0.92166667279843395], [0.54565864417397059, -4.8476249672143004, 4.9444652981547943, 4.0252126389168215,
-3.9123423425216322], [-3.6777596228844844, -3.4408972758983558, 2.7718180074050611, -0.3997152204895924,
-0.16573647825956073]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.4209487163246299, 1.3152643083131128, -0.71046464711788015, 0.21557543046364458,
-2.202065459251934], [-3.9101544501984198, -2.8682151089642827, 2.7125251197023488, 1.4173123031722534,
2.7246295240806209], [-1.5744991442525436, 3.0598215212654001, 0.63494427405471487, -4.906149376046594,
-1.6839564426436748], [4.0729555430880922, -0.83371622418680769, 0.46337987461630981, 4.0014755703742395,
-2.1103899940006032]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3069126448060118, -2.8388243240065609, 5.0179750405464825, 4.4644623438795712,
-1.0484495844453181], [4.301389036561007, 3.2791031776730803, -5.6739359643813989, -0.31238840540791291,
-1.8029628512821869], [2.1201577884265141, -7.9074464884797004, 4.3095210241000794, 8.9313620149634154,
-2.2283858998779573], [-7.7507151659725766, -2.6071810517115481, 2.3084381327887513, -4.4011907908638319,
1.9446535157410425]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.6064326776506652, 4.9989076052590633], [-3.0068821433777249, -3.1193113732509516]],
[[-1.3190483681618739, 3.9479827067009108], [1.0954417889014865, 4.6359051697534426]], [[-2.9778493741722056,
3.4845430816156977], [1.7569072943914552, 1.1616150547614428]], [[-0.91210869485198565, -1.3406976214361355],
[3.2217649968914159, -2.662260898242006]], [[4.1697693146337542, -1.1741423631833072], [-4.9803850608859115,
1.2700647554700222]], [[4.6074170359664368, 1.453706456526124], [0.20949339688511692,
3.0091215511346796]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-1.04145599079)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5649766868561219, 6.0403635960536066], [-1.9654261525831815, -2.0778553824564083]],
[[-0.27759237736733056, 4.9894386974954541], [2.1368977796960298, 5.6773611605479859]], [[-1.9363933833776623,
4.525999072410241], [2.7983632851859985, 2.2030710455559861]], [[0.12934729594255767, -0.29924163064159215],
[4.2632209876859593, -1.6208049074474626]], [[5.2112253054282975, -0.13268637238876391], [-3.9389290700913682,
2.3115207462645655]], [[5.6488730267609801, 2.4951624473206673], [1.2509493876796602,
4.0505775419292229]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[2.0075159970537113, 4.417162011434554], [0.71949384400506577, 1.0783048900035652]],
[[4.7614254606302335, -2.0888542276996978], [-3.5997702799671547, 4.2825487871951644]], [[-0.39389734575197544,
1.3283252585178928], [3.6919455158435834, -0.76277259642421402]], [[-4.4972180700076887, -3.7983795355307128],
[-0.26779668046970784, -0.79380221724008582]], [[-2.0572521505738273, -1.5154686544559368], [4.0972713376059851,
4.5986089620495108]], [[-1.3971821196462377, 0.16028646761807508], [-0.63755809097850857,
-3.3787710682197272]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[3.5103565349856751, 0.91526758558677379], [-3.7224124618951135, -0.27931399630195397]],
[[1.5813622936549105, 3.6172915696233972], [-1.2364412564258132, 0.16417768270487709]], [[0.64050559170122234,
4.6361361331624593], [-0.47839680540824325, -2.1615310941440589]], [[-0.85667930966756511, 1.669882578368358],
[0.22343162562157293, 0.80905790542025358]], [[-3.5873387244847543, 3.1163266795230058], [3.5553732672252671,
-4.6758779472194405]], [[3.6742958529176484, 0.58762359541383802], [1.5778519953325496, -0.39731537378910975]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5028405379319638, 3.5018944258477802], [4.4419063059001793, 1.3576188863055192]],
[[3.180063166975323, -5.7061457973230949], [-2.3633290235413416, 4.1183711044902873]], [[-1.0344029374531978,
-3.3078108746445665], [4.1703423212518267, 1.3987584977198448]], [[-3.6405387603401236, -5.4682621138990708],
[-0.49122830609128076, -1.6028601226603394]], [[1.5300865739109271, -4.6317953339789426], [0.54189807038071791,
9.2744869092689513]], [[-5.0714779725638861, -0.42733712779576294], [-2.2154100863110582,
-2.9814556944306174]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.66483074145605592, 2.9129070748039982, -1.8655842911981346, -1.098354904466996],
[1.7426470733136448, -2.4896761957460898, 4.3864323453867851, -4.0781460331955177], [-0.62183708580819008,
-2.6186592235582786, -1.8750164189422014, -3.9631241880095969]], [[4.0419620323350909, 0.15536839603964836,
1.9771157591398101, -2.6101097405194453], [-4.7364297803535704, 1.8318126417179714, 3.2354822684907454,
2.2507758179659376], [-4.8699934080808029, -0.35744120243411981, 4.0908957400805122, -3.8440017446794084]]],
[[[4.5466344627836612, -2.8174576749848423, -0.32339288977492142, -3.3368918944053516], [3.3311423168153738,
-1.2448667289851647, -0.66737673743075376, -3.9953617725851598], [-4.8878412407428931, 3.1347720870691358,
-2.4390985397355847, -3.5615840737730475]], [[-3.7978882365989697, 4.345238312451805, 2.8310129832366435,
2.8564779239624674], [-0.85025481289091864, -4.3757742754757345, 3.5451710843902031, -2.5068001174158816],
[2.6943798866386315, 2.2746017608025317, -4.2655778273063607, 0.97165631163417387]]], [[[-2.9330039029788955,
4.3910413333213238, 2.5513441899802833, -3.8678703253194402], [-2.6748516851594308, -3.8887038302549062,
1.2485088138696518, -3.9629424578182251], [-0.38166273681210328, 3.82781593241344, -4.1817331752844087,
4.682478964767725]], [[-0.85849290617372809, -0.49338756563096275, -1.0480256440941615, -0.51008618582467946],
[-0.26820315453886501, 4.8354933917592806, 2.9555158912003154, -2.4766421456452479], [2.5098219987182944,
3.6215601735655589, -4.4497307132070123, -3.9295385075107028]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.59361652138)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.2584472628375467, 5.506523596185489, 0.72803223018335617, 1.4952616169144948],
[4.3362635946951356, 0.10394032563540101, 6.9800488667682759, -1.4845295118140269], [1.9717794355733007,
-0.025042702176787834, 0.7186001024392894, -1.3695076666281061]], [[6.6355785537165817, 2.7489849174211392,
4.5707322805213009, -0.01649321913795454], [-2.1428132589720796, 4.4254291630994622, 5.8290987898722362,
4.8443923393474284], [-2.2763768866993122, 2.236175318947371, 6.6845122614620029, -1.2503852232979176]]],
[[[7.140250984165152, -0.22384115360335155, 2.2702236316065694, -0.74327537302386082], [5.9247588381968646,
1.3487497923963261, 1.926239783950737, -1.401745251203669], [-2.2942247193614023, 5.7283886084506266,
0.15451798164590613, -0.96796755239155674]], [[-1.2042717152174789, 6.9388548338332958, 5.4246295046181343,
5.4500944453439581], [1.7433617084905721, -1.7821577540942437, 6.1387876057716939, 0.08681640396560919],
[5.2879964080201223, 4.8682182821840225, -1.6719613059248699, 3.5652728330156647]]], [[[-0.33938738159740467,
6.9846578547028146, 5.1449607113617741, -1.2742538039379494], [-0.081235163777940045, -1.2950873088734154,
3.8421253352511426, -1.3693259364367343], [2.2119537845693875, 6.4214324537949308, -1.5881166539029179,
7.2760954861492158]], [[1.7351236152077627, 2.100228955750528, 1.5455908772873292, 2.0835303355568113],
[2.3254133668426258, 7.4291099131407714, 5.5491324125818062, 0.11697437573624292], [5.1034385200997852,
6.2151766949470497, -1.8561141918255215, -1.335921986129212]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.140332416756844, -4.5756565160935745, 1.0268217328307561, 1.594533973931731],
[4.1426026647673879, 0.1548614651600202, 3.351820863446946, 0.54777524679756073], [-4.6470169243406527,
-3.4101935702258368, 1.3604597013400213, -4.3236653508957374]], [[2.3543066928954612, 1.6355558219698443,
3.8590758340122093, 0.055467084597328409], [1.3949738751098479, -2.9042097100731445, 2.1331143130237962,
-0.45715627400394165], [3.9505052117900146, -4.8644226435153097, 0.13641466419900183, 0.92434447564323374]]],
[[[-4.2036478385109302, -2.2096856472681958, -3.309442061812593, -0.17761420723311439], [-4.5417481392819026,
3.354117107537796, 2.9925164896060084, 4.231145636082223], [-4.3165407391400308, -0.16204594013147311,
-1.5308101185053733, 3.7017204822457384]], [[2.4648028362561725, 0.43817614121240833, -4.4908194091317366,
-0.081928750874263656], [-3.4087689978816016, 4.259133980931324, -4.2850896710829334, 4.6395735766216326],
[-1.3584480043808989, -4.7738821023855085, -1.2617431337636842, -1.2598313032270116]]], [[[2.2708892792624855,
1.9132737394453327, -0.50215367058696003, 0.19108419265161469], [-2.0796597802531669, 1.1505151966811367,
1.2957662425378791, -1.5883201097665802], [-1.7035021892623838, 4.8639671345493021, 3.1243484697100534,
0.47610495992410051]], [[-4.0444287366693015, -1.3614006776767349, -0.18268931922481002, 4.8063591217845332],
[3.1407426206783704, 2.8940879164962441, -4.9664997014592807, 1.6951588068340158], [-3.895479459710558,
1.7220903215355694, -3.7165673657855267, 3.1903385713544257]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-4.3482304868754991, -1.2480666735558845, 0.43538858115159051, -2.0858236027245205],
[-2.442305699452354, 2.0213192586154003, -2.5262404161243679, -4.458062700052194], [0.26228138879138641,
-2.6430658161459242, -4.7246503759525602, 4.2538788761081854]], [[-1.6124403577544308, -1.8284497197976037,
-3.0160374139385002, 2.7523938918136759], [1.4437250527651582, -2.7814473787336489, 3.5116683735594361,
-3.9808640616716562], [1.7054962689298705, 4.7974185413341068, 1.9447068850818283, -1.2797130952071156]]],
[[[3.7642823106611107, 0.11145650212965919, -0.096799862214571597, 2.0215787533002523], [0.26390717935294816,
0.12612295721321498, 4.0275730341758482, -1.2268861937462172], [-2.947926663434548, -1.4514539315574626,
2.4550945474164232, -2.7897655841602651]], [[-1.5947829088079746, 0.80620330852535815, -4.5614285986030234,
-1.9102368071164841], [2.0807019362652692, -4.099640999530064, -1.8395330667711352, -4.6367501410986929],
[-2.5162327168837786, 4.6954385782651951, -2.1576821461704854, -1.62194811763983]]], [[[0.06729391952569852,
-0.57919376543293488, -3.1838952254737416, 1.7056529660452817], [3.6116233555564143, 0.81964000588296315,
-0.16440769780998377, 0.079355513141521783], [2.9805073823987431, 1.3188532056435962, 3.4153481616516537,
-2.5138710663982189]], [[2.8884594089569315, 1.1351683507610142, -0.68804270946144719, -4.7325886514124882],
[1.1204800401276476, 0.55566378590737031, 0.94240513232859335, 2.9610440134171334], [-2.6222587774463815,
-4.4048348584786705, -0.29650368246657699, -1.0078523107846902]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.4885629036323431, -3.32758984253769, 0.59143315167916555, 3.6803575766562515],
[6.5849083642197419, -1.8664577934553801, 5.8780612795713143, 5.0058379468497547], [-4.9092983131320391,
-0.76712775407991263, 6.0851100772925815, -8.5775442270039228]], [[3.9667470506498921, 3.464005541767448,
6.8751132479507095, -2.6969268072163475], [-0.048751177655310229, -0.12276233133949566, -1.3785540605356399,
3.5237077876677145], [2.2450089428601441, -9.6618411848494166, -1.8082922208828265, 2.2040575708503494]]],
[[[-7.9679301491720409, -2.321142149397855, -3.2126421995980214, -2.1991929605333667], [-4.8056553186348507,
3.227994150324581, -1.0350565445698399, 5.4580318298284407], [-1.3686140757054828, 1.2894079914259895,
-3.9859046659217965, 6.4914860664060035]], [[4.0595857450641475, -0.36802716731294982, 0.070609189471286804,
1.8283080562422205], [-5.4894709341468708, 8.3587749804613871, -2.4455566043117982, 9.2763237177203255],
[1.1577847125028797, -9.4693206806507035, 0.89593901240680118, 0.3621168144128184]]], [[[2.203595359736787,
2.4924675048782676, 2.6817415548867816, -1.514568773393667], [-5.6912831358095808, 0.33087519079817351,
1.4601739403478629, -1.667675622908102], [-4.684009571661127, 3.5451139289057059, -0.29099969194160025,
2.9899760263223194]], [[-6.932888145626233, -2.4965690284377491, 0.50535339023663717, 9.5389477731970214],
[2.0202625805507228, 2.3384241305888738, -5.908904833787874, -1.2658852065831177], [-1.2732206822641765,
6.1269251800142399, -3.4200636833189497, 4.1981908821391158]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
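# Note on the tests below (descriptive comment, not part of the generated data):
# the same subtraction pattern is repeated for tagged Data. Here arg0 carries a
# default value plus a tagged value installed with arg0.setTaggedValue(1, ...),
# and the reference 'ref' provides matching default and tagged components, so
# the substituted result must agree with both within self.RES_TOL.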
def test_sub_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(-2.29417952191,self.functionspace)
arg0.setTaggedValue(1,-4.27612309963)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.86386679086)
sub=res.substitute({arg1:s1})
ref=Data(0.569687268944,self.functionspace)
ref.setTaggedValue(1,-1.41225630877)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(-4.72691427991,self.functionspace)
arg0.setTaggedValue(1,0.483106242273)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-0.58516003749737244, 2.93231182282255])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.1417542424175267, -7.6592261027374491]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0682662797700972, -2.4492055805498252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(4.84060376911,self.functionspace)
arg0.setTaggedValue(1,-3.32867505476)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.5332516865172998, 4.2256878903288939, -4.6404295927681405, 4.9721874322243114,
-1.5545932240349902], [0.40603544670242542, -2.879718425724147, -2.1385047584627337, 4.6127992237598132,
0.57646645021785048], [-2.6334801212800754, -2.3655947826469701, 0.48086858542515643, 1.0360291664664301,
-3.4378490059536082], [-0.23853194944872236, -2.0363663305583768, -2.3289186751171798, 3.5102407359843486,
4.1303419895739388]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3073520825884426, 0.6149158787768485, 9.4810333618738838, -0.13158366311856895,
6.3951969931407326], [4.434568322403317, 7.7203221948298895, 6.9791085275684761, 0.2278045453459292,
4.2641373188878919], [7.4740838903858178, 7.2061985517527125, 4.359735183680586, 3.8045746026393124,
8.2784527750593497], [5.0791357185544648, 6.8769700996641188, 7.1695224442229222, 1.3303630331213938,
0.71026177953180358]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-6.8619267412736988, -7.5543629450852929, 1.3117545380117415,
-8.3008624869807104, -1.7740818307214088], [-3.7347105014588244, -0.44895662903225197, -1.1901702962936653,
-7.9414742785162122, -3.9051415049742495], [-0.69519493347632366, -0.96308027210942893, -3.8095436401815554,
-4.3647042212228291, 0.10917395119720918], [-3.0901431053076767, -1.2923087241980222, -0.99975637963921926,
-6.8389157907407476, -7.4590170443303379]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-3.20552188916,self.functionspace)
arg0.setTaggedValue(1,-0.473083670166)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.71230320805011704, -3.008236723891188], [0.81066003773158002, -3.6043239509733382]],
[[3.691034498943317, -3.3919882986743777], [0.84551364067512935, 3.3207859438709946]], [[0.41963337446652105,
-3.6038224020133991], [-2.3537235378574151, -3.7120927558232997]], [[-3.4588851001838727, -0.31880183563871789],
[-1.3379489058063267, -3.9118810181560226]], [[4.4984539881701195, -3.2158956295350851], [1.5013508852420685,
2.8717656529358955]], [[-0.13701019263353231, -3.1176264463626078], [-1.67955120335195, 4.317481449568719]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.917825097207726, -0.19728516526642093], [-4.016181926889189, 0.3988020618157293]],
[[-6.896556388100926, 0.18646640951676874], [-4.0510355298327383, -6.5263078330286035]], [[-3.62515526362413,
0.39830051285579016], [-0.85179835130019388, 0.50657086666569073]], [[0.2533632110262638, -2.886720053518891],
[-1.8675729833512822, 0.70635912899841369]], [[-7.7039758773277285, 0.010373740377476182], [-4.7068727743996774,
-6.0772875420935044]], [[-3.0685116965240766, -0.087895442795001166], [-1.525970685805659,
-7.523003338726328]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-1.1853868782160886, 2.5351530537252165], [-1.2837437078975515,
3.1312402808073667]], [[-4.1641181691092886, 2.9189046285084062], [-1.3185973108411009, -3.7938696140369661]],
[[-0.89271704463249257, 3.1307387318474276], [1.8806398676914435, 3.2390090856573281]], [[2.9858014300179012,
-0.15428183452725364], [0.86486523564035522, 3.4387973479900511]], [[-4.9715376583360911, 2.7428119593691136],
[-1.97443455540804, -3.344849323101867]], [[-0.33607347753243921, 2.6445427761966362], [1.2064675331859784,
-4.7905651197346906]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-0.215341183726,self.functionspace)
arg0.setTaggedValue(1,-3.01917111711)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[3.1718058337950783, -4.3218518167555349, 4.7360170033398816, 2.6415781893387447],
[1.7953624357215787, 0.37239845986582054, 0.85595953231170441, -4.2093909477304852], [-4.0724848735753412,
-2.3789549933876364, 3.8266481046469991, -4.4686983670793881]], [[-1.3807814097985793, -0.9345570079736385,
3.2111606830229267, 2.5248569160832579], [-0.19847478717542089, 3.6200277417416071, -1.3367301493578787,
-1.9914051287776093], [4.2384277387383236, -3.1625190831895669, -4.8267032630177118, -3.7590986361039294]]],
[[[-0.96721285038350846, 0.23717549644533698, -2.0558971771798862, -2.1889488119398925], [2.1163450477817447,
-4.308535473047935, 0.96468545582662735, 0.58036767508710252], [-0.26889479983427034, -4.6749066439752021,
-2.6908936581627731, 3.3090528029139286]], [[1.0683391958055246, -4.3705975019062535, 4.6959723711804546,
-0.58815635047014858], [-1.7921642772643898, 2.8079866307247423, 4.5837878995413348, -3.6656523242301429],
[2.1083853748587442, -0.44280454111162726, -2.5427523262585563, 3.9551312168955626]]], [[[4.0479839543530591,
1.694708528108122, -1.8081650371476021, 2.5627212563151982], [2.9443513555348222, -3.4330381296191126,
-2.3471872352829837, 2.9291777099369405], [0.92208424820838264, -1.7857214370413055, 3.2638247404414695,
3.3713981402987798]], [[-2.3853121535462418, 2.1417428055374232, 3.1558224539661612, -4.4802179321245248],
[-3.0197245205703069, 2.7624146301708477, -4.6790033997765104, -4.0453165901737584], [4.8295161047601614,
-3.5764718373510842, 4.356981591617421, -4.7034098127513264]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-3.3871470175211567, 4.1065106330294565, -4.95135818706596, -2.856919373064823],
[-2.0107036194476571, -0.5877396435918989, -1.0713007160377828, 3.9940497640044068], [3.8571436898492628,
2.163613809661558, -4.0419892883730775, 4.2533571833533097]], [[1.165440226072501, 0.71921582424756014,
-3.426501866749005, -2.7401980998093363], [-0.01686639655065747, -3.8353689254676855, 1.1213889656318003,
1.776063945051531], [-4.4537689224644019, 2.9471778994634885, 4.6113620792916334, 3.543757452377851]]],
[[[0.7518716666574301, -0.45251668017141533, 1.8405559934538078, 1.9736076282138142], [-2.3316862315078231,
4.0931942893218567, -1.1800266395527057, -0.79570885881318087], [0.053553616108191981, 4.4595654602491237,
2.4755524744366948, -3.5243939866400069]], [[-1.283680379531603, 4.1552563181801752, -4.911313554906533,
0.37281516674407023], [1.5768230935383114, -3.0233278144508207, -4.7991290832674132, 3.4503111405040645],
[-2.3237265585848226, 0.2274633573855489, 2.3274111425324779, -4.1704724006216409]]], [[[-4.2633251380791375,
-1.9100497118342004, 1.5928238534215238, -2.7780624400412766], [-3.1596925392609005, 3.2176969458930342,
2.1318460515569053, -3.1445188936630188], [-1.137425431934461, 1.5703802533152271, -3.4791659241675479,
-3.5867393240248582]], [[2.1699709698201635, -2.3570839892635016, -3.3711636376922396, 4.2648767483984464],
[2.8043833368442286, -2.977755813896926, 4.463662216050432, 3.8299754064476801], [-5.0448572884862397,
3.3611306536250058, -4.5723227753434994, 4.4880686290252481]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-6.1909769509085075, 1.3026806996421056, -7.7551881204533109,
-5.6607493064521739], [-4.8145335528350079, -3.3915695769792498, -3.8751306494251336, 1.1902198306170559],
[1.0533137564619119, -0.64021612372579284, -6.8458192217604283, 1.4495272499659588]], [[-1.6383897073148499,
-2.0846141091397907, -6.2303318001363559, -5.5440280331966871], [-2.8206963299380083, -6.6391988588550364,
-1.6824409677555505, -1.0277659883358199], [-7.2575988558517528, 0.14334796607613765, 1.8075321459042826,
0.73992751899050013]]], [[[-2.0519582667299208, -3.2563466135587662, -0.96327393993354304, -0.83022230517353668],
[-5.1355161648951739, 1.2893643559345058, -3.9838565729400566, -3.5995387922005317], [-2.7502763172791589,
1.6557355268617728, -0.32827745895065608, -6.3282239200273578]], [[-4.0875103129189538, 1.3514263847928243,
-7.7151434882938839, -2.4310147666432806], [-1.2270068398490395, -5.8271577478381715, -7.602959016654764,
0.64648120711671364], [-5.1275564919721734, -2.576366576001802, -0.47641879085487293, -6.9743023340089918]]],
[[[-7.0671550714664884, -4.7138796452215512, -1.2110060799658271, -5.5818923734286274], [-5.9635224726482514,
0.41386701250568336, -0.67198388183044555, -5.9483488270503697], [-3.9412553653218119, -1.2334496800721237,
-6.2829958575548988, -6.390569257412209]], [[-0.63385896356718741, -5.1609139226508525, -6.1749935710795905,
1.4610468150110956], [0.0005534034568777102, -5.7815857472842769, 1.6598322826630811, 1.0261454730603292],
[-7.8486872218735906, 0.55730072023765498, -7.3761527087308503, 1.6842386956378972]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.3101673523710691, 0.048409361416743124]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([0.70887806236646611, -0.73932065177372408]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.15960287006)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([2.1505644823090515, -1.1111935086452744]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.45072480769555145, -1.8989235218357416]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-2.0708546339036071, 2.2714034647505121]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-0.16265022615439584, -0.29272834777410406]))
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.8495632665872739, -2.2808524667130694])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-3.920417900490881, 4.5522559314635815]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-2.0122134927416697, 1.9881241189389653]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[4.703380807076492, -4.2567944639019304, -2.0784707905046593, 0.18023637488621791,
1.1164321428411501], [3.3809585074696322, 1.5795463086222137, 1.5300027430790495, -1.6695215658775489,
-4.9671698822372887], [-0.56875186129757704, -0.88988163011215704, 1.0953422249288387, 1.2629450835517639,
1.9829321534877584], [-2.3470243950738103, -1.5345245349366401, 1.7913793425402638, 3.2778179482022125,
3.2743088989127749]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.1331140495285128, 4.902243346193929, -3.8569193535703947,
-1.2051025219030698, 4.8526791592750644], [-1.9285295160668192, -2.2715983725035862, -1.6280809153232632,
0.63571110979312273, -4.5616322454088643], [1.1933837591252878, -2.4657544917793928, 3.8511059475300904,
-3.0018611957635444, 3.560382804940847], [-4.284584247208282, -4.3366343606789348, 3.6048395763720524,
-2.2301793774115106, 4.6397261587379131]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0560012612314)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.6473795458450571, -4.3127957251333653, -2.1344720517360942, 0.12423511365478301,
1.0604308816097152], [3.3249572462381973, 1.5235450473907788, 1.4740014818476146, -1.7255228271089837,
-5.0231711434687236], [-0.62475312252901194, -0.94588289134359194, 1.0393409636974038, 1.206943822320329,
1.9269308922563235], [-2.4030256563052452, -1.590525796168075, 1.7353780813088289, 3.2218166869707776,
3.21830763768134]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.0771127882970779, 4.8462420849624941, -3.9129206148018296,
-1.2611037831345047, 4.7966778980436295], [-1.9845307772982541, -2.3275996337350211, -1.6840821765546981,
0.57970984856168783, -4.6176335066402991], [1.1373824978938529, -2.5217557530108277, 3.7951046862986555,
-3.0578624569949793, 3.5043815437094121], [-4.3405855084397169, -4.3926356219103697, 3.5488383151406175,
-2.2861806386429455, 4.5837248975064782]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[0.044613582775737015, -0.22965054883260905, -3.3954728255423361, -0.043404784226975579,
-0.81018025865095922], [4.0980455142640473, 3.3299876326958326, 4.4694158188546833, 0.047800124529065791,
-4.1128886475115927], [-0.86793714814288414, 3.7852706993586231, 2.8168181178475837, -2.6081900317073039,
1.795227525921204], [-2.7964436060814792, 2.46599228887926, -4.3894587372918519, -3.0809581135280197,
4.5629513161933648]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[0.18467263707487369, -2.906541382403959, -4.2471361917218733,
1.7478696798949915, -2.0555035204044225], [-4.1703824796767011, -0.58145273211245829, -1.3034416354534684,
-4.4238643252257699, -3.0019960418182654], [-0.011560599410600503, 4.5614736908410478, -4.1865499712522745,
0.41611035316936196, 1.4719370557053075], [3.3285499812876207, 4.2147545548351992, 3.8796865015190463,
-2.8665673368928459, 3.8754754018195001]]))
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-0.34040680852948757, 0.51480179015857086, 2.6579250902566542, -3.8908104282358877,
-1.0766494604779266], [-1.7785348143550985, 1.7875285221080928, -0.26464821727786259, 3.7856697734154743,
0.14935084548977784], [1.6454427368239299, -3.0878902261983701, 2.1577262475041596, -3.540342914142153,
2.8529020416879671], [2.8849125795379305, -3.1409630887157123, -0.30215664293811351, 3.5493007526176896,
0.27226779139430857]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.38502039130522459, -0.74445233899117991, -6.0533979157989908, 3.8474056440089122,
0.26646920182696743], [5.8765803286191458, 1.5424591105877399, 4.7340640361325459, -3.7378696488864085,
-4.2622394930013705], [-2.5133798849668141, 6.8731609255569932, 0.65909187034342409, 0.93215288243484906,
-1.0576745157667631], [-5.6813561856194097, 5.6069553775949723, -4.0873020943537384, -6.6302588661457094,
4.2906835247990562]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.52507944560436126, -3.4213431725625298, -6.9050612819785275,
5.6386801081308793, -0.97885405992649588], [-2.3918476653216025, -2.3689812542205511, -1.0387934181756058,
-8.2095340986412442, -3.1513468873080432], [-1.6570033362345304, 7.6493639170394179, -6.3442762187564341,
3.9564532673115149, -1.3809649859826596], [0.44363740174969024, 7.3557176435509115, 4.1818431444571598,
-6.4158680895105356, 3.6032076104251916]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.70323441272603926, -1.4205742401701604], [-3.6004008923276585, 4.1739347100888349]],
[[-2.7687391296703767, -0.96114141211843496], [0.45711266950319906, 0.36713165606152121]], [[3.8726070188081287,
2.6611494194452137], [-0.28060302358441547, 1.0399275995737964]], [[2.5912385881777, -0.12172669528696911],
[1.831517522951442, -4.9891623764024926]], [[3.8572507842255241, 2.9719918728052663], [0.42882676434271261,
-1.4826468418372341]], [[0.16110396579090835, 4.8052378752678955], [2.4890225545274554,
-1.4594734254395068]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.4601998637619467, 3.5105292543746671], [-1.9715134513187751,
1.6897677346566677]], [[0.99895689216195205, 3.7908023259957879], [-2.9811497902134496, 0.46336396583979944]],
[[-2.0979181014824011, 0.68992077008736707], [4.5817275596392033, 3.1112543881649586]], [[-1.0666850119171398,
-3.7136243224538679], [-2.1842168128700248, -0.60998709362389292]], [[-1.0817587775668578, 1.1357523207967555],
[0.72114300996433212, 2.0871085948686607]], [[2.6196090777455074, -4.8403131105182826], [4.4462612480444346,
2.6275786734235638]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.40075496466)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-4.1039893773891789, -4.8213292048333001], [-7.0011558569907981, 0.77317974542569523]],
[[-6.1694940943335164, -4.3618963767815746], [-2.9436422951599406, -3.0336233086016184]], [[0.4718520541449891,
-0.73960554521792599], [-3.6813579882475551, -2.3608273650893432]], [[-0.80951637648543961, -3.5224816599501088],
[-1.5692374417116977, -8.3899173410656331]], [[0.4564958195623845, -0.42876309185787331], [-2.971928200320427,
-4.8834018065003733]], [[-3.2396509988722313, 1.4044829106047558], [-0.91173241013568429,
-4.8602283901026464]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.059444899098807014, 0.10977428971152747], [-5.3722684159819147,
-1.7109872300064719]], [[-2.4017980725011876, 0.39004736133264828], [-6.3819047548765893, -2.9373909988233402]],
[[-5.4986730661455407, -2.7108341945757726], [1.1809725949760637, -0.28950057649818106]], [[-4.4674399765802795,
-7.1143792871170071], [-5.5849717775331644, -4.0107420582870326]], [[-4.4825137422299974, -2.2650026438663842],
[-2.6796119546988075, -1.3136463697944789]], [[-0.7811458869176322, -8.2410680751814223], [1.0455062833812949,
-0.77317629123957587]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.8893927498914151, -3.9495986710021471], [2.0674301637688552, -4.9323681378020368]],
[[-3.9365223323164567, -3.9166796931279513], [-2.1295831296849688, 0.049270642730291137]], [[1.1604521699930164,
-4.7263968957110194], [0.18403419227820805, -3.9919770732677948]], [[-4.4683480884742268, 3.1077188243660192],
[0.090355977211302729, -0.013539049772621325]], [[1.2239143556433882, 4.66468811676115], [4.6443599318212119,
2.902664355759085]], [[3.1499666861977964, 3.5678517696258449], [0.73557701807290599,
-4.1703133219986768]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[0.62745401025262382, 0.69024538347902542], [4.3685303267738433,
2.2109723240557235]], [[-0.7348498808881363, -2.7513236139357309], [2.5887407011037489, 4.1931952710033542]],
[[2.1336250254996258, -2.1610465999144091], [-4.054796877122568, 0.054975312915938268]], [[2.8778982280083021,
0.031841424972327559], [-1.6040852288365626, -0.14653197703489251]], [[1.0241081083490533, 2.0236436389548764],
[-4.7683548819587331, 0.81201234013234735]], [[-3.2923450240347405, 2.2531528995219965], [-3.594199051432386,
-1.9523442452177875]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.67454553417657603, 2.9833990689244789], [-3.9375622829117427, 0.0094498156860893801]],
[[2.1574617938010734, -0.48892733726965609], [0.62118276066421352, 0.99065918564407696]], [[1.7968244154456219,
-1.6314349433046926], [1.8612952961850224, 4.6630470176393288]], [[0.43763307675500052, 4.0271951272236688],
[-1.1711764825930993, -4.5547560714878275]], [[2.514477748308436, 3.7600620047710827], [1.5805136896170069,
2.4948517124974012]], [[-0.74781838229224817, -2.9876928953003903], [4.1339271192034222, 4.4719827170790509]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.5639382840679912, -6.932997739926626], [6.004992446680598, -4.9418179534881261]],
[[-6.0939841261175296, -3.4277523558582952], [-2.7507658903491823, -0.94138854291378582]], [[-0.63637224545260551,
-3.0949619524063268], [-1.6772611039068144, -8.6550240909071228]], [[-4.9059811652292273, -0.91947630285764959],
[1.261532459804402, 4.5412170217152061]], [[-1.2905633926650477, 0.90462611199006737], [3.063846242204205,
0.40781264326168376]], [[3.8977850684900446, 6.5555446649262352], [-3.3983501011305162,
-8.6422960390777277]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.047091523923952217, -2.2931536854454535], [8.3060926096855852,
2.2015225083696341]], [[-2.8923116746892097, -2.2623962766660748], [1.9675579404395354, 3.2025360853592773]],
[[0.33680061005400397, -0.52961165660971643], [-5.9160921733075904, -4.6080717047233906]], [[2.4402651512533016,
-3.9953537022513412], [-0.43290874624346332, 4.4082240944529349]], [[-1.4903696399593827, -1.7364183658162062],
[-6.34886857157574, -1.6828393723650539]], [[-2.5445266417424923, 5.2408457948223868], [-7.7281261706358082,
-6.4243269622968384]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.1002455029763922, 2.6515488300516923, -0.77582358496211956, -3.4443694355246803],
[-2.6599091620789581, -0.70044327546902529, -4.3223485855396966, 4.9338402947088049], [-4.5546987200991147,
-4.159833516760548, -1.2113818643763619, 1.341501344402797]], [[-0.99132126989665803, -3.81966827017445,
-1.5631671743562592, -2.9170370396917167], [0.94015514336519956, -4.5328623228274036, 2.5469993786586862,
4.5298447080413311], [-1.8826808741220304, -0.21100480137345734, -1.7750931594239239, -3.5343470478632764]]],
[[[-3.4624410933639691, 3.7419877938482422, -4.1641241285521557, -2.8763768520849711], [4.3838179808162643,
-0.076650368742670949, -2.2790272387608601, 1.4407514353417152], [-0.58059366739859364, 3.0282179950037378,
4.3946428646333242, -3.9361840734571896]], [[-0.40769305246403231, -0.93123230765280152, -3.5500981163613665,
-1.4382421516555786], [0.18862577968690264, 3.8234595158976035, 1.2783334948832605, -0.84599833008897818],
[-1.5452449895609535, -2.1285283532469434, 2.9517034908101669, -1.043778516582341]]], [[[2.5188074736534176,
4.926760464276164, -1.2494158315784532, -4.1847607799981805], [1.764772573553314, 4.6090994448443769,
-3.7864884573437072, 2.5743244083963681], [-0.44624416686502322, -0.44288726525437028, -2.5180469174818598,
-4.8009656021603]], [[-1.0967276921708047, -1.5639987059537273, -3.3122649580537331, -3.947879272385495],
[4.1267460589959857, -4.5801997177900287, 0.85366271506547697, -3.5573421152778972], [-4.7127368302025108,
-4.5592524679039892, -1.8586387462495613, -3.2614675219884837]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.6140016210408508, -4.1545999292001445, 4.9863169403898908,
-2.2007289242442383], [-2.3634275248295822, 1.4929955627211893, 1.1905831175627091, -3.1298255396253936],
[-0.78867439130174599, -2.5664248245819756, -1.882393556334109, -2.3300345925878529]], [[3.7578772846055983,
-1.9632657478837121, -1.3792653830852455, -0.23840250166856869], [-1.650781665029756, -3.2744446113480907,
-1.2541229166086589, -2.3471598629273149], [-1.939332795628903, 0.81542234976851624, 0.52422540705571663,
0.91808367692950554]]], [[[-3.0689349511345867, -4.8032602579819264, 3.769084882991141, -1.5864959564378189],
[-3.2063200431555905, -0.3347729502698602, 1.763270929850381, 0.65936335478094321], [-3.6143633139881959,
0.15424644431103118, 3.7156782910709154, -3.2826914978804203]], [[-0.091940996157960697, 2.5331247115220021,
3.4383904670893202, 0.77887041122794898], [4.2850997491436988, 3.3877021574758341, 3.9303516193668084,
0.97217787674818279], [-1.8219977615256742, 3.7582967180633755, -3.967674705101544, 3.2183851949652524]]],
[[[3.8000102844693906, -2.9266220460152672, 0.11901081743168795, -0.70455205529677301], [4.6787843021952913,
-3.2637583894745239, 4.6693989140352041, 2.042172937625808], [-2.9445501417858964, 0.36254085518902812,
2.8333171427728354, -2.7757509476245721]], [[3.8180860212706147, -3.4817247466262815, -3.2683613783585006,
-2.0706219843820262], [4.8065072235822566, 2.2788211866672707, 3.8562835841415382, -1.1633706258500731],
[2.652336823163191, -2.6060953909144513, 0.62089818312127321, -1.6242126976534612]]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.55573857649)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.6559840794689205, 7.2072874065442205, 3.7799149915304087, 1.1113691409678479],
[1.8958294144135701, 3.8552953010235029, 0.23338999095283164, 9.4895788712013331], [0.0010398563934135296,
0.3959050597319802, 3.3443567121161664, 5.8972399208953252]], [[3.5644173065958702, 0.73607030631807824,
2.992571402136269, 1.6387015368008115], [5.4958937198577278, 0.02287625366512458, 7.1027379551512144,
9.0855832845338593], [2.6730577023704978, 4.3447337751190709, 2.7806454170686044, 1.0213915286292519]]],
[[[1.0932974831285591, 8.2977263703407704, 0.39161444794037248, 1.6793617244075572], [8.9395565573087925,
4.4790882077498573, 2.2767113377316681, 5.9964900118342435], [3.9751449090939346, 7.583956571496266,
8.9503814411258524, 0.61955450303533866]], [[4.1480455240284959, 3.6245062688397267, 1.0056404601311617,
3.1174964248369497], [4.7443643561794309, 8.3791980923901317, 5.8340720713757888, 3.70974024640355],
[3.0104935869315748, 2.4272102232455848, 7.5074420673026951, 3.5119600599101872]]], [[[7.0745460501459458,
9.4824990407686922, 3.3063227449140751, 0.3709777964943477], [6.3205111500458422, 9.1648380213369052,
0.76925011914882102, 7.1300629848888963], [4.109494409627505, 4.1128513112381579, 2.0376916590106684,
-0.24522702566777177]], [[3.4590108843217235, 2.991739870538801, 1.2434736184387951, 0.60785930410703326],
[8.6824846354885139, -0.024461141297500433, 5.4094012915580052, 0.99839646121463099], [-0.15699825370998255,
-0.0035138914114609676, 2.697099830242967, 1.2942710545040446]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.94173695545167746, 0.40113864729238369, 9.542055516882419,
2.35500965224829], [2.192311051662946, 6.0487341392137175, 5.7463216940552373, 1.4259130368671347],
[3.7670641851907822, 1.9893137519105526, 2.6733450201584192, 2.2257039839046753]], [[8.3136158610981266,
2.5924728286088161, 3.1764731934072827, 4.3173360748239595], [2.9049569114627722, 1.2812939651444375,
3.3016156598838693, 2.2085787135652133], [2.6164057808636252, 5.3711609262610445, 5.0799639835482449,
5.4738222534220338]]], [[[1.4868036253579415, -0.24752168148939813, 8.3248234594836692, 2.9692426200547093],
[1.3494185333369377, 4.220965626222668, 6.3190095063429093, 5.2151019312734714], [0.94137526250433234,
4.7099850208035594, 8.2714168675634436, 1.273047078612108]], [[4.4637975803345675, 7.0888632880145304,
7.9941290435818484, 5.3346089877204772], [8.8408383256362271, 7.9434407339683624, 8.4860901958593367,
5.527916453240711], [2.7337408149668541, 8.3140352945559037, 0.58806387139098426, 7.7741237714577807]]],
[[[8.3557488609619188, 1.629116530477261, 4.6747493939242162, 3.8511865211957552], [9.2345228786878195,
1.2919801870180043, 9.2251374905277324, 6.5979115141183362], [1.6111884347066319, 4.9182794316815563,
7.3890557192653636, 1.7799876288679561]], [[8.3738245977631429, 1.0740138298662467, 1.2873771981340276,
2.4851165921105021], [9.3622458000747848, 6.834559763159799, 8.4120221606340664, 3.3923679506424551],
[7.2080753996557192, 1.9496431855780769, 5.1766367596138014, 2.931525878839067]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank4(self):
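# Tagged Data: arg0 carries a default rank-4 value plus a second value under tag 1
# via setTaggedValue.  Subtracting the rank-4 Symbol yields a symbolic expression;
# after substituting s1 the result must reproduce both the default and the tag-1
# reference arrays to within RES_TOL (measured with Lsup).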
arg0=Data(numpy.array([[[[2.1869721643026576, 0.35542091272423715, 2.5099944114031967, 4.7276012581949995],
[-0.23596027111215712, 3.2557128306673206, -2.4174678213407566, 4.9025765849007588], [3.4987602616867228,
-2.3969967727517094, 2.614715035832643, -3.9538109091356577]], [[0.54151166641114745, 4.3433313907072311,
-3.9824411189395126, 0.11193040884063787], [-4.3326960505433521, -2.6555021449849603, -1.6650005107909016,
-0.21278258756168267], [2.9438726263016104, 4.614591333740627, -1.4283352855346321, 4.195747529596801]]],
[[[0.4129039465707498, 0.25218586208094607, 4.2227877593235625, -3.8395686827717723], [-4.246422814789943,
-4.2708029152046789, -4.4791253262093615, 2.3703854064691221], [-0.32074671911367325, -4.0633264555676574,
-4.8034904727622223, 0.101245496731595]], [[3.3860052077100544, 4.4048456672981686, 3.3258905421337257,
-0.60591078242426555], [2.9574702297232829, 2.9390786518156196, 3.0627580449874809, -2.1902821038190523],
[1.2765769390449559, 4.5442832941192819, 0.47031486471564055, -3.2094801674304509]]], [[[1.4972627407797212,
-2.7514173987810633, 0.19744444113354387, 1.3720920976100972], [-3.147124860705004, -3.6707691951555885,
1.1521564952279704, -0.12493802519996233], [1.3717811158015873, -1.737983464544548, -2.5919544001996897,
-4.4195022009129206]], [[-3.5078213357756582, 1.5909514876001909, 3.932618549290213, 0.32844467348406869],
[-0.037083415286228494, 2.358949404615915, -3.7082781631298478, -4.9441324919087766], [1.219588665287433,
-2.1155364750524797, 2.3443039764677165, 4.1618790582351313]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[3.8216987557975131, -0.59039813916696193, -1.9474433412604117,
4.1666345075852202], [1.0033840403657788, -1.8365638623400207, -1.1472895447555285, 0.49043998461267968],
[1.525782098623524, 0.98710575843395354, 1.9521603305269073, 1.4982217977497818]], [[4.8105014981222372,
0.18255767851204219, 0.10092997041413909, 2.3610713615733667], [3.8639541584797801, 1.8455276769077198,
3.9278199867001007, 2.5501176762845867], [3.2925051662999447, 0.78129602184334157, -0.73105877010655362,
2.9378923845982694]]], [[[1.3162347911484948, -1.7534583809398363, -4.4745574675152744, 0.84388146264593455],
[-2.1398633576757309, 1.6224556269216279, 4.0151064679341637, 0.81646760002277574], [0.95506629968888479,
-3.384786519820715, 2.08961451298733, 1.4802214615087061]], [[2.5752388025402837, -2.7094797245847468,
-2.6808155024703106, -1.7780191613070642], [-0.58755728186204248, -4.3097624692690948, 3.6757907841395685,
-1.8312242243207608], [-3.7229135985460826, -1.5786991892133564, 2.6894504757052617, -0.48567336902160463]]],
[[[3.4562176552233623, -1.5291903913231595, 4.9276217294297595, -1.4641622460496571], [-3.9633150641051529,
-1.3895475276782743, -2.0928641563143735, 4.286214622292805], [-0.016872120519226819, -0.86571000346058913,
4.2635805792181465, 4.0351866281897113]], [[-1.973695982407413, -4.452260246087465, -2.5681734906597109,
3.0954829513656215], [2.6526834215550927, -4.3976717675273207, 2.0111485813735106, 2.7969396373439324],
[-0.72100288848623784, 1.4868693846138363, 2.3876845459322045, -3.759851286518614]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.2326165508314046, 0.019536700697927678, 3.3313535404093759, -2.4782775769684271],
[3.9342491756801525, 1.2904741959913864, -2.7701975380199206, 2.4757520771582744], [2.5202328466158281,
-1.3683915774027189, 3.4678638218372768, -2.2884507446983129]], [[-4.9275394706777931, 4.7975831194456333,
1.7829898690658723, -0.96339421834763073], [-2.7923805247323799, -0.026981154987572253, 2.5136604629187271,
0.14658337947380495], [1.1254475424349959, 4.8000437885357261, 3.3479331374253167, 1.6298765760037002]]],
[[[-0.46473842692243572, 1.2430212762010644, -0.23618382206216726, -1.2230171932711418], [2.0127498669810855,
-0.31475870950595031, -0.20645609212011973, -4.9825089187683691], [-4.6108703987985988, -0.47963035537661725,
-3.1919702863790422, -3.9993603357626117]], [[3.8402219409685951, 3.04406815317755, 4.7640360318949195,
1.5279973254325983], [-4.9716807317737235, -3.4706635767559693, -1.2581696190523903, -2.591452040312936],
[1.6191001515432157, -3.5419762128533741, 0.92904425652178801, 4.6966930122512043]]], [[[-2.4787875268428614,
4.8717538415307775, 3.6264063974305554, 2.0645154974740256], [-4.5070489852671329, 2.3540394703493703,
3.2007816723140134, -0.44359603196672026], [2.5406621078154732, 3.6651768892659895, -2.7039262200534422,
-1.9309627063916244]], [[-0.037762488646412962, -4.6825147640959859, -3.1180187992817956, -0.3407644296025687],
[-1.6601757648009907, -1.0174825465103088, 0.060955158106047236, 1.2341204474061849], [-0.24621306712976931,
-1.3620636349151272, -0.12322079758969373, 2.3717593913603183]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.4195887151340623, 0.33588421202630947, -0.82135912900617924, 7.2058788351634266],
[-4.1702094467923096, 1.9652386346759343, 0.35272971667916408, 2.4268245077424844], [0.97852741507089469,
-1.0286051953489905, -0.85314878600463384, -1.6653601644373448]], [[5.4690511370889405, -0.45425172873840225,
-5.7654309880053844, 1.0753246271882686], [-1.5403155258109722, -2.628520989997388, -4.1786609737096292,
-0.35936596703548762], [1.8184250838666145, -0.18545245479509909, -4.7762684229599488, 2.5658709535931008]]],
[[[0.87764237349318552, -0.99083541412011833, 4.4589715813857298, -2.6165514895006305], [-6.2591726817710285,
-3.9560442056987286, -4.2726692340892418, 7.3528943252374912], [4.2901236796849256, -3.5836961001910401,
-1.6115201863831801, 4.1006058324942067]], [[-0.45421673325854073, 1.3607775141206186, -1.4381454897611938,
-2.1339081078568638], [7.9291509614970064, 6.4097422285715888, 4.3209276640398713, 0.40116993649388366],
[-0.34252321249825979, 8.0862595069726559, -0.45872939180614747, -7.9061731796816552]]], [[[3.9760502676225826,
-7.6231712403118408, -3.4289619562970115, -0.69242339986392842], [1.359924124562129, -6.0248086655049589,
-2.0486251770860431, 0.31865800676675793], [-1.1688809920138858, -5.4031603538105379, 0.11197181985375249,
-2.4885394945212962]], [[-3.4700588471292453, 6.2734662516961768, 7.0506373485720086, 0.66920910308663739],
[1.6230923495147622, 3.3764319511262237, -3.7692333212358951, -6.1782529393149614], [1.4658017324172024,
-0.7534728401373525, 2.4675247740574102, 1.7901196668748129]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0543153066289177, -0.60993483986488961, -5.2787968816697877,
6.6449120845536473], [-2.9308651353143738, -3.127038058331407, 1.6229079932643922, -1.9853120925455947],
[-0.99445074799230415, 2.3554973358366724, -1.5157034913103695, 3.7866725424480947]], [[9.7380409688000302,
-4.6150254409335911, -1.6820598986517332, 3.3244655799209974], [6.6563346832121599, 1.872508831895292,
1.4141595237813736, 2.4035342968107818], [2.1670576238649488, -4.0187477666923845, -4.0789919075318704,
1.3080158085945692]]], [[[1.7809732180709306, -2.9964796571409007, -4.2383736454531071, 2.0668986559170763],
[-4.1526132246568164, 1.9372143364275782, 4.2215625600542834, 5.7989765187911448], [5.5659366984874836,
-2.9051561644440977, 5.2815847993663727, 5.4795817972713179]], [[-1.2649831384283114, -5.7535478777622968,
-7.4448515343652302, -3.3060164867396624], [4.384123449911681, -0.83909889251312553, 4.9339604031919588,
0.76022781599217515], [-5.3420137500892988, 1.9632770236400177, 1.7604062191834737, -5.1823663812728089]]],
[[[5.9350051820662237, -6.400944232853937, 1.3012153319992041, -3.5286777435236827], [0.54373392116198005,
-3.7435869980276446, -5.293645828628387, 4.7298106542595253], [-2.5575342283347, -4.5308868927265786,
6.9675067992715887, 5.9661493345813357]], [[-1.935933493761, 0.23025451800852093, 0.54984530862208469,
3.4362473809681902], [4.3128591863560839, -3.3801892210170119, 1.9501934232674634, 1.5628191899377475],
[-0.47478982135646852, 2.8489330195289635, 2.5109053435218982, -6.1316106778789319]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
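# Expanded Data tests: arg0 is built with per-sample values -- whereNegative(x[0]-0.5)
# masks the sample points with x[0] < 0.5, so arg0 takes one value there and another
# elsewhere.  The reference is assembled with its own mask, and the substituted result
# must satisfy Lsup(sub-ref) <= RES_TOL*Lsup(ref).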
def test_sub_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.42413566075)+(1.-msk_arg0)*(2.73592046896)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0730314190245)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(2.35110424173)+(1.-msk_ref)*(2.66288904994)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.38585027921)+(1.-msk_arg0)*(-2.14546935212)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.0449404678521192, -2.9654578889240057])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4307907470591283,
0.57960760971699665])+(1.-msk_ref)*numpy.array([-3.1904098199744872, 0.81998853680163775])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.15276640076)+(1.-msk_arg0)*(-2.04284766814)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.5429314638433684, 2.0318827224945402, -2.3636856893688076, 3.4855417570765717,
0.44952339669472341], [2.5403509140391156, 2.3524971436536095, 3.9461465487262188, 2.6955339698780154,
-0.45702899742654868], [-1.0602022717036155, 0.74771157767510843, 1.6452939357358289, -3.0322095528230921,
1.6787335078454735], [-4.263078102519902, 3.2046384335109863, 4.0147512257312048, 3.3998288702285713,
-0.56118778404289138]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[4.6956978646047602, 0.12088367826685165, 4.5164520901301994, -1.3327753563151798,
1.7032430040666684], [-0.38758451327772381, -0.19973074289221771, -1.793380147964827, -0.5427675691166236,
2.6097953981879405], [3.2129686724650073, 1.4050548230862834, 0.50747246502556287, 5.1849759535844839,
0.47403289291591832], [6.4158445032812939, -1.0518720327495945, -1.861984824969813, -1.2470624694671795,
2.7139541848042832]])+(1.-msk_ref)*numpy.array([[0.50008379570506278, -4.0747303906328458, 0.32083802123050198,
-5.5283894252148773, -2.4923710648330291], [-4.5831985821774213, -4.3953448117919152, -5.9889942168645245,
-4.7383816380163211, -1.585818670711757], [-0.98264539643469018, -2.7905592458134141, -3.6881416038741346,
0.98936188468478647, -3.7215811759837791], [2.2202304343815964, -5.247486101649292, -6.0575988938695104,
-5.4426765383668769, -1.4816598840954143]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.30825297654)+(1.-msk_arg0)*(-3.92076322418)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-0.52002332126128437, 4.3478222442071139], [3.3434922005534364, 2.8013302606159396]],
[[-2.3200079969586795, -3.0556917667690642], [-2.7103276420969582, 4.1511200748037105]], [[-0.92404095393396624,
2.6484690327098859], [-2.1529217611726503, 4.4602897709717144]], [[0.58271708006920253, 1.9322598870751975],
[-3.5184596230462182, -4.4222029485403436]], [[-4.3953168785776278, -4.450145776704125], [4.2137072146995536,
3.8966485797913304]], [[3.1838339108927798, -3.6438064267677328], [1.3789445362861974, -2.9975552731311272]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[3.8282762978010325, -1.0395692676673658], [-0.035239224013688286,
0.50692271592380855]], [[5.6282609734984277, 6.3639447433088119], [6.0185806186367063, -0.84286709826396233]],
[[4.2322939304737144, 0.65978394382986227], [5.4611747377123985, -1.1520367944319663]], [[2.7255358964705456,
1.3759930894645507], [6.8267125995859663, 7.7304559250800917]], [[7.7035698551173759, 7.7583987532438732],
[-0.90545423815980541, -0.58839560325158224]], [[0.12441906564696836, 6.952059403307481], [1.9293084402535507,
6.3058082496708749]]])+(1.-msk_ref)*numpy.array([[[-3.4007399029178136, -8.2685854683862114], [-7.2642554247325339,
-6.7220934847950371]], [[-1.6007552272204184, -0.86507145741003377], [-1.2104355820821397, -8.071883298982808]],
[[-2.9967222702451317, -6.5692322568889843], [-1.7678414630064476, -8.3810529951508119]], [[-4.5034803042483009,
-5.853023111254295], [-0.40230360113287977, 0.50143972436124562]], [[0.47455365439852981, 0.52938255252502708],
[-8.1344704388786511, -7.8174118039704279]], [[-7.1045971350718773, -0.27695679741136514], [-5.2997077604652958,
-0.92320795104797071]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.28115160685)+(1.-msk_arg0)*(-2.99624588284)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-3.7034845683259832, -3.2006988280486115, 4.6850337347787345, -1.5431340070704103],
[-3.9508001556883876, 4.7231128873762902, -3.0051096732527691, -0.071944916970104522], [3.2109725637398565,
4.0910170733379978, -3.7166755556626772, -4.402146700420734]], [[-1.5273991623031669, -1.4865381526416344,
3.902360473786171, -1.3538484671917517], [0.38707743115008331, 4.3855048056490773, 1.9022231675241139,
1.397387379628614], [1.0431068102446126, 3.0934379513218886, 2.0138255231319624, 4.2870052231295865]]],
[[[-4.2737086360299941, 4.2752748398653857, -3.7092106416006629, 1.417380944080846], [-2.4275128587779737,
-2.879911926405645, -4.23153844815229, -0.30555854124221682], [-2.6571106905165331, 2.6754859746804112,
-4.5544081791240201, -0.020082609244357563]], [[1.0570642052363857, -1.7647078574502792, 2.6330635742775668,
3.717540829723692], [4.9220552078075279, -3.9060168420798869, 1.4799017868437296, 2.7842835488914588],
[-2.0839669385912343, -4.8850626605172867, 1.7595980725429907, 3.0026383083452117]]], [[[-0.83195539201513036,
-1.2109400306251725, 2.0638657571201078, -0.86905066581365009], [-0.54092453152611775, 3.4954317917180884,
3.7826658876966359, -2.5779636206330894], [1.6720368874738147, 0.42564364358069096, -4.9027760864384096,
0.66861897918883617]], [[-4.1302737255553801, -3.2949127465748109, 1.5706320204575341, -2.2912291830881903],
[-2.19574275564025, 3.983182476523945, 2.032922034582441, -2.7459308093848711], [4.6025690264891459,
3.7012963844874829, 0.1748188819614116, 4.2002322255258893]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[7.984636175171131, 7.4818504348937598, -0.40388212793358669, 5.8242856139155581],
[8.2319517625335354, -0.44196128053114236, 7.286261280097917, 4.3530965238152524], [1.0701790431052913,
0.19013453350715004, 7.997827162507825, 8.6832983072658827]], [[5.8085507691483151, 5.7676897594867818,
0.37879113305897683, 5.6350000740368991], [3.8940741756950645, -0.10435319880392946, 2.3789284393210339,
2.8837642272165338], [3.2380447966005352, 1.1877136555232592, 2.2673260837131854, -0.0058536162844387007]]],
[[[8.5548602428751419, 0.005876766979762138, 7.9903622484458108, 2.8637706627643018], [6.7086644656231211,
7.1610635332507933, 8.5126900549974387, 4.5867101480873647], [6.938262297361681, 1.6056656321647367, 8.835559785969167,
4.3012342160895054]], [[3.2240874016087622, 6.0458594642954271, 1.6480880325675811, 0.5636107771214558],
[-0.64090360096238008, 8.1871684489250356, 2.8012498200014182, 1.496868057953689], [6.3651185454363821,
9.1662142673624345, 2.5215535343021571, 1.2785132984999361]]], [[[5.1131069988602782, 5.4920916374703204,
2.2172858497250401, 5.1502022726587979], [4.8220761383712656, 0.78571981512705946, 0.49848571914851192,
6.8591152274782372], [2.6091147193713331, 3.8555079632644569, 9.1839276932835574, 3.6125326276563117]],
[[8.4114253324005279, 7.5760643534199588, 2.7105195863876137, 6.5723807899333382], [6.4768943624853978,
0.29796913032120287, 2.2482295722627068, 7.0270824162300194], [-0.32141741964399806, 0.57985522235766496,
4.1063327248837362, 0.080919381319258576]]]])+(1.-msk_ref)*numpy.array([[[[0.70723868548106505, 0.20445294520369339,
-7.6812796176236526, -1.4531118757745078], [0.9545542728434695, -7.7193587702212083, 0.0088637904078510132,
-2.9243009658748136], [-6.2072184465847746, -7.0872629561829159, 0.72042967281775905, 1.4059008175758159]],
[[-1.4688467205417512, -1.5097077302032837, -6.8986063566310891, -1.6423974156531664], [-3.3833233139950014,
-7.3817506884939954, -4.898469050369032, -4.3936332624735321], [-4.0393526930895307, -6.0896838341668067,
-5.0100714059768805, -7.2832511059745046]]], [[[1.277462753185076, -7.2715207227103038, 0.71296475875574483,
-4.4136268269257641], [-0.56873302406694437, -0.11633395643927313, 1.2352925653073719, -2.6906873416027013],
[-0.33913519232838496, -5.6717318575253293, 1.558162296279102, -2.9761632736005605]], [[-4.0533100880813038,
-1.2315380253946389, -5.6293094571224849, -6.7137867125686101], [-7.918301090652446, 0.90977095923496876,
-4.4761476696886477, -5.7805294317363769], [-0.91227894425368383, 1.8888167776723686, -4.7558439553879088,
-5.9988841911901298]]], [[[-2.1642904908297877, -1.7853058522197456, -5.0601116399650259, -2.127195217031268],
[-2.4553213513188004, -6.4916776745630065, -6.778911770541554, -0.41828226221182874], [-4.6682827703187328,
-3.4218895264256091, 1.9065302035934915, -3.6648648620337543]], [[1.134027842710462, 0.29866686372989282,
-4.5668779033024522, -0.70501669975672776], [-0.8005031272046681, -6.9794283593688631, -5.0291679174273591,
-0.250315073460047], [-7.598814909334064, -6.697542267332401, -3.1710647648063297, -7.1964781083708074]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([0.57185536765716005,
-4.5016440600070959])+(1.-msk_arg0)*numpy.array([-0.4418100919929735, 1.7838290839713755])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.01685432532)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4449989576654145,
-8.5184983853296714])+(1.-msk_ref)*numpy.array([-4.4586644173155481, -2.2330252413511991])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-4.1734209340603439,
4.5527582003296185])+(1.-msk_arg0)*numpy.array([-1.7000682822887789, 0.76683988376374757])
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-1.5016152385157842, 0.80809700227400683])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.6718056955445597,
3.7446611980556117])+(1.-msk_ref)*numpy.array([-0.19845304377299477, -0.041257118510259261])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[3.3500126396534871, 3.4943903203535527, -1.7005861531401179, 1.4952347206139418,
-4.5979578172283739], [-2.3055331093587372, -3.6474162865795225, -3.0632961186256935, 4.7258384683418715,
-0.58388337502415943], [4.7641302227265427, -0.11182220465882864, 2.8628458472454756, 1.6967713595739653,
2.8474759788446562], [2.5863322473986914, 1.6349340161801535, -2.9934700314340712, 3.4068691472223609,
-0.97913156666695667]])+(1.-msk_arg0)*numpy.array([[-0.34407378508566389, 2.6789454460601672, -3.3795587578901665,
-4.1659261688389009, 2.3147542825953309], [-2.0615148857755603, -2.1181768528675784, 4.7855957803525566,
2.4248630846228734, 4.4597452365342818], [4.5985091304874572, 2.9992334161018466, 0.73974708846994552,
-0.24440017509511858, -0.49166350583553875], [1.5878740787090537, 3.0210382196579779, 3.6343442933400869,
1.5494651243470852, -3.3635312675197349]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-3.53998589595)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[6.8899985356048354, 7.0343762163049011, 1.8393997428112305, 5.0352206165652902,
-1.0579719212770256], [1.2344527865926112, -0.10743039062817417, 0.47668977732565487, 8.2658243642932199,
2.9561025209271889], [8.3041161186778911, 3.4281636912925197, 6.402831743196824, 5.2367572555253137,
6.3874618747960046], [6.1263181433500398, 5.1749199121315019, 0.54651586451727718, 6.9468550431737093,
2.5608543292843917]])+(1.-msk_ref)*numpy.array([[3.1959121108656845, 6.2189313420115155, 0.16042713806118192,
-0.6259402728875525, 5.8547401785466793], [1.4784710101757881, 1.42180904308377, 8.325581676303905, 5.9648489805742217,
7.9997311324856302], [8.1384950264388056, 6.539219312053195, 4.2797329844212939, 3.2955857208562298,
3.0483223901158096], [5.1278599746604021, 6.5610241156093263, 7.1743301892914353, 5.0894510202984335,
0.17645462843161352]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[2.0867998826855514, 2.311725929216629, -3.4719596731221403, 1.7817832811577139,
1.5141982978301929], [3.1010865709749673, -2.1704923524391537, 3.7204405507466163, 4.629811066660821,
1.6635344950905893], [-2.574527711983543, -1.6203338172344193, 3.7119433126415871, -4.2495237660622687,
-2.1154248806831588], [0.14708606411584846, -4.3739162090051034, 0.28212084215683131, -3.2454357930486841,
4.0490170686662843]])+(1.-msk_arg0)*numpy.array([[2.5664289274057825, -3.8876267007915413, 3.0606117231617365,
0.45673258502894409, -2.4098041248367421], [2.4831763479900646, -4.4003484897067748, -3.6829879581883054,
-4.4939546625771341, 0.095882545889256932], [4.8837698588887477, 4.688682977288769, -1.7221295814057069,
-2.8466915452782313, -1.7320653753684723], [0.9117963691890596, -0.77307239329958843, -4.179217925450148,
-2.8549317288801146, 2.3840070557997581]])
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.235357796595256, -1.480870361715898, 4.6623764990451662, -0.75539625063053251,
1.764045470290668], [0.076611613190003425, -4.3375283365611939, -0.16726979393963415, 3.2199460507232871,
-3.1622960810628884], [0.33570324662007156, 1.8340432936621101, 3.3105489280357343, -4.5476113052695135,
1.6510039686145541], [1.9731991965232831, -1.2055959073743616, 3.1802938969891557, 1.6969195493915894,
4.7202410276309497]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.1485579139097046, 3.792596290932527, -8.1343361721673055, 2.5371795317882464,
-0.24984717246047516], [3.0244749577849639, 2.1670359841220401, 3.8877103446862504, 1.4098650159375339,
4.8258305761534777], [-2.9102309586036146, -3.4543771108965293, 0.40139438460585275, 0.29808753920724484,
-3.7664288492977129], [-1.8261131324074347, -3.1683203016307417, -2.8981730548323243, -4.9423553424402735,
-0.67122395896466536]])+(1.-msk_ref)*numpy.array([[-0.66892886918947347, -2.4067563390756432, -1.6017647758834297,
1.2121288356594766, -4.1738495951274102], [2.4065647348000612, -0.062820153145580981, -3.5157181642486712,
-7.7139007133004212, 3.2581786269521453], [4.5480666122686761, 2.854639683626659, -5.0326785094414408,
1.7009197599912822, -3.3830693439830264], [-1.0614028273342235, 0.43252351407477319, -7.3595118224393037,
-4.5518512782717035, -2.3362339718311915]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-4.4842044121065818, 4.4928465492805714], [-0.8488426741388917,
-4.6899835951885827]], [[-3.8520901512994188, -4.2130527577972394], [-1.9136871018997637, -1.6373131250552273]],
[[-4.193784678832186, -0.65999502399047039], [-0.57202165743856082, -2.5346604397115016]], [[-3.7518661140733869,
-4.2333127035460327], [-1.185325910878734, 0.27013359391463077]], [[4.7175093778110231, -2.5123249429723304],
[3.8200721640724424, 4.5930030128887935]], [[-4.7065467532244636, -1.7055937731234607], [-1.331474083968188,
-4.3474657122786562]]])+(1.-msk_arg0)*numpy.array([[[3.7753100517410338, 3.7909180149825001], [0.99488027773324017,
-3.0286171370440904]], [[3.6374742681576677, 1.145681069564648], [-0.002113196116643401, -0.3884450840387661]],
[[-2.5595107128502024, -3.4455619811895488], [-4.5771852308072871, 1.3642969267838581]], [[-0.18445690526205638,
0.49675060587106934], [-3.8670923300147821, -4.1547783162827496]], [[-3.9389053222472947, 1.3272580810242838],
[3.5990473792265725, 2.2029039321052881]], [[-4.5403710311302428, -0.87738273585574866], [3.383530992594503,
3.353845130538776]]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.30522721091)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-0.17897720120150851, 8.7980737601856447], [3.4563845367661816,
-0.38475638428350933]], [[0.45313705960565454, 0.092174453107833898], [2.3915401090053097, 2.667914085849846]],
[[0.11144253207288735, 3.6452321869146029], [3.7332055534665125, 1.7705667711935718]], [[0.55336109683168644,
0.071914507359040591], [3.1199013000263394, 4.5753608048197041]], [[9.0227365887160964, 1.7929022679327429],
[8.1252993749775158, 8.8982302237938669]], [[-0.40131954231939027, 2.5996334377816126], [2.9737531269368853,
-0.042238501373582871]]])+(1.-msk_ref)*numpy.array([[[8.0805372626461072, 8.0961452258875735], [5.3001074886383135,
1.2766100738609829]], [[7.942701479062741, 5.4509082804697213], [4.3031140147884299, 3.9167821268663072]],
[[1.7457164980548709, 0.8596652297155245], [-0.2719580199022138, 5.6695241376889314]], [[4.1207703056430169,
4.8019778167761427], [0.43813488089029118, 0.15044889462232369]], [[0.36632188865777859, 5.6324852919293571],
[7.9042745901316458, 6.5081311430103614]], [[-0.23514382022516944, 3.4278444750493247], [7.6887582034995763,
7.6590723414438493]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[2.0747863514072602, 3.5162387004014164], [0.60187542749819123,
-1.4464372144424988]], [[-0.79722414010768361, -3.2668034589151995], [3.6997621801618905, -2.3676159172001379]],
[[-4.2988464154229238, 2.9969641407457024], [2.6364527127840898, 1.1108844479920323]], [[-1.1644293703177389,
4.7657018637717314], [3.954999187366635, -3.1405571932690379]], [[1.9169748083366978, 3.5980880196573022],
[4.6505164496107305, -3.4906561986190576]], [[-4.799933282554286, 3.4274402953401566], [-3.2690807817244751,
3.3152263479112811]]])+(1.-msk_arg0)*numpy.array([[[-0.19327777478115316, 1.1303927856512574], [0.070720562815962396,
0.2691348667587512]], [[-2.2293468476711373, 4.4261476420776269], [-2.1677478226049174, 3.9963032240053238]],
[[-3.0163006244468358, 0.039611843610902753], [-1.7062014469652445, -0.85393662791937786]], [[2.3270289055995583,
1.3694144393292564], [1.7400166142803206, 1.0276232159123833]], [[0.34573570990013103, -3.0575470737366093],
[-0.16902625990476849, -2.0541180978179363]], [[1.4322948650591076, 1.3766358910177399], [-1.2248059444270067,
3.8606015627846109]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-0.065448649070245501, -1.0082288113108073], [2.940556380714975, 2.6943538184190166]],
[[4.4012174975531551, 2.1536331083101858], [0.42620647125632161, -3.3632985397458972]], [[0.7816306898353016,
-4.1519556164933835], [4.587159382474109, 3.7392943755961223]], [[1.0899221203445091, -3.3294088440228276],
[4.0864323956812836, -1.4550107947057112]], [[3.7465493005246273, 3.1852429656365171], [-3.8082443104157484,
-2.7860725384289688]], [[-4.7267420836233232, 0.9545260667209563], [-4.6866116848499395, 0.18931611034152862]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[2.1402350004775057, 4.5244675117122242], [-2.3386809532167838, -4.1407910328615154]],
[[-5.1984416376608387, -5.4204365672253854], [3.2735557089055689, 0.99568262254575934]], [[-5.0804771052582254,
7.1489197572390859], [-1.9507066696900193, -2.62840992760409]], [[-2.2543514906622479, 8.095110707794559],
[-0.13143320831464855, -1.6855463985633268]], [[-1.8295744921879296, 0.41284505402078508], [8.4587607600264789,
-0.70458366019008878]], [[-0.07319119893096282, 2.4729142286192003], [1.4175309031254644,
3.1259102375697525]]])+(1.-msk_ref)*numpy.array([[[-0.12782912571090765, 2.1386215969620648], [-2.8698358178990127,
-2.4252189516602654]], [[-6.6305643452242924, 2.2725145337674411], [-2.593954293861239, 7.359601763751221]],
[[-3.7979313142821374, 4.1915674601042863], [-6.2933608294393535, -4.5932310035155002]], [[1.2371067852550492,
4.698823283352084], [-2.346415781400963, 2.4826340106180944]], [[-3.4008135906244963, -6.2427900393731264],
[3.6392180505109799, 0.73195444061103254]], [[6.1590369486824308, 0.42210982429678356], [3.4618057404229328,
3.6712854524430822]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[-2.3915780501354655, -1.5713847750997409, -2.0965030994469647,
-2.1551686098873679], [3.3436095870548073, 3.0694003316976595, -0.81504636610563619, -2.4693923873770118],
[-2.319226534159692, -3.4872612814422221, -0.52829274680925575, 0.30144897282215677]], [[-4.8403842893778295,
2.5434758702608882, -0.65369410807785577, 2.38720324090216], [-4.436083252026509, -1.9729315005424199,
2.1429634466708958, -4.4793507786187812], [-4.6789538924900809, -1.2258958461641156, 4.6704546977809684,
-4.1766942543918724]]], [[[-0.99705682132654605, 0.63139391938552247, 3.648090293364568, 0.9177900253507385],
[-2.6400178360936186, -4.9693099498708015, -2.0831389743339668, -4.8827387011810046], [0.92728596167706101,
-0.70240094092455596, -4.2968994749977032, -1.8662843872126853]], [[4.4680529533242801, -0.62442225881816338,
3.3149308543703011, -4.9724764458198436], [-2.5558868654124858, 0.54392866523319405, -4.0956874823606899,
-3.1974475782258649], [-1.1195425378333166, 1.4289844369376148, -1.2628376022909036, -2.0331849636301413]]],
[[[0.15561155815826755, 1.0566531457111576, -1.6429274914523804, -3.1609988875067607], [-3.9169786589191657,
-0.0862972748194899, -2.2849500782464229, -3.593674280376046], [4.5374768908066745, 1.9914470295786906,
-2.6711697877422749, -4.6476473568202383]], [[4.7467779338409635, 1.4258515187294085, 1.3579719892502204,
3.8840821581627232], [-0.74410865296963191, -4.1369032807050301, 0.10505268333553275, 0.20183690831668066],
[3.2258506139241625, -0.19145900822910011, -1.9876999864961387,
-1.118642852906222]]]])+(1.-msk_arg0)*numpy.array([[[[-4.1006205365267823, 0.98436034016399709, -2.0659912052394889,
-4.829130675843226], [-3.2904658009023189, 1.8958877020661227, -4.4668040074389035, 4.7329845357071445],
[-1.7781671949524789, -0.77841298536212911, -2.7909279205249824, -3.9295744077558559]], [[-1.3638768752460773,
4.4638784666445588, 2.5119633402011221, 3.4658456524464523], [-2.3171958056993116, -2.6568253529176222,
2.2419660036743707, -1.9675169728612851], [-1.1143935824519682, 1.186871291556292, 1.8459225649295181,
3.4239497313955702]]], [[[3.2057908235968178, 1.0054526017670806, 2.8530443499731035, 2.9117538931912392],
[3.7690204040343271, -0.2740720613800427, -1.6755030321386211, -4.0975186439069002], [1.5575983081276483,
-1.4138189638564769, -4.5799310621318954, -2.1831715499767892]], [[4.2307751851678876, -4.574159194957832,
3.6142529396906227, 2.9695212799242778], [1.1073212833019772, 3.5565535386979832, 4.1163170572300576,
-2.6051576587226011], [-2.4885332002171778, 2.7193644049811461, 3.6891018981647203, 2.2279362582877482]]],
[[[3.8371440925068896, 0.098907118846149444, -4.8484985466419248, -3.2646614116360695], [-1.3166337092696869,
3.8989945382792683, 2.4522596196795661, 4.8579102565531542], [2.8065577922030176, -2.6140964300168221,
0.26485552888380326, -3.2455906809923696]], [[3.4179005303801677, -4.9507538637080364, -3.9286015574556798,
0.67686821786057827], [1.2296342635912527, -1.0149250475477691, 1.9729311750755993, -4.6474538783990385],
[-1.2276432760256037, 2.5170369379074629, 0.97453825295943464, 3.8596939709877667]]]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.09580908291)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-5.4873871330441668, -4.6671938580084422, -5.1923121823556659, -5.2509776927960692],
[0.24780050414610599, -0.026408751211041803, -3.9108554490143375, -5.5652014702857127], [-5.4150356170683933,
-6.5830703643509239, -3.624101829717957, -2.7943601100865445]], [[-7.9361933722865308, -0.5523332126478131,
-3.749503190986557, -0.70860584200654131], [-7.5318923349352103, -5.0687405834511212, -0.95284563623780549,
-7.5751598615274824], [-7.7747629753987821, -4.3217049290728173, 1.5746456148722672, -7.2725033373005736]]],
[[[-4.0928659042352473, -2.4644151635231788, 0.55228121045586676, -2.1780190575579628], [-5.7358269190023199,
-8.0651190327795028, -5.1789480572426676, -7.9785477840897059], [-2.1685231212316403, -3.7982100238332572,
-7.3927085579064045, -4.9620934701213866]], [[1.3722438704155788, -3.7202313417268646, 0.21912177146159983,
-8.0682855287285449], [-5.651695948321187, -2.5518804176755072, -7.1914965652693912, -6.2932566611345662],
[-4.2153516207420179, -1.6668246459710865, -4.3586466851996049, -5.1289940465388426]]], [[[-2.9401975247504337,
-2.0391559371975436, -4.7387365743610816, -6.256807970415462], [-7.012787741827867, -3.1821063577281912,
-5.3807591611551242, -6.6894833632847472], [1.4416678078979732, -1.1043620533300107, -5.7669788706509761,
-7.7434564397289396]], [[1.6509688509322622, -1.6699575641792928, -1.7378370936584808, 0.7882730752540219],
[-3.8399177358783332, -7.2327123636137314, -2.9907563995731685, -2.8939721745920206], [0.13004153101546123,
-3.2872680911378014, -5.0835090694048404, -4.2144519358149228]]]])+(1.-msk_ref)*numpy.array([[[[-7.1964296194354835,
-2.1114487427447042, -5.1618002881481901, -7.9249397587519272], [-6.3862748838110202, -1.1999213808425786,
-7.5626130903476048, 1.6371754527984432], [-4.8739762778611802, -3.8742220682708304, -5.8867370034336837,
-7.0253834906645576]], [[-4.459685958154779, 1.3680693837358575, -0.58384574270757916, 0.37003656953775099],
[-5.4130048886080129, -5.7526344358263231, -0.85384307923433056, -5.063326055769986], [-4.2102026653606694,
-1.9089377913524093, -1.2498865179791832, 0.32814064848686897]]], [[[0.1099817406881165, -2.0903564811416206,
-0.24276473293559775, -0.18405518971746204], [0.67321132112562587, -3.369881144288744, -4.7713121150473228,
-7.1933277268156015], [-1.5382107747810529, -4.5096280467651777, -7.6757401450405967, -5.2789806328854905]],
[[1.1349661022591864, -7.6699682778665332, 0.5184438567819214, -0.12628780298442344], [-1.9884877996067241,
0.46074445578928191, 1.0205079743213563, -5.7009667416313023], [-5.5843422831258795, -0.3764446779275552,
0.59329281525601907, -0.86787282462095305]]], [[[0.74133500959818832, -2.9969019640625518, -7.9443076295506261,
-6.3604704945447708], [-4.4124427921783882, 0.80318545537056707, -0.6435494632291352, 1.762101173644453],
[-0.28925129070568367, -5.7099055129255234, -2.830953554024898, -6.3413997639010713]], [[0.32209144747146645,
-8.0465629466167385, -7.0244106403643816, -2.418940865048123], [-1.8661748193174486, -4.1107341304564704,
-1.1228779078331019, -7.7432629613077397], [-4.323452358934305, -0.57877214500123841, -2.1212708299492666,
0.76388488807906541]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.7227008802641297, 2.8863279383915197, -2.1896969198324809, 4.4302312776864046],
[-2.0197165879770251, -4.9702841021278301, 4.7902292515365534, 4.5956841306054539], [2.93934146754248,
1.646059013328439, 0.99083597111362476, 3.3910212199665182]], [[1.2787313823171234, 1.3787912835332463,
-0.071420926539018659, -3.2388329800378415], [3.349975825307892, -1.7816480803027677, 2.2965490165680036,
-1.9018094630451774], [-1.1200280993595113, -0.66137198885848481, 0.21843601647543931, -1.4087126883569634]]],
[[[-1.9454924644698748, 0.7634003631723596, 4.1323447887625271, 1.443298202567032], [-3.7655189569283309,
0.99421618018940361, 4.6046246816957677, 0.74421638400845325], [-3.5622774770791485, 1.2397714154717114,
-1.0582462113989424, 1.2971002709503896]], [[1.3379013865706302, -0.39532127021252883, 4.7111583084640927,
-0.88268548014728054], [-4.8134271904014936, -3.1339427094271266, 2.1759548242117912, -1.5082214108365442],
[-3.2684776044041417, -1.1500337944886265, -4.7387724227769104, -0.87161000308836023]]], [[[-0.77862807704750203,
4.0600646358304022, -0.60854424079166769, -2.1879855865994302], [-3.1756330451114421, 2.3997197078889831,
3.48971886859092, 1.3336201149028941], [-3.1489389998309738, -3.126335897832373, 4.4951625025311497,
4.8070472911835367]], [[-0.48575660505239959, 2.6019068715787999, 2.0846243088005867, -4.1696787529733781],
[4.548381761398181, 2.8671236182352331, -2.4623488980316131, -2.7420314450035965], [-2.6608024266156169,
1.9100775920001078, -4.7415568715485605, 4.335606352714283]]]])+(1.-msk_arg0)*numpy.array([[[[-0.066813883761502879,
2.3352842907581648, 1.9822821047204409, 2.2488083940398429], [-3.5461097023132315, 3.9394596449218593,
-3.1392993492194812, -1.195903516669877], [-2.4035485197244641, -1.2564828050011667, 4.2913267573861269,
3.3717435374804552]], [[2.0596631449037996, 1.766648199074103, 3.7051853214349446, 2.3190764926321314],
[2.0765914923551589, -3.0883600344375619, 3.3829116246346622, -0.77543432130851642], [-1.0720358718139797,
1.757742252759785, 0.37504357425332913, 2.5917331896567894]]], [[[-4.2390464570087687, 0.22513794491672012,
2.4848124269039893, 0.40807584912367822], [3.4683686407549388, 0.16480295091112396, 2.9378502257107231,
-4.5549662855752659], [-3.9215318218115671, -0.029245135802563382, -2.553738608483358, 2.451028936659565]],
[[-3.607109515954888, -3.993893382807582, 0.073725334847121182, -2.1709804092290019], [0.33344114744651687,
-0.58055248058879805, -2.3870396661749513, 4.2208499406342153], [2.5521039977169968, 0.99728976648084799,
-2.2262756306598854, -1.1032924331956737]]], [[[4.966005471135805, -2.2223123277417969, 3.9211131181159882,
1.2020059492898092], [-1.9322416802383922, 1.9213246599314813, -4.9244981020217091, 3.9198631295221293],
[0.441626539964318, -4.8403835386932315, 0.067761940060105275, -3.3198352912434692]], [[3.7289362256010783,
4.4966066758919183, 4.7606849535179343, -4.2473703353049705], [-4.1395576165491477, 1.394341710895656,
1.1616039705755199, 3.875922408907579], [3.2845443210135308, 4.7114056834057489, 1.3775615753886044,
-3.9913085015805105]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.3801587913318047, -1.0654264899468902, 0.63456255053258914, 0.019001978473544234],
[2.9683175293448718, 3.6853848061948291, -3.378954186340446, 0.041808892524110242], [-0.43021730340079323,
3.8311628728062068, -2.1793361115232388, 3.1444277139479642]], [[4.3924534836876852, 4.6258828194696662,
-1.337885612990688, 4.1259618924749724], [-4.4897704263159222, 0.15369009386017485, 2.6962829695946411,
2.0341337355139855], [-2.3651800760864128, 2.5627722574362446, 4.595047097795911, -4.3290029697233141]]],
[[[4.178160967558556, -4.7760981608958755, -1.9186656311572339, 1.2206352911034513], [-0.085772738154105355,
-1.3074976307569877, -1.956712770838227, 3.2327542991796854], [1.1038448851539249, 3.3037436446982742,
-0.27900134960503031, -0.91401651221828928]], [[-2.3182986051061674, -1.849076104179066, 1.7255801749968178,
-4.800626013966669], [-1.065938515473257, -1.7027357609246607, -4.5450782032387052, 4.2110109611691939],
[0.42483398312984555, -2.0300778021420198, -1.8600131931716146, -0.65565335271777681]]], [[[1.5409796879232402,
3.8526060616448135, 4.5046749363240419, -2.3425879056067043], [-2.0020629040094438, 4.0201378578644089,
-0.91649358299543415, -4.6048272916989044], [2.9771669103984353, -0.18712141889004474, 4.7604346415721732,
3.1208517541066438]], [[-2.0397258049700016, 4.8476984980922424, -0.83670507076672873, -0.65429392708528322],
[1.8648243434781451, -3.0663012456143979, -0.46932892583376429, 1.7337690374943984], [-2.830192293285001,
2.4237706377557764, -4.5545542759999593, -1.4623880986500994]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[6.1028596715959349, 3.9517544283384098, -2.8242594703650701, 4.4112292992128603],
[-4.9880341173218969, -8.6556689083226601, 8.169183437876999, 4.5538752380813436], [3.3695587709432733,
-2.1851038594777679, 3.1701720826368636, 0.24659350601855401]], [[-3.1137221013705618, -3.2470915359364199,
1.2664646864516693, -7.3647948725128138], [7.8397462516238141, -1.9353381741629425, -0.3997339530266375,
-3.9359431985591629], [1.2451519767269015, -3.2241442462947294, -4.3766110813204717, 2.9202902813663507]]],
[[[-6.1236534320284308, 5.5394985240682351, 6.051010419919761, 0.22266291146358075], [-3.6797462187742256,
2.3017138109463913, 6.5613374525339943, -2.4885379151712321], [-4.6661223622330734, -2.0639722292265628,
-0.77924486179391206, 2.2111167831686789]], [[3.6561999916767975, 1.4537548339665372, 2.9855781334672749,
3.9179405338193884], [-3.7474886749282366, -1.4312069485024659, 6.7210330274504964, -5.7192323720057381],
[-3.6933115875339873, 0.8800440076533933, -2.8787592296052957, -0.21595665037058343]]], [[[-2.3196077649707423,
0.20745857418558877, -5.1132191771157096, 0.15460231900727406], [-1.1735701411019983, -1.6204181499754258,
4.4062124515863541, 5.9384474066017985], [-6.1261059102294091, -2.9392144789423282, -0.26527213904102354,
1.6861955370768928]], [[1.553969199917602, -2.2457916265134426, 2.9213293795673154, -3.5153848258880949],
[2.6835574179200359, 5.9334248638496305, -1.9930199721978488, -4.4758004824979949], [0.16938986666938405,
-0.51369304575566854, -0.18700259554860121, 5.7979944513643824]]]])+(1.-msk_ref)*numpy.array([[[[1.3133449075703019,
3.4007107807050549, 1.3477195541878517, 2.2298064155662987], [-6.5144272316581038, 0.25407483872703018,
0.2396548371209648, -1.2377124091939873], [-1.9733312163236709, -5.0876456778073731, 6.4706628689093657,
0.22731582353249102]], [[-2.3327903387838855, -2.8592346203955632, 5.0430709344256321, -1.8068853998428409],
[6.5663619186710811, -3.2420501282977368, 0.68662865504002113, -2.8095680568225019], [1.293144204272433,
-0.80503000467645958, -4.2200035235425819, 6.9207361593801036]]], [[[-8.4172074245673247, 5.0012361058125956,
4.4034780580612232, -0.81255944197977303], [3.5541413789090441, 1.4723005816681116, 4.8945629965489506,
-7.7877205847549513], [-5.025376706965492, -3.3329887805008376, -2.2747372588783277, 3.3650454488778543]],
[[-1.2888109108487207, -2.144817278628516, -1.6518548401496966, 2.6296456047376671], [1.3993796629197739,
1.1221832803358627, 2.1580385370637538, 0.0098389794650213247], [2.1272700145871513, 3.0273675686228678,
-0.36626243748827081, -0.44763908047789691]]], [[[3.4250257832125648, -6.0749183893866103, -0.58356181820805375,
3.5445938548965135], [0.069821223771051599, -2.0988131979329276, -4.008004519026275, 8.5246904212210346],
[-2.5355403704341173, -4.6532621198031867, -4.6926727015120679, -6.4406870453501135]], [[5.7686620305710798,
-0.35109182220032409, 5.597390024284663, -3.5930764082196873], [-6.0043819600272927, 4.4606429565100534,
1.6309328964092842, 2.1421533714131806], [6.1147366142985318, 2.2876350456499726, 5.9321158513885637,
-2.5289204029304111]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
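# From here on the same pattern is exercised for the overloaded multiplication
# operator: res=arg0*arg1 must be a Symbol of the expected shape, and substituting
# the numeric operand has to reproduce the precomputed product within RES_TOL.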
def test_mult_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(4.62465145684,self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.83968289402)
sub=res.substitute({arg1:s1})
ref=Data(-22.3818465465,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(3.61282962415,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-1.5808963858957537, 2.6509696096021864])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-5.7115092956749542, 9.5775015382906385]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(1.49025589385,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[4.1668790679299175, -2.8153908003971773, 3.9030969110369043, -0.17962827030748763,
2.1351246321738504], [-3.6496148227091672, 1.387147904486504, -1.1823464175604426, 1.1752089388333173,
-1.7608073918316345], [1.1775740286573697, 1.5139307880954602, -4.5926484431964676, -1.3565377974967943,
2.5163302873137585], [-4.9066894975281929, -0.71152046468118701, 3.1503673258369584, -1.582645414456433,
4.0919813724626888]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[6.2097160899280084, -4.1956527337729987, 5.8166131759266637, -0.26769208852717874,
3.1818820671938379], [-5.4388599998117435, 2.0672053402977806, -1.7619987173376985, 1.7513620475973966,
-2.6240535936055083], [1.7548866366471667, 2.2561442798348774, -6.8442214108383101, -2.0215884479450947,
3.7499760415336878], [-7.3122229429659207, -1.0603475660835118, 4.6948534751098423, -2.3585466567627744,
6.0980993578224529]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.8189372207,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[-2.1082618280451539, -1.3955382894516588], [-3.4395290563777836, 4.8101517504101992]],
[[-2.0990702249254642, -0.19805771431177988], [3.5892783984855807, 2.3466998684539329]], [[1.0198837335276068,
1.3499161645518498], [2.1694219720539518, -4.6399343230104897]], [[0.39865646301668001, -4.6889954108117458],
[-1.8077039797673278, -2.0729006873515732]], [[-1.4351307436857752, -0.27135779329323384], [-0.11640854123168598,
3.5462285443877146]], [[-4.7284739075521287, -2.9968208233684992], [-0.63458679250348737, -0.3430241153835798]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[10.159581394141497, 6.7250114059464261], [16.574874591447742, -23.179819307252547]],
[[10.11528763575015, 0.95442769134320948], [-17.296507269905895, -11.308599341897418]], [[-4.9147556842796076,
-6.5051612501794489], [-10.454308288528681, 22.359552210744798]], [[-1.9211004679024974, 22.595974512838154],
[8.7112119921028786, 9.9891782770868964]], [[6.9158049573139522, 1.307656170226968], [0.56096545214841331,
-17.089052725648113]], [[22.786218910197551, 14.441491409490299], [3.058033914157781,
1.6530116772185961]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-0.0961090764765,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-1.3508458008250446, -1.7001494994927766, -1.1781435318758104, 0.94544141796815939],
[0.48819490611616523, -1.7587019774587032, -3.7925425828521075, 4.7932646338579481], [0.58005604545753187,
-1.1056764808385546, -0.51753380988709718, 0.78769257503235046]], [[2.2158543714442409, 2.8083791437737293,
4.2821497276046312, 3.557157624087198], [1.3939326777613186, 1.1701609581270382, 0.2011137853628675,
2.0268107124192767], [4.9657490768337222, -0.36506867019444478, 1.9579179042357691, 2.3678413818675184]]],
[[[4.6392916421533705, 4.4042259724661363, -0.16399061756687949, -2.3651374585729892], [-3.2439831086523521,
-0.75751658111162179, 0.90117784493079078, 3.4349562957397168], [2.5698732564560274, 2.3597838208145365,
-4.9703254679114597, 3.3611480514859409]], [[3.5875371307296735, 3.7296897049486226, 0.57748787491421005,
-3.5628770219331427], [-2.1948129576712505, 4.1072354163079758, 3.4869674239178412, 1.087096416125072],
[-1.8854495462953502, -0.47642374646276409, -4.0405955013288795, 2.2924875059879755]]], [[[4.5285297494026793,
-2.5139281223909449, 2.208483948920561, -2.7970423581490991], [-1.7523437586040416, 1.8001610169726279,
-2.0370614441012389, -1.7571467006380495], [-4.7379091083725093, -1.7199833882948159, -1.9229770651511577,
1.983783387985067]], [[3.6886437141901496, 1.3947955105400336, -1.5553692753039892, -3.1732704417249646],
[-3.1233013224428783, -0.18349036348379588, -2.6033177681431141, -4.8475794865627444], [0.44359684613617745,
2.4458348840139719, -4.4593547045866213, 2.8568083531481321]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.12982854237943392, 0.16339979826821033, 0.11323028680532936, -0.090865501543539021],
[-0.046919961567349337, 0.16902722285092533, 0.36449776513566223, -0.4606762372674883], [-0.055748650833525863,
0.10626554545516434, 0.049739696513606095, -0.075704405933744209]], [[-0.21296371724588994, -0.26991072590391707,
-0.41155345565411638, -0.34187513413231108], [-0.13396958233003523, -0.11246308901442943, -0.019328860177915387,
-0.19479490576326494], [-0.47725355778845102, 0.035086412742886867, -0.18817368159287634, -0.22757104845409318]]],
[[[-0.44587803523243974, -0.42328609080747298, 0.015760986805161359, 0.2273111768833927], [0.31177622067789706,
0.072804219026262926, -0.086611370417368036, -0.33013047732063483], [-0.24698814533960797, -0.22679664370263819,
0.4776933905085276, -0.32303683512905357]], [[-0.34479488045953066, -0.35845703308646898, -0.055501826334373089,
0.34242482017728643], [0.21094144640040824, -0.39474260273287426, -0.33512921881633451, -0.10447983259467819],
[0.18120881463745528, 0.045788646284003677, 0.38833790204776181, -0.22032885703438657]]], [[[-0.43523281201138325,
0.2416113101712597, -0.21225535274389759, 0.26882115790732197], [0.16841614030876842, -0.17301181285021075,
0.19578009411842659, 0.16887774663202701], [0.45535606883521218, 0.16530601500391165, 0.18481554981714085,
-0.19065958934863894]], [[-0.35451214082160976, -0.13405250839155117, 0.1494851046293674, 0.30498009156431655],
[0.30017760565777185, 0.017635089376762319, 0.25020246647106092, 0.46589638759990259], [-0.042633683210029791,
-0.23506693191655562, 0.42858446233889236, -0.27456521249137733]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([0.27659058494851418, 4.1541405281225892]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(3.8719303416)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.0709394780619239, 16.08454275409127]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.7426337922420245, 3.9291817340183464]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([0.24137850302835329, -1.7566967446888535])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([0.90339134215470884, -6.9023807614409334]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-4.5985565783651303, 1.9500350746471993, 4.9713294753194841, -4.1975843272554876,
-2.7203349447146974], [0.6454726334120906, 4.0369461924736108, -0.05835204361358759, -4.4624467830414529,
-3.7659400185729366], [2.0977327530498648, 0.21380148281940414, -2.0069737126558609, 2.3624658088422841,
4.2025541873980536], [3.414052245462365, -0.88493614463302261, -1.2061553540091854, 4.49473416391168,
2.5601548526811913]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(1.33330352327)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-6.1312716879061586, 2.5999886355337587, 6.6282911047962196, -5.5966539727668181,
-3.6270321662717726], [0.86061093630488039, 5.3824745816900785, -0.077800985340197651, -5.9497960182490823,
-5.021141095199475], [2.7969144705273297, 0.2850622703241823, -2.6759051222010943, 3.1498839865422879,
5.603280304805077], [4.5519678875143299, -1.179888479511161, -1.608171183115485, 5.9928448969206256,
3.4134634852052463]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-0.80021427988594684, -1.1537754790930244, 2.8892977581295964, 3.7382841198075329,
2.3764815828521835], [0.29503112693550992, -2.1167484120073929, -3.8919266120092, 1.3224523748721193,
2.9631976530881987], [-2.8104707825838995, -2.0632038633268568, -3.0589018293825343, -1.678440560478224,
-4.5543583893018766], [-0.82692491278534597, 3.2768573560743448, 3.9160327845735843, 2.3191381875549339,
4.4585962073227758]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[0.84019739061084842, -0.26041818053762533, -4.1862385457734819, 2.5897362481178323,
1.3358121377857461], [-2.9413855411668366, -0.72774497299829122, -3.5296778480396993, -2.4701281082949977,
1.8361878086654535], [2.766871279727253, -4.8815007323834605, -2.5917825490500368, -4.9262737607479554,
-2.1516904953173679], [1.8198771262398861, -3.8256760862124253, 4.2065485224111754, 2.1262330288097564,
0.59770635028954899]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.67233794988971163, 0.3004641110143324, -12.095289645299022, 9.6811698908288335,
3.174532943598229], [-0.86780029096226652, 1.5404530159404959, 13.737247148605071, -3.2666267830530953,
5.4409874052666343], [-7.7762108908439682, 10.071531169886436, 7.9280083806508852, 8.2684576920589663,
9.799569658529764], [-1.5049017338959638, -12.536194825062895, 16.472981923661731, 4.9310282127532963,
2.6649312664937215]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.93968623179119604, 3.5541982898616844], [-4.4072293043240576, -3.6076482066197646]],
[[1.7484917853830488, 4.9003868861327007], [4.7568448118670297, -2.9711396711199276]], [[3.8141950057192862,
1.8792050658033421], [-3.9338669245816726, 1.8697569231208337]], [[2.7319382834348493, 4.8419678958442489],
[1.6265368297291136, 2.7028283208639312]], [[-2.4918167983349662, 0.66893189862375824], [-0.98140319773957252,
-4.1758241797866216]], [[2.3073753559238792, -1.9496085178777891], [-0.68687199404203181,
-1.8466377636332689]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.7863046684)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[4.497624598049291, -17.011475867168713], [21.094342193976221, 17.267303453273268]],
[[-8.3688143950302809, -23.454744630041624], [-22.767708529872653, 14.22077967833674]], [[-18.255899362045483,
-8.9944479793272372], [18.828685625972383, -8.9492262898982791]], [[-13.075888959772939, -23.175133544101051],
[-7.7851008214498698, -12.936559810022931]], [[11.926594374657277, -3.2017118692216404], [4.6972947069193065,
19.986766766112023]], [[-11.04380143779947, 9.331420350662329], [3.2875786316735858,
8.8385709489135635]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-3.7703901363085679, -1.2567782536949124], [-4.6867702962008151, -4.0264117118029379]],
[[4.9065305059289148, -2.9124024019871833], [2.1575041693932544, -2.447359134212459]], [[-4.2014241124001597,
2.1567970243772496], [-0.21332683825097298, -4.1649402688134085]], [[2.4269573018600399, 1.862212593201459],
[-0.8497946023648062, -0.85421311729337468]], [[-3.9387175807922148, 2.8138330952953154], [-3.5224045840887532,
2.3328843983658345]], [[-4.1835084349073455, 1.5103476384019734], [3.5299839973193272,
0.52130047189201001]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.7025504227899226, 0.94126871779523835], [0.2494407260336402, -1.4801201322289135]],
[[2.2922758002791603, -0.52083778427225891], [-3.8446937528722511, 4.7302904730413342]], [[4.5317616448777596,
4.7075623194640848], [3.6913207941396351, -0.94890681756314521]], [[0.10066689821055874, -0.2379214158604368],
[3.5670839439496831, 1.12875998069849]], [[-3.2904800400470879, 0.6205159319494804], [3.0069046365957437,
-0.47921917980699202]], [[-2.06878107963567, -2.1195493051668968], [-1.6911454119040039, 1.930100136733504]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-13.960059593272241, -1.182966055408349], [-1.1690713854372303, 5.9595730352818101]],
[[11.247121142072317, 1.5168892139602093], [-8.2949428018620797, -11.576719596675883]], [[-19.039852646439627,
10.153256402690602], [-0.78745779398387905, 3.952140215820322]], [[0.24431426366771691, -0.44306025680762678],
[-3.0312886817506057, -0.9642015817884666]], [[12.960271582979336, 1.7460282654774637], [-10.591534675862574,
-1.1179629479694033]], [[8.6547630966325499, -3.2012562875353661], [-5.9697162411611355,
1.0061621120780087]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.62243391611320931, 2.0870921150140633, -1.966483871868502, 2.0460162713266978],
[-4.0678614422698969, -1.0483170813789053, -3.3875965478845105, -0.56278682367596566], [-3.1888119343249111,
1.1466037700100813, -0.08422750542817603, 1.334817640609244]], [[0.94229536045356443, -0.46607285357294259,
3.2913807414193705, 4.4571709007297127], [-4.8728928412169248, 1.2037128407211304, 1.7363887141819356,
-1.8638393114984897], [0.2688971714658468, -1.0619718642775746, -1.5898347593643845, 4.0497830306245604]]],
[[[-3.0176256202980776, 4.6481698965552418, 2.9546667018974437, -0.80203271850496982], [0.920018392214903,
2.5683755474820238, -0.39881454964042007, -1.8042576364273657], [-2.3945875160036332, 4.1111523243175156,
0.78082260002804471, -4.7775799631740909]], [[2.8847051100624723, 3.801654962351078, 1.7703173227715148,
4.1273910274214991], [-0.89823810002448035, -0.13134866838888204, -2.9139897570290261, 3.0266778096414111],
[-4.6565287368895367, 4.2608689122302597, 4.4884714229987637, 0.39909756290562726]]], [[[-3.0170682335317931,
3.5429625779103553, -1.5481240066779933, 2.2741378666795491], [0.99402869902853208, -2.7818018223223495,
-4.1871147291249109, -3.2256430704428585], [-1.1555881857999983, -1.3659488684092769, -3.6647624059065964,
-4.1746014766224038]], [[-0.16412546559365726, 2.0500472712024926, 0.70515501953914139, 4.9173519977774696],
[4.8984165381421718, 2.4090796502395673, -4.3641606786463338, -2.9169347489558826], [-2.5705157992905558,
-2.1873999149378887, 0.30099797008132612, -0.40586512460845547]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.38997505647)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-2.7324693660399006, -9.1622823254735639, 8.6328151464594995, -8.9819603962624388],
[17.857810264753809, 4.6020858385281453, 14.87146434660761, 2.470620118049252], [13.998804851470311,
-5.033561950002353, 0.36975664789865109, -5.8598161472149455]], [[-4.13665312822154, 2.046048201684489,
-14.449079356187196, -19.566869076641478], [21.39187802580896, -5.2842693459222208, -7.6227031436002131,
8.1822080867524907], [-1.1804518754912556, 4.6620299948548602, 6.9793349375236682, -17.778446488570129]]],
[[[13.247301202882928, -20.405349904126975, -12.970913121521617, 3.5209036287121829], [-4.0388577933199716,
-11.2751045891017, 1.7507859250800237, 7.9206460193673678], [10.51217946579799, -18.047856157115579,
-3.4277917376536409, 20.973456868640252]], [[-12.663783478454937, -16.689170458038671, -7.7716488890093069,
-18.119143658691538], [3.9432428538813187, 0.5766173779281577, 12.79234234817573, -13.287040088306636],
[20.442045004695569, -18.705108243592797, -19.704277588657195, -1.7520283462549344]]], [[[13.244854288882133,
-15.55351734304433, 6.7962257736436893, -9.9834085097043221], [-4.3637611941537289, 12.212040612046716,
18.381329219449761, 14.160492620329668], [5.0730033112170725, 5.9964814607344739, 16.088215549830551,
18.326396353088406]], [[0.72050670008820039, -8.9996563851698248, -3.0956129467236795, -21.587052614141587],
[-21.503926418659777, -10.575799573608803, 19.158556521698415, 12.805270789276145], [11.284500241155726,
9.6026310651088593, -1.3213735807060782, 1.7817377733234934]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.92475193690629176, -2.8751640037436377, 2.3253579475929946, -1.4883270341411139],
[0.99308590109939665, -4.571048057187971, 0.29582761355193554, 0.13789701080785477], [-4.8513899543422578,
-3.6257472257559966, -0.35978952486470916, 3.2517553292871213]], [[1.4829500707237964, -2.4100404084555849,
-1.3007935048236199, -1.9592333033911915], [-3.2571300156448268, 3.5809336502177818, 4.572845912197149,
0.11594932874247199], [4.1148495529245857, -4.948814678027361, 0.20924193900359267, -1.1213985895158896]]],
[[[-2.5470666325037494, 3.6152403556802923, 3.973428980280806, -2.5374743265932209], [-1.5104931521079923,
1.9242528516494346, 1.2141691608491234, 4.0577747879052453], [3.8342739616484334, -3.0938013228729275,
-0.013278763548063566, -0.42321566529779808]], [[2.6294282632341996, 3.3952741627917256, 2.4458864889971466,
4.2648559736340506], [-3.7590978982471257, 1.9398108969769856, -0.72053544714256734, -2.2852607861733709],
[-3.2806396751883415, 0.51560443280332802, 3.3391590838269085, -2.5163633463539634]]], [[[4.01150830599126,
-0.63639401929707073, -3.1327871719882161, 3.2114179123865725], [2.4499611723235173, 2.7753873236361759,
-4.685579988073755, -2.2124483061230249], [-1.592101861637012, -4.6991157531893437, 1.8862972499478818,
-2.4723033101868044]], [[1.6629673331527997, -3.797885972234567, -2.7734235709739954, -0.64462102158809298],
[-0.88743325197640388, 2.5092803298320216, 1.525489125478364, 2.2445055975567962], [-1.160115769838137,
-1.3355312013577905, -1.0067006671660481, 3.6058946626271364]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[1.1454670721774276, -0.011324883951625786, -3.6679325603974888, 0.094693748129400568],
[-0.94166879518392843, -4.1783274612573384, -0.26913364710175003, -3.1060599945156477], [2.1413905443235972,
3.4882436840724846, 2.2658723445074678, 2.1819015356595139]], [[-1.9800856525493749, 2.7480580905119893,
-3.1758715478375219, 4.1124829257304629], [2.0216531202224841, 4.0515397505305888, 4.044920456333422,
-2.3883049937748946], [1.5444958016374528, -1.2594632181245204, 3.5895692628257976, -1.8412492131846401]]],
[[[-0.79280116164120962, -1.2215336421582137, -4.9961696612945055, -2.7749169673973384], [-0.23664202961414915,
-3.5042031492270143, -4.0093512501017203, 0.23193992235689542], [3.1633334993556197, 1.6180559532355963,
2.5015418045872986, 4.5068540065663516]], [[-4.1132185738946214, 4.1318631987586905, -4.2991786205876377,
0.23269781735957817], [1.6926075754432439, -3.007700180988361, 3.1039787293459948, 3.355203306995362],
[-4.1457163793408522, -1.2887202396242636, -2.7544192723911931, -4.0173861352686675]]], [[[1.4375090967135709,
0.047062319770922123, -0.75634196908244178, -0.17805460405137996], [2.5278812537929483, -3.9412959574596886,
-0.1516720332141368, -2.385490768218621], [2.0634420046107511, 3.7303717046166387, -3.7967716979561583,
-0.36579638919369373]], [[-0.89677692789005903, -0.33159623329473931, -2.0783805922287799, 3.3237758621528677],
[1.8764406996924805, 3.8567013916314448, 2.4876054261100879, -3.122046411865298], [-3.9505368448428069,
-3.9474451391708176, 0.76222063661286921, 2.065165407462576]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[1.0592728936584552, 0.032560898684288464, -8.5292561305554226, -0.14093526530513639],
[-0.93515800400241478, 19.099335624075504, -0.079617164548639499, -0.42831638863356958], [-10.388720575054998,
-12.647489860286688, -0.81523713423442645, 7.0950099465605785]], [[-2.9363681584872698, -6.6229310429171893,
4.1311530815811848, -8.0573135077187672], [-6.584787059098673, 14.508295027869941, 18.496797973906915,
-0.27692236086049277], [6.3553678588617721, 6.2328500602902022, 0.75108843274136661, 2.0647742706124967]]],
[[[2.0193173850265365, -4.4161377189515036, -19.851925322587327, 7.0412805631986641], [0.35744616523310901,
-6.7429729026590115, -4.8680306428853894, 0.94115996924851042], [12.129087268589474, -5.0059436486027034,
-0.033217382128710969, -1.9073712167890253]], [[-10.815413171058387, 14.028808362935354, -10.515302901880693,
0.99242267641760229], [-6.3626775794058616, -5.8343695859208742, -2.2365267016703343, -7.6675145471157151],
[13.600601636143761, -0.6644698681936374, -9.1974441340729562, 10.10920321894068]]], [[[5.7665796814044832,
-0.029950178836461127, 2.3694584183777816, -0.57180774483350039], [6.1932109200372141, -10.938622839032124,
0.71067144357861733, 5.2777750094174012], [-3.2852098569207846, -17.529448442415831, -7.1618400125346513,
0.90435962385794955]], [[-1.4913107362062921, 1.2593646828759113, 5.76422972394219, -2.1425757917908261],
[-1.6652158722689767, 9.6775449400565687, 3.7948150260119111, -7.0074506472637719], [4.5830800930287374,
5.2719361490107728, -0.76732802340590534, 7.4467689202114986]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
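# taggedData variant (descriptive comment): arg0 additionally carries a tagged value via
# setTaggedValue(1, ...), and the reference Data sets a corresponding tagged value, so both
# the default and the tagged component of the product are checked against the same tolerance.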
def test_mult_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.142013671,self.functionspace)
arg0.setTaggedValue(1,-2.04077395087)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.905206509275)
sub=res.substitute({arg1:s1})
ref=Data(-2.84417122722,self.functionspace)
ref.setTaggedValue(1,1.84732186428)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(1.54368119889,self.functionspace)
arg0.setTaggedValue(1,-0.973182859739)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-0.97051483006179051, -4.8243289242685101])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-1.4981654964063673, -7.4472258576349226]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.94448839773843207, 4.6949542188401541]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(0.576275206322,self.functionspace)
arg0.setTaggedValue(1,-0.446417285252)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-3.7798020411794298, 2.8125776443752777, -0.74593700484018655, 4.9042983986909512,
-1.1062378936297144], [1.3747147971013396, 1.7312150406230939, -2.4865059810459189, -3.9444781957615138,
-4.8713070674060148], [4.7677542872819085, -0.65669250050514094, -2.2966507465733335, 4.6331137703181184,
-4.2587467390331817], [-2.2122452558031123, -0.89486317692759698, -2.7263171047505361, 1.4136050574112167,
1.5057522304514919]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.1782062011384205, 1.6208187623100703, -0.42986500136777495, 2.8262255715721345,
-0.63749747039309623], [0.79221405333400008, 0.99765630472347699, -1.4329117472490631, -2.2731049860965866,
-2.8072134853290578], [2.7475385855977827, -0.37843560621895822, -1.3235028828319906, 2.6699485939051231,
-2.4542101557111002], [-1.2748620912236397, -0.5156874619142493, -1.5711089520403427, 0.81462554611800553,
0.86772767727381395]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.6873689660148408, -1.255583276563631, 0.33299917267007023,
-2.189363577211358, 0.49384371731752186], [-0.61369644771829823, -0.77284431862310299, 1.1100192498224006,
1.7608832478891809, 2.1746356766622736], [-2.1284079256789306, 0.29315888332112217, 1.0252645914581893,
-2.0683020716109741, 1.9011781578167486], [0.98758452140814967, 0.39948239011636527, 1.2170750806399457,
-0.63105773214859318, -0.6721938229809169]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(4.6954921918,self.functionspace)
arg0.setTaggedValue(1,3.80656545201)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.3283491776040073, -3.9068878738203718], [-2.1695978858355423, -2.2223735496995447]],
[[-2.3211489651914921, 4.272069872491878], [1.636342469753175, -4.2787938517786497]], [[2.7410635950334186,
-3.5668158773147507], [-1.0064480730166228, 1.389332564769]], [[0.77463712529690731, -0.94041585508240466],
[3.6978341544417166, -2.6780892355753592]], [[-4.4954676727861065, -4.2409706282499835], [2.3785224394198679,
-4.1039517994892138]], [[-2.0175257524312817, 0.85038925666007348], [-3.2277420742959917, -3.855794844823607]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[15.628237575034362, -18.344761505774329], [-10.187329932293613, -10.43513764988427]],
[[-10.89893684206883, 20.05947072912349], [7.6834332898420676, -20.091043121362375]], [[12.870642707715545,
-16.747956101531248], [-4.7257690683049729, 6.5236002096908132]], [[3.6373025733125388, -4.4157153045874011],
[17.36315139876443, -12.574947094596416]], [[-21.10843335607106, -19.913444470614692], [11.168333542324792,
-19.270073630038475]], [[-9.4732764173030475, 3.9929961146407393], [-15.155837707011633,
-18.104854587064445]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.669578991701924, -14.871824405371543], [-8.258716356981127,
-8.4596103757528684]], [[-8.8356054598727027, 16.261913585211406], [6.2288447130233893, -16.28750885246448]],
[[10.434037982623636, -13.5773180922763], [-3.8311104639897189, 5.2885853424057174]], [[2.948706919001554,
-3.5797545044815582], [14.07604773957002, -10.194321961547963]], [[-17.112291933867024, -16.143532276496508],
[9.0540013447323737, -15.621961136660534]], [[-7.6798438277506191, 3.2370623651649013], [-12.286611468022571,
-14.677335446353782]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-1.72281700023,self.functionspace)
arg0.setTaggedValue(1,1.23448641864)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-4.3265612601968471, 4.9346403281714331, 2.8024200919119817, -3.9056671936664311],
[0.98069732637570617, -2.2852413242790757, 3.2075463029671312, -2.6966078512789116], [-2.1601129611240619,
-2.6503532815304762, -4.675750160645002, 3.2350739199006568]], [[-4.1760984392537086, 4.3345400423125842,
2.3141216950646779, 0.60673873055732486], [3.6895192429599195, -1.0430965175426432, 0.33936966744652075,
-4.9652989404647769], [0.016939166262534222, -3.5215478761207564, 0.96881594277756378, 2.4707123930500092]]],
[[[-4.0598585401879825, 0.32726568454206451, -3.8317591404661555, -4.8432615549786364], [-1.8707032325346216,
0.11786029243200069, -1.2644962697725761, 4.5016381310909193], [1.0052891428203132, 3.5573702004465542,
0.94853515124922705, -3.266716026917611]], [[4.4268917686602247, 1.7045644573811822, -4.2672635941058026,
4.4735466129490451], [-3.3659634968161098, -3.7740307778271154, -0.23936175808445981, 1.638694221507726],
[-2.6562820856857803, -1.8386899346245853, -3.8721446565337256, 2.2142808663189424]]], [[[-4.9689140219050429,
3.0036100506068504, 1.7161971518176031, 1.2296325439044953], [-4.2017528414854652, -1.8394187611478952,
-4.4722717389932569, -2.3151891625454821], [1.0583223957426515, 4.9808003293003509, -0.20896133794562566,
-3.9944246041361611]], [[-3.3354149131160451, 1.5689046088326091, 1.0657585673339192, -2.4003243575280555],
[0.12124021598431511, -1.1303400850693057, -1.9271523374197388, -1.7678094654193863], [1.3900959283471721,
1.5973269294693555, 3.1820328180383193, 0.020208485606988624]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.4538732915852082, -8.5014822473732448, -4.8280569761201289, 6.7287498384724653],
[-1.6895620259559927, 3.9370526030863635, -5.5260152997629799, 4.6457618491254991], [3.7214793318324864,
4.5660736900247683, 8.0554618655674215, -5.5734403461917594]], [[7.1946533857624484, -7.4676192730552966,
-3.9868081968486213, -1.045299799699541], [-6.3563664744313337, 1.7970644132987275, -0.58467183243782017,
8.554301425835547], [-0.029183083606744185, 6.06698254808967, -1.6690925763069098, -4.2565853134149645]]],
[[[6.9943933115474914, -0.56381888491958132, 6.601419787965443, 8.344053343456924], [3.2228793313878823,
-0.20305171545342732, 2.1784956702862215, -7.7554987011078413], [-1.7319292253931926, -6.1286978574257542,
-1.6341524838838573, 5.6279539060835297]], [[-7.626724397207207, -2.9366526251568561, 7.3517142643698472,
-7.7071021560908708], [5.7989391344540557, 6.5019643834157081, 0.41237650603182713, -2.8231702629851867],
[4.5762879346145349, 3.1677262775151811, 6.6709966416095421, -3.8148007197688423]]], [[[8.5605295495980389,
-5.1746704572343623, -2.9566936288903536, -2.1184318506694821], [7.2388512260579478, 3.1689819122397545,
7.7049057815666941, 3.988647247971739], [-1.823295815105068, -8.5810074820485838, 0.36000214540263886,
6.8816626141257302]], [[5.7463095151227659, -2.702935531829326, -1.8361069779390997, 4.1353196092052489],
[-0.20887470520881798, 1.9473691145940035, 3.3201308089314878, 3.0456122001844874], [-2.3948808973010833,
-2.7519019890081795, -5.4820602341926197, -0.034815522552537087]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-5.3410811151064834, 6.0917464659774998, 3.4595495427759313,
-4.8214931062905171], [1.2106575302027247, -2.8210993781265854, 3.9596723481566771, -3.3289257687890399],
[-2.66663011322566, -3.2718251306347788, -5.7721500702479123, 3.9936548173985087]], [[-5.1553368061426292,
5.3509308132656104, 2.8567518036265622, 0.74901072253301881], [4.554661396727484, -1.2876884842321341,
0.41894724535949618, -6.1295941064678363], [0.020911170694103174, -4.3473030256449352, 1.1959901235162222,
3.0500608935740701]]], [[[-5.0118402294424564, 0.40400504285255151, -4.7302546183870042, -5.9789406115194401],
[-2.3093577338610776, 0.14549693030368552, -1.5610034714491989, 5.5572111344423751], [1.2410157936131636,
4.39152519850908, 1.1709537618153174, -4.0327165687679427]], [[5.4649377651792932, 2.104261672325459,
-5.2678789516603199, 5.5225325368173852], [-4.1552362224415518, -4.6589897387390575, -0.29548883949592541,
2.0229457607473758], [-3.2791441588432502, -2.2698377523754134, -4.7801099894819963, 2.7334996565146623]]],
[[[-6.1340568754081053, 3.7079158143505366, 2.1186220756193546, 1.5179646753620302], [-5.1870068172759387,
-2.2707374788199806, -5.5209587222334928, -2.85806957773395], [1.3064846240818391, 6.1487303604553372,
-0.25795993371372866, -4.9310629240686339]], [[-4.1175244107552791, 1.9367914317381283, 1.3156644769179004,
-2.963167819687802], [0.14966940002504314, -1.3953894834570877, -2.3790433871858725, -2.1823367757951],
[1.7160545441447639, 1.9718784005502887, 3.9281762975200034, 0.024947101023013681]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-0.099233059085104713, 4.771977048069223]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.9729085267773208, 4.512809517509826]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(0.26176969234)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.025976207346631468, 1.249158963725014]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0399870427481233, 1.1813167589860505]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.84702689091359229, -3.3372769586299422]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-2.152707415414048, 1.9005183627662312]))
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([0.22148437875716098, 4.0581595354793194])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([0.18760322472460655, -13.543202312199522]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.47679106454891407, 7.7126067162133252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-4.9957974390113735, 4.9127660926149055, -0.033400607153987849, -4.5745875540405283,
-3.40034229393263], [4.6311740546030773, 4.0795994583149682, 4.8540687237153293, 4.9306788508967045,
3.1060981817064288], [-1.3242874361820456, -3.3839454855009707, 0.088505407790738566, -4.4328915815516297,
-3.0958370529970693], [-2.3333608177639089, -4.3040231210385214, 4.1339174077369343, -4.5703847879440351,
-3.3406709387044389]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.78277733029185015, 4.3160080804422201, -3.0818130197239957,
1.7055665928942068, -3.3364799279772583], [4.5669273627829092, 3.6704066897391243, 2.6983979089447621,
-1.2237350853460538, -0.1257348607090929], [2.1891872096029914, -0.7503980382583979, 1.9746042593444724,
-2.0584330310875232, -0.7673935307397155], [-0.23746062225782705, 2.8663010003293437, 3.8185722602896526,
4.8671017855990222, -1.9042813962136051]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.123633480243)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.61764782397173934, -0.60738236964752024, 0.0041294333046613622, 0.56557217998023424,
0.4203961518147894], [-0.57256816597952853, -0.50437507902705625, -0.60012540964938543, -0.60959698629464243,
-0.38401772817938795], [0.16372626457665548, 0.41836895732351315, -0.01094223158545413, 0.54805381376508067,
0.38274910912583321], [0.28848151856172877, 0.53212135749834688, -0.51109059615373209, 0.56505257738107939,
0.41301877449713931]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.096777485598944848, -0.53360309973999165, 0.38101526908524591,
-0.21086513366492038, 0.41250062525520415], [-0.56462512387573127, -0.4537851529579448, -0.33361232456203066,
0.15129462749623307, 0.015545038417276023], [-0.27065683362567039, 0.092774321037059626, -0.24412719668449809,
0.25449123947954888, 0.09487553292096082], [0.029358083150294065, -0.35437076809338752, -0.47210337809722808,
-0.60173673244828274, 0.23543293637500923]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-2.5363493699555439, 1.9315826441262116, -4.797663921800063, -2.3131658171459835,
-1.4174075473244754], [-0.4937783451212221, -4.7652740781432534, 1.5781017135273068, -4.2362357361072114,
-3.918073606586808], [2.094919785395116, 1.3684348598821918, -4.2376402301126852, -1.6050592311847534,
3.151025223042982], [-2.6417620339476366, 0.27296872857386667, -1.4323869283247213, -4.6402797342361799,
-3.9199666124863741]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.69802384928264605, 0.87453186333400357, 0.81014803662176149,
-4.214756633799734, 0.78685864942817574], [0.48682400140861759, -2.7486583171634758, 0.40554914153896249,
-1.7609786982015061, -0.39145780725801416], [1.2068445571926318, 0.18236245525374706, 4.017808328373075,
-1.0950851034750277, 0.12173582687690843], [0.22180579191198468, -4.2110674925319236, 2.9122016067639365,
4.5406571257464297, 3.0637655540581346]]))
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-2.7914057802277092, -3.2955192040483841, -0.4909883356152811, -4.414815259808397,
1.1535659837090115], [0.30062418712185313, 4.6879078677821262, 2.641934893458421, -4.6213986057014331,
2.2307025160830776], [4.0559589148649486, -0.33010334091372151, -1.5019795108463163, 1.5894091005782052,
4.3064711265533191], [2.9888346593766579, -4.5884630123207506, 2.4921626108815289, -3.5186629218511625,
-1.0861727773454932]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0799802919708137, -6.3655676979244857, 2.3555970238060953, 10.212199748003247,
-1.6350731316459357], [-0.14844171362044126, -22.339165843065977, 4.1692419823943165, 19.577333924268451,
-8.7400566524118908], [8.4969085795002854, -0.45172491906991191, 6.364848799967322, -2.5510957490121045,
13.569799142075835], [-7.895789928888072, -1.2525069145814096, -3.5697411470863107, 16.327580247874213,
4.2577610225859299]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.9484678076243735, -2.8820365501694258, -0.39777323610290649,
18.607371903277738, 0.90769337196755784], [0.14635106969487355, -12.885456950875437, 1.071434428043893,
8.1381845005383653, -0.87322591559081664], [4.8949119406016965, -0.060198455736490949, -6.0346657877240473,
-1.7405382293708347, 0.52425182351249966], [0.66294083851702656, 19.32232743186902, 7.2576799597261958,
-15.977041869203234, -3.3277787409867781]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[0.20286433747766175, 2.066180723397264], [-3.1327754856901735, -1.1194293005675835]],
[[-1.7914925359739922, 1.275772548969373], [-2.3842819867614953, -3.1968139299234077]], [[-3.5171630188865488,
-1.4300055611015186], [4.2854751694367756, 1.7799374077309524]], [[-4.2108803597952917, -1.5964309596888695],
[2.7414856168787471, 1.1873651110226469]], [[-3.5913507733928229, -1.3017853623346696], [-0.13258097661378798,
-3.1022689591044426]], [[4.9076894073951749, 2.8964781538465161], [2.969217301725779,
1.8197050412291595]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.7913962593423918, 4.4765688935993317], [1.4500345756448763,
4.8443295010220275]], [[1.7391435441206662, 4.4187921026078829], [-4.2876409136689784, -4.1427096121048841]],
[[0.11488507950500271, 2.1339510129273167], [-2.8155795121378926, -4.6369329094888991]], [[0.67434242728218052,
4.9095299484356563], [0.94463745614236405, 3.557499141589803]], [[0.038621679734069048, -0.10332111066950311],
[-2.0403842705827979, 1.0573287011436552]], [[-2.1400599935190945, 4.2642563454671869], [3.9163707220927186,
-2.5416950274474726]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(3.22032001333)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[0.65328808596966281, 6.6537631347069341], [-10.088539593828681, -3.6049205801225788]],
[[-5.7691792673232181, 4.108395871899436], [-7.6781509993834112, -10.294763877415299]], [[-11.326390459854416,
-4.6050755275843107], [13.800601454753718, 5.7319680565855959]], [[-13.560382296374918, -5.1410185693811012],
[8.8284609982830986, 3.8236956301526179]], [[-11.565298770434771, -4.1921654553826446], [-0.42695317237583746,
-9.9902988157275558]], [[15.804330417828238, 9.3275865669967502], [9.5618299006647014,
5.8600325626225302]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.209509252413918, 14.41598439909561], [4.6695753639655058,
15.60029124329226]], [[5.6005987611804535, 14.229924642760029], [-13.807575844248428, -13.340850673264026]],
[[0.36996672076263859, 6.8720051543895364], [-9.0670670520514882, -14.932407848882294]], [[2.1715984144123999,
15.810257548976251], [3.0420349053536828, 11.456285683055723]], [[0.12437416819593333, -0.33272704048818802],
[-6.5706903014356595, 3.4049367769580914]], [[-6.8916780268502658, 13.732270051265186], [12.6119670159636,
-8.1850713646631412]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-3.0362430129414495, -1.6103362752021533], [3.0322276873410949, 2.889681860828258]],
[[3.0356323227798061, 3.8849951306068178], [3.1682099248345352, 1.252560279633526]], [[-2.422448371330125,
-4.3051891736441767], [3.2099062879412248, 3.0454833071508354]], [[-1.1376898513557334, 0.97676409380038631],
[1.0009530341765513, -3.085670706338802]], [[3.7338110619145226, -3.4624334476005911], [-1.9009045069833541,
0.020021974502883566]], [[2.2281987737323306, -2.9210437430011229], [-1.3860392623437132,
0.463839486811219]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.1305451146381422, 2.3814888644055499], [-0.89220558616836776,
3.8418880701208664]], [[3.3492033714884197, 2.1415021426686387], [1.4787086763681101, 0.38743271004052637]],
[[-4.99502836998282, 4.4948912080047858], [-3.7653670133834769, -4.0800035996907447]], [[-3.1718016142315641,
2.320405525041048], [-2.8237839197556944, 3.5858545025811086]], [[3.0016852702625556, -2.8784349824436584],
[-3.7323763876968008, 0.63313826152617381]], [[1.7585155020491481, -3.655987828892715], [0.54081193002197825,
4.9685421412273278]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[-0.93345150384204523, -3.5420417816351479], [-3.5444663998891226, 2.3971219306415996]],
[[0.8384895676298747, -4.3543886014540583], [1.9437605664303446, -3.0820690979415186]], [[4.9690708795309,
-2.1112283314766356], [2.37080840790198, 4.7216280449696395]], [[-3.3735779321675219, 3.0438054138693342],
[-0.47550686063032277, 2.7155331110677619]], [[-1.3378665576363566, -2.6741065199531286], [-0.22177834028631249,
0.61204525154245371]], [[4.0531432724462295, -4.0695297515588145], [3.928681336032259, -4.8729434946660577]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[2.8341856064600983, 5.7038783692487431], [-10.74762915459401, 6.9269197611686435]],
[[2.5453460338109117, -16.916778513418848], [6.1582415180666157, -3.8604773311674778]], [[-12.03731765914358,
9.0892373557640713], [7.6100728160284898, 14.379639393530271]], [[3.8380853761846505, 2.97307983678279],
[-0.47596003491968808, -8.3792409729148645]], [[-4.9953409522681316, 9.2589158571325303], [0.4215794466015394,
0.012254354420993967]], [[9.0312088694261341, 11.88727441774779], [-5.4453065809776664,
-2.2602636098259725]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.8556635489463869, -8.4353330602232983], [3.162392721967159,
9.2094741479570601]], [[2.8082720868638438, -9.3249325200257633], [2.8742556143627427, -1.1940943831476432]],
[[-24.820650015712328, -9.4897416652449422], [-8.9269637741663139, -19.264259419876904]], [[10.700319930984929,
7.0628628994922567], [1.3427286267814176, 9.737506633230419]], [[-4.0158543396339219, 7.6972417538137563],
[0.82776024058721887, 0.3875092665369389]], [[7.1275152766229084, 14.87815124101582], [2.1246777357809301,
-24.211425105067871]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.083529595185620309, 4.372823949648037, 4.0009796310147365, 0.68196650279571092],
[-3.6991633277760947, -2.7548332509536966, -1.0695165519831527, -4.9760591951223629], [2.4321680119150511,
3.8212450023110254, 0.8004705884830452, -2.0444757685121964]], [[-3.4279885095627494, 3.5526615517407674,
-0.37278949561560104, 1.8208812052515633], [-1.3542092169449638, 4.6164533745863388, 0.70466827998486359,
4.8882907830017537], [-1.0014606132552197, 0.027094091914280583, -0.50731223435619732, 1.3858925949581025]]],
[[[-0.92819420758339621, -0.63572501286400218, 1.5822275384230347, -4.1213856389411898], [0.019204126745418826,
1.2601369851282858, 3.1282675256554278, -1.2005085063042245], [0.31394545189514567, 4.3373088139034053,
-4.2967881792088045, -3.2133421015779429]], [[-4.6394850485838566, 2.7899856599199682, 3.4214279590576524,
0.75816457866836107], [2.6716538166314328, -0.78329465377730312, 0.9411007620209233, -4.3621528303217216],
[-0.21060811641931387, -1.0884058600082236, 3.3643361086045402, -0.59658223764974405]]], [[[-2.7722966748649869,
1.3359537198967564, 3.3994221388292836, 0.89269410005117322], [-2.5434807639867083, -2.2312407449400631,
2.1964509494368221, -3.8483462591031992], [-4.617770174657271, -4.0164566401957895, -2.0915606068178807,
1.3098480489351907]], [[-4.000475916402392, 3.4797401237531425, 4.727298203954307, -1.3658950385993265],
[4.3822054513768176, 4.7641649434095044, 2.2480529159500593, -3.370947660818576], [-0.12763750951483388,
-0.56331578609421484, 1.1108900947641267, 2.3086655633422826]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-0.99642554217400114, -3.1989491729058739, -0.40653827121759534,
-3.7494299986207311], [-4.962891998625155, 0.65199311462416087, -1.5646394746401904, -0.58561931277306201],
[-1.9266349397638294, -2.1350741612611923, -1.3141253220586746, 3.0489459946325113]], [[-2.4730670458068271,
-1.0264935833023214, -2.4514436230760808, -2.4564197681665032], [-0.48365426443540827, -0.92587219714034585,
-4.1751007096042496, -4.5526966630634966], [-2.3782862353872791, -2.5275939067663735, -2.6938709700385766,
-4.8403251647207153]]], [[[3.2867188889910004, -1.4242104085047957, 2.0936224809849646, -1.4601757832869966],
[-0.21399139868108641, -0.98887005738367506, -2.261387321435977, -3.6513388135428149], [2.9334655428664806,
-3.9524701563078288, 3.4584296338361185, 4.5958550113660674]], [[0.37781815561656451, -3.0593937474827717,
-2.0739947527751279, -2.4165789597896401], [4.5330104520530448, 4.9794431912053145, 1.7661478112868867,
3.078941742057026], [4.9858586211966696, -3.1080213069928195, -4.2716128061474183, -1.5876111863601041]]],
[[[0.90451414172461853, 0.1162545327223361, 3.2911315914907693, -1.4337863404739979], [2.0405912462551932,
4.8936580709384394, -1.1291930809589745, 2.111861338433255], [-2.0913683111797732, -1.55247331778124,
4.9769696268492716, -0.24856367420835213]], [[-2.1381113867577026, 1.6110287228762354, 0.19273167692598125,
-1.1320874579780638], [1.2876584378472149, 0.79519349199575018, -3.7974642196600819, -4.2341641389677163],
[-4.3323767453858073, -0.80301234928598664, 4.344905698376083, -0.27642913101571054]]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-1.40149736096)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.11706650721432915, -6.1285012253547855, -5.6073623941039141, -0.955774253928249],
[5.1843676416216082, 3.8608915310841012, 1.4989246251026278, 6.9739338299224034], [-3.4086770500993597,
-5.3554647863033438, -1.1218574172814939, 2.8653273941073367]], [[4.8043168495380142, -4.9790457891328019,
0.52246349429720151, -2.5519602037735591], [1.897920643729998, -6.4699472214569242, -0.98759073474784653,
-6.8509266319601139], [1.4035444065780798, -0.037972298315350331, 0.71099675763065506, -1.9423248144015677]]],
[[[1.3008617323823231, 0.89096692782230835, -2.2174877195310199, 5.7761110964560265], [-0.02691453295315966,
-1.7660786590997284, -4.3842586815689133, 1.6825095033900148], [-0.43999372231503248, -6.0787268563347068,
6.0219372937459816, 4.5034904752087277]], [[6.5022260517828334, -3.9101575394815598, -4.7951222553187058,
-1.0625656561736561], [-3.7443157733956758, 1.0977853901194523, -1.3189502343655459, 6.113545679780513],
[0.29516671935747973, 1.5253979404500404, -4.7151081775766022, 0.83610843165905069]]], [[[3.8853664736090217,
-1.8723356127940041, -4.7642811563429852, -1.2511084253622755], [3.5646815783684827, 3.1270780156899103,
-3.0783202091039361, 5.3934471261761532], [6.4717927132812418, 5.6290533816266439, 2.9313166707337701,
-1.8357485838353969]], [[5.6066564394040377, -4.8768466002510378, -6.6252959572916961, 1.914298291939184],
[-6.1416493752694707, -6.676964595345189, -3.1506402289923527, 4.7243742505563926], [0.17888363274397381,
0.78948558759562215, -1.5569095361235428, -3.2355886943530807]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.3964877677455285, 4.4833188236584016, 0.56976231423887624,
5.2548162481543859], [6.9554800387803839, -0.91376662950692644, 2.1928380945550465, 0.82074392137602725],
[2.7001737836036899, 2.9923008024518656, 1.8417431708300456, -4.2730897651733448]], [[3.4659969381643139,
1.438628048035971, 3.4356917682723473, 3.4426658224843063], [0.67784017522110429, 1.2976074408742768,
5.851392626233844, 6.3805923585144591], [3.3331618824920928, 3.5424161899003064, 3.7754530552637315,
6.7837029445226396]]], [[[-4.6063278491235211, 1.9960271289648437, -2.9342063819375608, 2.0464325068078741],
[0.29990838051872154, 1.3858987757511469, 3.1693283630907771, 5.1173417111346877], [-4.1112442167813024,
5.5393764933208445, -4.8469800048716598, -6.4410786697637112]], [[-0.52951114801769439, 4.2877322632209003,
2.9066981726499224, 3.3868290346857983], [-6.3530021857360977, -6.978676491502144, -2.4752514965759262,
-4.3151287260286901], [-6.9876676997044234, 4.3558836595439399, 5.9866540748393779, 2.2250328879071364]]],
[[[-1.2676741825739934, -0.16293042080947032, -4.612512240031629, 2.0094477723483699], [-2.8598832464155737,
-6.858448871838867, 1.5825611229732273, -2.9597680925181833], [2.9310471689044393, 2.1757872578242567,
-6.9752097975849967, 0.34836133343241132]], [[2.9965574659698517, -2.2578525035345738, -0.2701129365842932,
1.5866175847270911], [-1.8046499024549696, -1.1144615804810425, 5.3221360821765895, 5.9341698666157248],
[6.0718145753232413, 1.1254196883389966, -6.0893738698746489, 0.38741469760974395]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.94895783352211716, -0.46506050687609246, -3.4973315286311935, -3.8305662019128084],
[-0.067011552625391602, -2.0053313702353224, -4.2991620654185292, 1.0093244189118113], [0.87798040910337161,
-3.1557405362751467, -2.7931609613797503, -4.9110293393501543]], [[-0.43377674574157687, 2.704453693295501,
-2.5592027474361601, 2.6750943602438779], [3.4305357783522936, 1.1310936272574317, 2.5896451684195103,
-2.5539402760310126], [4.5085098664567802, 0.11349764632152315, -1.3234736581612081, 2.9677015132450526]]],
[[[-1.1465164843195783, 2.7375150894033169, -0.10912903641971639, -3.2522173068497242], [4.7360277602148102,
-1.6692169008049049, 1.233494102169499, -1.3392318230236588], [4.3885022964524385, 4.7515889636089952,
2.7117463429070652, 2.1998096737486339]], [[-2.5131848249547706, 0.77921677989624794, -3.8934505794744254,
0.24453982436947097], [-3.4599112442822841, 0.95455762656251686, -2.8118915894259002, 0.78333576699382768],
[3.9748218483200848, 2.0642803168106862, -3.4033833226648325, 1.1533845398716842]]], [[[0.49409552443598681,
4.2995599265190414, 1.6569894487618955, -3.0328627613000969], [-4.8050094221736783, -1.8954288851770262,
-0.65657001508748358, -4.2826450941652627], [-0.74170635020795395, -0.84409250409421333, 3.6365860981881823,
1.6100686197509306]], [[2.2396538156436314, 4.3413750637744482, -4.4044991729772871, -2.290449250092359],
[3.1707282757549784, 3.5745559429744826, 1.7312931654177177, 3.6613171235792787], [-0.8687074400525221,
-3.7812204587391882, 0.42585417549408167, -2.1167068471762196]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-1.6280796155619202, 3.2372833297254093, 1.0760799245326114,
-0.6791544053399603], [-4.7288229986341772, 3.0364132365453695, -2.7881857643052821, -4.280550845421411],
[-3.2543576320948486, -0.55086025606976641, 2.7326721039171273, -4.2151748027883897]], [[-0.090786768277823171,
2.6835858500949072, -4.3650061828137812, 3.7514613040027918], [4.6631405077073573, -1.4292251728385805,
-1.3449901199338612, 1.8541088776332506], [-0.75859253853348552, 3.0321616898786914, -0.78197759748368689,
-0.88789890229887547]]], [[[-2.062426285540877, 0.30001445848584307, -0.83075755338807511, 3.1138362566950235],
[-2.3733111917258274, -2.9574688279506556, -1.7570853300314857, 4.3659538409493486], [-1.018783824996695,
0.89420512031921273, -1.8230865992410106, 3.1994892045316963]], [[0.3233781997620504, -1.3905319280411477,
4.9629769739530829, 1.1091529164244776], [2.7351448192501895, 2.0660484456167358, -4.0171925239278465,
4.3911761581077471], [2.1258515256453734, 1.5150132919784713, 2.3610249459029262, -4.5111733078663327]]],
[[[-2.4852355463920439, 2.8830764355289409, 1.862859073381494, -2.1509811646634249], [-2.6325170393160802,
-1.3580306364602079, 3.8152036772116542, -1.8416692569043969], [-3.1659556710428727, -2.074597485798928,
0.35812962834444573, 4.8712028428189775]], [[-3.0119309329000288, 0.64957909215262699, -1.0255988706704269,
4.4011917781031578], [3.4155148745532635, 0.92333362783500483, -4.4110812159393742, 3.9318582439553591],
[-0.47136877672690947, 2.9648508900721211, 4.2958677743999178, -3.5373956917354246]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-0.89769175763822151, 3.1729441493671864, -4.6066637580891037, 4.9743040690579683],
[-2.6788549386226723, 4.5100260370904159, -1.4037388326608391, -1.3338419564469453], [-1.1381168569459419,
-3.1452061025455782, -1.5160789869753852, 4.1456610012343731]], [[-2.5938250596030543, -0.93202167450854922,
-0.77103273701479047, -2.7062810722150674], [-1.105689822011354, -1.8461034185063041, -2.0068461224928411,
-4.7319124228070422], [-4.5218149557274483, 4.8904398898663093, -3.825654173472147, 0.88914553957591913]]],
[[[2.5862303105862106, -2.0807587439433863, 2.3052217753786719, 4.1346828351021188], [0.68224921552396101,
-2.8879458749312326, 0.54057865399082239, -0.0023583593457887631], [1.9450000139899339, 2.3035289835776487,
0.43188049217124025, 2.7210621624224993]], [[-0.54240954949206355, 2.4426743469776238, -0.12851500556720108,
2.8020215668130284], [0.52130307899069095, -2.6755785781541075, 0.43102203402703765, 2.8334377967823654],
[-0.76961602631277248, -0.42105873518056569, -2.6486982753852963, 2.2122858679665658]]], [[[-3.480147294108944,
3.6715573223019664, 1.7695636383465487, 4.490486044765845], [-2.4274939795683825, 4.5761414724005753,
-3.0218037175811974, 3.7714386247368186], [1.1400240440382881, -1.8970197325492588, 3.6790698826844643,
0.066641356768850635]], [[-2.2050658457593322, 2.6656171339669479, -3.3829610334860827, -3.4782484246680303],
[-1.0395696415089963, 2.3867408856698438, -0.23958153915918956, 4.5415496456746567], [-4.7338880436477284,
0.94198626201791846, 4.0511716726168139, -3.3273461069241539]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.85187162549902817, -1.4756110143942358, 16.111030402967682, -19.05440104497081],
[0.1795142286953034, -9.0440966927555042, 6.034900739130368, -1.3462792576110065], [-0.99924430366884154,
9.9254543927430472, 4.2346526407878047, -20.359462808061743]], [[1.1251409933775647, -2.5206094598561033,
1.973229098931474, -7.2395572335172815], [-3.7931084941699296, -2.0881158119306398, -5.1970193648750147,
12.085021719258394], [-20.386647342189029, 0.55505341697671495, 5.0631525238248756, 2.6387185632945438]]],
[[[-2.9651556833340331, -5.6961084589529127, -0.25156663108082233, -13.446887074653596], [3.2311512241062563,
4.8206080630450217, 0.66680058145640564, 0.0031583898860055686], [8.5356370279948504, 10.945422895721002,
1.1711503452182641, 5.9858188677683906]], [[1.3631754486940078, 1.9033728389870741, 0.50036682289677803,
0.68520586182792786], [-1.8036623846788673, -2.5539939372442984, -1.2119872323378713, 2.2195331697718155],
[-3.0590865962052933, -0.86918325925444495, 9.0145355372174212, 2.551616317889247]]], [[[-1.7195252023972392,
15.786080730887091, 2.9321482776529417, -13.619027905308091], [11.664131444095956, -8.6737507294445777,
1.9840257124437006, -16.15173312417452], [-0.8455630728529504, 1.6012601363636387, 13.37925438963315,
0.10729715731115268]], [[-4.9385841352003395, 11.572443754974021, 14.90024907420384, 7.966751495915819],
[-3.2961928569490411, 8.5315388172113202, -0.41478588130656219, 16.628053485294025], [4.1123637638924597,
-3.561857725793407, 1.7252083724272129, 7.0430162874514943]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.4615136516687401, 10.271719200896161, -4.9571383891516385,
-3.3783205220012094], [12.667830843763641, 13.694302756185596, 3.9138846300274661, 5.7095783143275201],
[3.703839279617827, 1.7325690390404491, -4.1429467550425727, -17.474685793305618]], [[0.23548499463939337,
-2.501160177692904, 3.3655626642213927, -10.15250872017001], [-5.1559869979808832, 2.6384974773925669,
2.6991882069804505, -8.7734808313096], [3.4302150860439653, 14.82860448070719, 2.9915758593751893,
-0.78947134857339996]]], [[[-5.3339093730155467, -0.62425770780385803, -1.9150804021305003, 12.874725321875548],
[-1.6191896987491827, 8.5410099019178034, -0.94984282265544051, -0.010296488044085244], [-1.9815345538712901,
2.0598274119188451, -0.78735553775100031, 8.7060090135304602]], [[-0.17540342364848829, -3.3966166692794464,
-0.63781701343747121, 3.1078703927149545], [1.4258394157605607, -5.5278749625207295, -1.7314984927415895,
12.442124498712067], [-1.6360894036981373, -0.63790958050220026, -6.2536427023547434, -9.9800049569406735]]],
[[[8.6489857619996346, 10.585380397622536, 3.296447679619837, -9.6589509024752935], [6.3904192640509674,
-6.2145403162961061, -11.528796655127632, -6.9457425694795969], [-3.6092655873482475, 3.9355523676576474,
1.3175839297390308, 0.32462356654173891]], [[6.6415060299238959, 1.7315291579087375, 3.4695610154653864,
-15.308438368849195], [-3.5506655737079806, 2.2037581206676693, 1.0568136270709447, 17.856729414678441],
[2.2314070162963722, 2.7928488073795359, 17.403297836956384, 11.770139783546139]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
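# expandedData variant (descriptive comment): arg0 is assembled from two constants through a
# whereNegative mask on the function-space coordinates, and the reference is assembled the same
# way from the expected products (the reference uses a whereZero-based mask msk_ref rather than
# the whereNegative mask used for arg0).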
def test_mult_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.0430204652553)+(1.-msk_arg0)*(1.78425217281)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(2.5646949317)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-0.1103343692)+(1.-msk_ref)*(4.57606250448)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.57326660208)+(1.-msk_arg0)*(3.29535894632)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([4.0270481495052639, 2.7564226252917825])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([10.362668508083143, 7.0930102828763504])+(1.-msk_ref)*numpy.array([13.270569146744505,
9.0834019581016197])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-3.02551699988)+(1.-msk_arg0)*(0.626618362726)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-0.87644218701217369, -3.616282438617354, 0.79667729938717624, 3.9811287325297613,
0.065796525107652215], [-1.4217337078130887, 4.8515183472866479, -0.78214368357519071, 3.1809506063985502,
0.95738137909039533], [0.39346667195906182, 4.3285617066713939, -4.5910865330304649, -4.7275376371854012,
-0.90249772270981055], [1.0196798908676152, -4.5635511009909653, -4.4978908227402012, 1.164740898313557,
-1.7685582007909115]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[2.6516907362186282, 10.941123994409631, -2.4103607127155628, -12.044972658985373,
-0.19906850524632991], [4.3014795022930183, -14.678351234952618, 2.3663890110066546, -9.6240201354420911,
-2.8965736378079594], [-1.1904401048989282, -13.096137028570261, 13.890410353610562, 14.30324548888392,
2.730522202412847], [-3.0850588442572544, 13.807101435875975, 13.608445147811342, -3.5239433883048839,
5.3508029017726937]])+(1.-msk_ref)*numpy.array([[-0.54919476824925706, -2.2660289808399305, 0.49921262496268565,
2.4946483681778449, 0.041229310835994216], [-0.89088444822170665, 3.0400504835102153, -0.49010559441809565,
1.9932420608926262, 0.59991275226964935], [0.24655344177009811, 2.7123562495913784, -2.8768591264593351,
-2.9623618939370084, -0.56552164536805005], [0.63895014371973546, -2.8596049191178006, -2.8184609830641962,
0.72984803470084503, -1.108211044164624]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-3.56361070359)+(1.-msk_arg0)*(4.70518403887)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[4.2317874979104992, -1.1989677806745727], [1.3134595205642725, 4.4478340118436144]],
[[-0.017162439223159964, -0.41900720330017371], [-4.4667032138911269, 2.0617117365888351]], [[2.9794518983997751,
0.52772381685170533], [-2.6894168529203224, 0.41500536076126604]], [[-4.6733566211583097, 2.4911601334345299],
[-4.7318467145182375, -0.81771569841489722]], [[1.3136083167944523, 0.82585873482530836], [0.296465998582784,
-1.7304343680929755]], [[-1.3066203047314175, 2.65896658854032], [3.9719908108129438, -2.8680613980844938]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-15.080443222871585, 4.2726544164712799], [-4.6806584062148398, -15.850348892396909]],
[[0.061160252115363196, 1.4931785545617493], [15.917591382781623, -7.3471380122247991]], [[-10.617606675968551,
-1.8806022422720292], [9.5840346834818018, -1.4789175456560166]], [[16.654023676852269, -8.8775249158636207],
[16.862459599603678, 2.9140204153647811]], [[-4.6811886580533626, -2.9430390270766442], [-1.0564894058000638,
6.166594435995874]], [[4.6562861034687169, -9.475521795410085], [-14.154628967973551,
10.220654296766785]]])+(1.-msk_ref)*numpy.array([[[19.91133899106331, -5.6413640647508636], [6.1800687718624765,
20.927877600075174]], [[-0.080752435100909861, -1.971506005140051], [-21.016660668375366, 9.7007331557512781]],
[[14.018869516935215, 2.4830376799828495], [-12.654201250232003, 1.9526765994999067]], [[-21.989002981827273,
11.72136689810848], [-22.264209635536492, -3.8475028525162167]], [[6.1807688855097629, 3.8858173374624307],
[1.3949270845997368, -8.1420121690652945]], [[-6.1478890026873314, 12.510927152291805], [18.68894776558027,
-13.494756712769872]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.51859660817)+(1.-msk_arg0)*(3.69262914568)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[0.67581666027729526, 2.8108253236352603, -3.2853142689252701, 4.9303752145005504],
[-1.3657702337917108, 4.3904435823590919, 3.3571499478699067, 0.015664424638796248], [-3.1193276566121941,
2.4614116251071474, 4.8836311348501411, 4.5339873643248207]], [[0.92904130460985179, -4.0289182883108268,
1.1622216990935978, -3.453322460119399], [-4.9809628080889743, -3.091966318347751, -0.82579662341565552,
0.19293706069789529], [-4.1200195632312617, -4.252279729557471, -4.2626018577200098, 2.8104051978786853]]],
[[[4.553470024096578, -4.1556230388171853, -4.8792826237543903, -0.084660297479757673], [-1.3940013078624869,
-4.391326436454821, 1.8581067287379591, -4.9713750706033011], [1.1291380851369173, 0.083960164404878412,
1.6804470605141759, -4.9497828740616505]], [[1.5580688980525261, -0.37196449698866818, 2.6938341003534481,
-2.2283064145681664], [0.4008724735809448, 2.2505971863148471, -1.6587271003862281, -2.7339584931448382],
[-4.8561801785490113, 2.7658580526343668, 2.6247408695536976, 1.2921000397940583]]], [[[-3.378532765195783,
-4.7136195887628478, -2.2928079312088725, -3.2689147861576906], [2.1255291713840734, -2.9248168929356853,
-4.2298230449258032, 0.73722628474508767], [-4.0600914026090829, 4.8210888962888614, -1.5990379796232492,
-1.3511964372595688]], [[-2.5350622751408225, 3.9568471756339907, -2.5691428388833124, 3.4094397075929948],
[0.80244721075126435, -1.8355785194219432, -4.4146407757484631, 4.8328210906719811], [-4.920522191727871,
-2.4717933373462273, -4.5300856839069414, -2.5250375128130722]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[1.7021095483176161, 7.0793351262596333, -8.2743813744801198, 12.417626292235164],
[-3.4398242783642043, 11.057756314881239, 8.455306471815506, 0.039452366764170722], [-7.8563280557071602,
6.1993029702993985, 12.299896811775723, 11.419285197263687]], [[2.3398802786380641, -10.147219935524449,
2.9271676292760325, -8.6975262349660429], [-12.545036033862335, -7.7874158819594053, -2.0798485747710065,
0.48593062666356401], [-10.376667297538809, -10.709777303843696, -10.735774580822937, 7.0782769989541201]]],
[[[11.468354158082919, -10.466338090388497, -12.288944666479384, -0.21322513807898585], [-3.510926965763792,
-11.059979868212249, 4.6798213046129975, -12.520888390750915], [2.8438433513788075, 0.21146178529132892,
4.2323682668163789, -12.466506357778224]], [[3.9241470419266724, -0.93682852047446297, 6.7846814281166719,
-5.6122049776897045], [1.009636052268762, 5.6683464398043348, -4.1776644489085921, -6.8857385877058555],
[-12.230758926344738, 6.9660807100382254, 6.6106634513770723, 3.2542787776386608]]], [[[-8.5091611630055262,
-11.87170630845092, -5.7746582787226588, -8.2330776928059901], [5.353350561609421, -7.3664339060593971,
-10.653217974099684, 1.8567756202110501], [-10.22573243546206, 12.142378141868068, -4.0273316318104415,
-3.4031187638502254]], [[-6.3847992476635591, 9.9657018755896978, -6.470634439909837, 8.587003283295978],
[2.0210408232317629, -4.6230818330415877, -11.118699284078886, 12.171926806847758], [-12.392810502499694,
-6.2254503155317185, -11.409458438197062, -6.3595509152671994]]]])+(1.-msk_ref)*numpy.array([[[[2.495540296879188,
10.379335513483992, -12.131447222167042, 18.206047216225837], [-5.0432829716077814, 16.212279934703215,
12.39670974393805, 0.057842910971599444], [-11.518520219746321, 9.0890803063976193, 18.033438665320531,
16.742333887671666]], [[3.43060499894721, -14.877301096998385, 4.2916537198201299, -12.751839165684238],
[-18.392848438720499, -11.417484944606104, -3.0493606800326032, 0.7124450136157725], [-15.213704319978612,
-15.70209206496787, -15.740207856266352, 10.377784144870411]]], [[[16.814276124980314, -15.345174751614865,
-18.017381226508029, -0.31261908195608412], [-5.1475298585355107, -16.215539987468492, 6.8612990623305103,
-18.357444479839728], [4.1694882026791129, 0.31003375015792717, 6.2052677934347074, -18.27771250557068]],
[[5.753370623933491, -1.3735269427402785, 9.947330312604274, -8.2283092119504264], [1.4802733796476881,
8.3106207653820228, -6.1250640356231392, -10.095494814878659], [-17.932072464006065, 10.213288057984194,
9.6921946347836005, 4.7712462660838089]]], [[[-12.475668558412432, -17.405649075135688, -8.4664893922387598,
-12.070890014125325], [7.8487909682557282, -10.80026410464507, -15.619167856781525, 2.7223032660145061],
[-14.992411847417884, 17.802493372372791, -5.9046542486134737, -4.9894673457699161]], [[-9.3610448433105873,
14.611169205765981, -9.4868917262874675, 12.589796434712371], [2.9631399582934548, -6.7781107400101064,
-16.301631196256576, 17.845816015294734], [-18.169663657162346, -9.1274161195937573, -16.727926428843457,
-9.3240271137605752]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.8003466734301279,
3.110968541428603])+(1.-msk_arg0)*numpy.array([-0.057900815820612905, 0.54416620499792501])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-1.23860498141)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.2299183579697544,
-3.8532611324133779])+(1.-msk_ref)*numpy.array([0.071716238902929969, -0.67400697222367489])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-0.79032144715843966,
1.1989835373509852])+(1.-msk_arg0)*numpy.array([3.3888677818436879, 2.0461382220071824])
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-0.92949574400567592, -1.2179599998896897])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.73460042153017635,
-1.4603139890197456])+(1.-msk_ref)*numpy.array([-3.1499381802216635, -2.4921145086501579])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[2.9968239240536168, -0.22744917461103942, -3.368130646004337, 2.5739305354263671,
-3.5520990533562147], [4.2529038750041668, 2.5566407005371827, -0.59374756248383598, 0.27167900571967607,
3.5828984462512263], [-3.506627820649574, 2.8671976480545798, -4.502344160444026, -3.4451554059919767,
-0.53368053099846069], [3.3068381259500921, 0.9313565649084623, 2.680662417641706, 0.49678621396386813,
-4.6856891442661137]])+(1.-msk_arg0)*numpy.array([[-0.85596724799322921, -0.13058599556778994, -0.39878828275994316,
-4.0930080594310017, -4.4366277751460883], [2.6284949729177862, -0.28953336259360274, 4.6575690245651824,
-0.75846368797438046, 2.8728013263404817], [3.9714952839792605, 4.7929536309489222, 1.8212316872876864,
2.7021824250061401, 3.4917451398394661], [-3.9528673242547518, -0.39076547693401587, 4.8178679551326944,
-4.372708435816083, -4.6827874768603746]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(2.10607695069)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[6.3115417917256531, -0.4790254641017731, -7.0935423204623218, 5.4208957733386178,
-7.4809939428412591], [8.9569428246464202, 5.3844820505972715, -1.2504780558755719, 0.57217689193258392,
7.5458598343126875], [-7.385228027718342, 6.0385388796403019, -9.4822832603848397, -7.2557623921047183,
-1.123972265367853], [6.9644555567263726, 1.9615085942275186, 5.6456813303761022, 1.0462699946498486,
-9.8684219048371684]])+(1.-msk_ref)*numpy.array([[-1.8027328915440832, -0.27502415534822766, -0.83987881052595881,
-8.6201899329560003, -9.3438794960261902], [5.5358126774666605, -0.60977954141415425, 9.8091987688843929,
-1.5973828911781676, 6.0503406573173217], [8.3642746773627188, 10.094329167867425, 3.835654078462837,
5.6910041218650154, 7.3538839566996979], [-8.3250427607485502, -0.82298216409611202, 10.146800651772885,
-9.2092604487599345, -9.8623107699953731]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.328567726345117, -2.6681448855288812, 3.2894830838559965, 2.8760842998618212,
2.8603604495641051], [4.0060721329760867, 2.9201617632829659, 2.1721464751857216, 3.3782152232970972,
-0.56533591383663317], [2.812933639561118, -0.55158819240545931, -4.3300172419246827, 4.4161226276280559,
1.5663796584927505], [2.1573608096632757, 0.40478964915762283, -2.783180507622979, -1.5186696606391514,
-3.5877444638062372]])+(1.-msk_arg0)*numpy.array([[1.9794332133759385, -4.6552748863866809, 3.6163220859074414,
4.9277135493827373, 4.7113876120438363], [2.5672843138256862, -3.4663513236737495, 0.70662767196765763,
0.51073573024771424, 0.77820357093096604], [-4.9550804650749072, -1.458958922255027, -0.59095436462229589,
1.6503086087766805, -0.60037112049808439], [2.0892995102876348, 4.3232658919201121, -0.56155298698416622,
2.2070902567073523, 1.8732215024796837]])
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[2.6575119197184351, 0.798780755830788, -4.7246974545048115, -3.594253030805985,
1.5756436650435841], [0.65656829496808733, 0.22663298200663462, 4.5542984252490282, 2.3375554718292921,
2.7227466581468063], [-1.0177447572471587, 1.532657082535188, 2.6217986442668364, 4.1467735088972546,
1.5155361075503855], [-2.6181643161762471, 0.56415045659076313, 3.6099145181301697, -1.4000081350932101,
0.95267578177457679]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[3.5306845689153685, -2.1312627883288111, -15.541812352931064, -10.33737471163186,
4.5069088220969], [2.6302599498672778, 0.66180496835457081, 9.892603271348559, 7.8967654802351435,
-1.5392664701290637], [-2.8628484641474969, -0.84539554973300923, -11.35243333453016, 18.312660324289759,
2.3739049305782052], [-5.6483250889774848, 0.22836226539548773, -10.047043721045087, 2.1261498794140565,
-3.417957261864017]])+(1.-msk_ref)*numpy.array([[5.260367358833121, -3.7185439923480388, -17.086027753956419,
-17.711449359812622, 7.4234680444816901], [1.6855974846268469, -0.785589537166827, 3.2181932936796902,
1.1938731008992738, 2.1188511721101988], [5.0430071650677997, -2.2360837253220716, -1.5493633519903052,
6.8434560203802217, -0.90988411104533029], [-5.4701294236395936, 2.4389724268900039, -2.027158280413504,
-3.0899443142752547, 1.7845727593117799]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-0.26449277327340059, 2.709494171087842], [0.49156618056888668,
1.1557293908754884]], [[2.9426463555847633, -2.2855487493843087], [-0.049194778717681409, 0.65490813497401845]],
[[-2.5662834716418259, -0.6481573871774593], [-4.7039097421222928, -3.6194060480671544]], [[-1.2083023577871419,
3.399741798797292], [2.1299564643809319, -0.094074363510819659]], [[-4.6384172111976261, -4.5399938401719275],
[0.25600785550817573, -2.5059486809113416]], [[0.78159444871409178, -3.8859384118790743], [-2.9903682045869107,
0.31191296888410847]]])+(1.-msk_arg0)*numpy.array([[[1.4148748264368773, 2.6806498611711493], [-4.0435092298014874,
4.2267476915751718]], [[3.125690778072709, -4.9216068996123754], [-0.39858451763556069, -4.7718632732732615]],
[[-1.5015409161151947, -4.3392117282980625], [-4.3901880811233127, -2.8392130815499939]], [[1.5905393663877287,
4.6064612161951075], [-3.1305192294513895, 2.10516821140763]], [[-1.310377796147919, -2.266123007043912],
[-3.9633905374201128, 2.6610704495417856]], [[-3.3823523217509011, 1.9759866351748991], [3.6164091081703162,
4.7947187843172081]]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.422116691453)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.11164681436744017, -1.1437227150112377], [-0.2074982897720079,
-0.48785266669154376]], [[-1.2421401437361563, 0.96476827624502515], [0.020765937229078289, -0.27644765514099601]],
[[1.0832710883803918, 0.27359805181627767], [1.9855988172389765, 1.5278117060356851]], [[0.51004459354416765,
-1.4350877599033474], [-0.89909017568376171, 0.039710359075749616]], [[1.9579533267701619, 1.9164071790311268],
[-0.10806518895313126, 1.0578027661377205]], [[-0.32992406274935349, 1.6403194656131641], [1.2622843327469702,
-0.13166367044669411]]])+(1.-msk_ref)*numpy.array([[[-0.59724228055590678, -1.1315470503419538], [1.7068327379441424,
-1.7841807511750174]], [[-1.3194062497457233, 2.0774924210974488], [0.16824917784877888, 2.0142831369809828]],
[[0.63382548359210156, 1.8316536982639513], [1.8531716676608954, 1.1984792323144307]], [[-0.67139321496560267,
-1.9444641678876049], [1.3214444196665345, -0.88862664035176742]], [[0.55313233986365051, 0.95656834615927522],
[1.6730133005925627, -1.123282253884367]], [[1.4277473713864239, -0.83409694079570218], [-1.5265466476819478,
-2.0239308296843248]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[3.8465426754336836, 0.6522330066175801], [1.1213564084832672, 2.2991484731703036]],
[[-3.146805277366008, -2.9092773950846063], [-0.7507570800585075, -4.3691574327961602]], [[2.6060037046129327,
-3.4798742840305676], [-2.2473704638275493, 0.50713868800830042]], [[-1.9023463196744839, -1.0711886388204297],
[4.0144718529996783, -1.1676181449431819]], [[2.4659181377476571, -3.3224615922640091], [0.19896825249062911,
-0.68785028874854248]], [[2.1633557767317377, 1.5521876431045971], [4.2492954867619375,
-4.7770400998496516]]])+(1.-msk_arg0)*numpy.array([[[3.5872129335758469, -1.4939601669977023], [2.6202996623479304,
-3.4258031677768743]], [[0.71987276551639834, 3.6960268147998683], [1.885662175199954, -0.80213652437160476]],
[[2.0323879500914588, -3.054629290525348], [0.64860207732057251, -4.2079981974529375]], [[1.2861885512240159,
0.023168662849575306], [-2.5160035334539876, 2.6814734060795251]], [[-1.257956004629972, -4.3314818687128174],
[-2.8595244901616734, -2.299231449232094]], [[-0.19237535961527463, -1.5019438253752684], [4.7223884225278852,
-0.33487628154856086]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.7703715218725122, 1.8825293253796147], [-3.5569112589449747, 3.7865091325165103]],
[[1.3234025154170119, -1.0706127854081848], [-0.91030140341539401, -4.0779906813336346]], [[2.4564691769773113,
-4.2202902733308969], [-1.8831049422439405, 0.41091633330696098]], [[2.5279194148611168, 0.406501938106687],
[-0.33008527781400687, -3.1142412235060757]], [[-2.6625713201810175, 2.3132871914260473], [-4.863897694906,
4.1604892545389323]], [[-0.92121754594451399, -3.0027046624253173], [2.5790377788725909, -2.626459339743711]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[14.502894961122463, 1.2278477619381107], [-3.9885652346242333, 8.705746690670745]],
[[-4.1644900195937034, 3.1147095754765983], [0.68341522360130269, 17.817383296222328]], [[6.4015677754703555,
14.686079593308524], [4.232034427486715, 0.20839157015447368]], [[-4.8089781952947206, -0.43544025775836859],
[-1.3251180568739098, 3.6362445602957494]], [[-6.5656829114810957, -7.6858078453893226], [-0.96776122464864589,
-2.8617937350698126]], [[-1.9929212996456993, -4.6607610729091382], [10.959093593951833,
12.546701586580348]]])+(1.-msk_ref)*numpy.array([[[13.525125487647125, -2.8124238253222011], [-9.3201733708150698,
-12.971834980991126]], [[0.95268142866460237, -3.9570135631362282], [-1.7165209244518427, 3.2711052715447542]],
[[4.9924983550597704, 12.891422283435785], [-1.2213857773520564, -1.7291351898596623]], [[3.2513810098112819,
0.009418106351692759], [0.83049572532118243, -8.350755020948105]], [[3.3493975799772628, -10.019961526787521],
[13.908434576224618, -9.5659277382281029]], [[0.17721955668497666, 4.5098937271552346], [12.179218148209955,
0.87953893733186228]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[-0.71566615256646759, 3.7745591573508115, -1.8813261752186392,
-4.2234672474302322], [-4.5183089034463952, 0.90004730988224502, -4.3059651797874992, 2.1824530065115431],
[-3.2409114328950315, 1.425264269049185, -1.0363956947212296, -4.6538419691451374]], [[-0.021160056002790029,
-0.67358505413946013, -0.23398482533746634, -3.2333582167367658], [-4.3340836442891923, 1.8672903891253885,
3.3960333447618343, 1.2033623796151725], [-4.0570605641925273, -0.94056101372721734, -1.7965062790901385,
1.9686802501306877]]], [[[1.5047353385318765, 0.5056728068922558, 2.409658435923566, -2.1000430599662478],
[-4.064887735516467, -2.1275738111117271, -0.62603398290270729, 2.4810787925563362], [3.254883998023141,
3.9921703272954172, 1.7173288579737775, -2.2709735667109623]], [[-1.0333361509972105, 4.6923051335910078,
-4.2187417938772569, -0.26001229923624436], [4.0524307618320652, 3.1529491855521723, 3.8891703598304161,
4.9134254224440408], [-2.2621317317299292, -1.4221931139149491, 2.8628397330646003, 2.486038796267632]]],
[[[1.5100124197717211, -2.2988431057193734, -2.9336531752144488, -2.6131669437218363], [-2.8974794045596344,
-0.85417257745919439, 4.3573210605245887, 4.2307931931872957], [-0.12423354634863593, -4.6765195903139816,
-1.5778735854722301, 0.1126115177128213]], [[-4.1436398962257615, -0.76648956746585917, 2.1159176768980608,
-3.2536216330349363], [-2.2856244596599904, 3.705073565316475, 2.2220301188746312, 0.98343571363908122],
[2.4860129265223163, -3.1262241511848043, -3.8305865546189102,
3.1860325494154687]]]])+(1.-msk_arg0)*numpy.array([[[[2.4089227637145711, -2.6908075329445138, -0.26014840610314227,
-3.9215812800091445], [4.009158789021523, 2.7447580688873892, 4.7131775334041954, -4.4290013383016644],
[-1.7082394800006173, -0.49659550318463985, 3.9472706938122357, -1.6162682425555799]], [[-3.8246426595124583,
-2.6800405903617683, 4.0143006613192842, 2.2139414823010402], [-2.5044774188651475, -1.6949103444811264,
0.41642158825732167, 3.2060207528109235], [-3.5084114918483778, -2.1345709077268706, 1.4430246021304205,
1.2294065151503952]]], [[[3.5126626365828901, 3.8348878052528992, -1.5523954598502341, -1.2410335237730994],
[-2.1674583781546621, 3.3708587151697049, 0.56105884392031768, 2.2747443626383266], [-2.0719480048860914,
1.733990254553726, -0.050490856134826956, -3.7318225551204542]], [[1.4746408782374925, 4.0027176844782097,
2.8083560519562889, 2.9394382256580087], [-4.6069170484521784, -4.8378490055724885, 1.636720298987302,
-1.5376228376365288], [-2.6819306123720734, 1.1516574074480923, -3.6392748736610949, 3.6118499182970254]]],
[[[1.9375861833692003, 2.9438381921906851, 0.57660731138154819, 0.51102545141293199], [-3.0370737652416144,
4.5219314530432726, -2.0670940828628037, 2.8140667234989287], [-1.3644739955281162, -2.7108624230624456,
-2.1786469944211206, 2.8077953462368104]], [[4.022901264470141, 4.7164854778488916, -0.17421287538805963,
-3.832102145875953], [-4.8385573319832922, -4.2618026800252213, -4.5775319725945369, -2.3564870023253039],
[-4.2941599674913693, -3.8000625353557038, 1.1131824136314092, 1.8132425713432623]]]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(0.717965207903)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-0.51382339801631816, 2.7100021501484619, -1.350726738523665, -3.0323025403715427],
[-3.2439885912315676, 0.64620265396188303, -3.0915331855279722, 1.5669253265579597], [-2.3268616507127589,
1.0232901572442055, -0.7440960504300036, -3.3412966169236569]], [[-0.015192184007276179, -0.48361063343539723,
-0.16799296376949385, -2.3214387043033553], [-3.1117212647398351, 1.3406495324431462, 2.438233786416474,
0.86397232106270994], [-2.9128283314443832, -0.67529008376584743, -1.2898290041654794, 1.4134439250790429]]],
[[[1.080347620167597, 0.36305548193114656, 1.7300509199223877, -1.5077578521533153], [-2.9184479681312658,
-1.5275239736231971, -0.44947061868890531, 1.7813282511207205], [2.3368934663398959, 2.8662393990196935,
1.2329823705524707, -1.6304800089652003]], [[-0.74189940448410086, 3.3689118307816321, -3.0289098291289465,
-0.18667978447841244], [2.9095042944301057, 2.2637078175116527, 2.7922890059647112, 3.5276685049395078],
[-1.6241318790748014, -1.0210851747097522, 2.0554193241418712, 1.7848893612164991]]], [[[1.0841363808970814,
-1.6504893683335269, -2.1062609118572939, -1.8761629480337436], [-2.0802894030884849, -0.61326619216028622,
3.1284049211184035, 3.037562314540097], [-0.089195363932689647, -3.3575783599208848, -1.1328583368377678,
0.08085115172692571]], [[-2.9749892795677022, -0.55031284166088568, 1.5191552747991404, -2.3359871321986905],
[-1.6409988403673095, 2.6601139126172866, 1.5953403162639133, 0.70607262660183523], [1.7848707876394252,
-2.2445201726558786, -2.7502278720763003, 2.2874605217258854]]]])+(1.-msk_ref)*numpy.array([[[[1.7295227328719085,
-1.9319061898166923, -0.18677750447340177, -2.8155589190091503], [2.8784365234748241, 1.9706407975713816,
3.383897487652936, -3.1798688666551458], [-1.2264565134062644, -0.356538293687512, 2.8340030243311856,
-1.1604243647929684]], [[-2.7459603621904449, -1.924175899646795, 2.8821282088880955, 1.5895329566247054],
[-1.7981276507231641, -1.2168866578518498, 0.29897621218834564, 2.3018113563323048], [-2.5189173861531855,
-1.532547645549204, 1.0360414584772961, 0.88267110424690254]]], [[[2.5219695601663243, 2.7533160203819742,
-1.1145659290786001, -0.89101889190998895], [-1.5561597050922877, 2.4201592782474872, 0.40282072952090631,
1.6331873092471489], [-1.4875865800916528, 1.2449446736119429, -0.036250678022026972, -2.6793187566430881]],
[[1.0587408447256197, 2.8738120345122611, 2.0163019367076376, 2.1104143768017321], [-3.3076061564825183,
-3.473407267087782, 1.1751082297410078, -1.1039597002996688], [-1.9255328696923644, 0.82684994997116834,
-2.6128727412832053, 2.5931825775035184]]], [[[1.3911194669720908, 2.1135733996881303, 0.41398398819427723,
0.36689849446726314], [-2.1805132972775692, 3.2465894558060273, -1.4841016329570593, 2.0204020001890131],
[-0.97964485587718853, -1.9463049031696793, -1.5641927422961792, 2.0158993695091798]], [[2.8883031427173891,
3.386272476673903, -0.12507878329731756, -2.7513160138682586], [-3.4739158208065777, -3.0598260472046448,
-3.2865086943851498, -1.6918756805445263], [-3.0830574538274456, -2.7283126882399662, 0.7992262430365179,
1.3018450797125141]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[2.9251174606479946, -4.9654026519140189, 0.30085621199683832,
-0.30061228591591238], [-2.513978591770071, -3.8844686856929558, -2.1891972059679632, 4.5141042306447474],
[-2.236512896721278, 1.8541668723658136, 0.26092160236246542, -1.1184332157429031]], [[1.8254032188716751,
3.8918830207374366, -3.02810273755754, 3.0709862855640271], [-3.2428568515005187, 2.0131925931621293,
1.9979305988685745, 4.1731959115852639], [-0.67035244538272032, -2.00829150675079, -4.7874365846650164,
4.1003641988688031]]], [[[-1.175586228847092, -1.8341477845829668, 2.4832839618398461, 0.89234831426067096],
[-1.7369861501359631, 1.8284772242403307, -0.27353996503704359, 4.8231831031252348], [0.099524325829133176,
1.2133893916939442, -4.6692295366623551, -4.4818711372137141]], [[3.8966804335291769, -0.47007955430217407,
1.9640723963394606, 4.8351918103493343], [2.1596571322083662, 3.4394328531876912, 2.846261179352954,
-1.8012535813987718], [0.41896290835312833, -4.2874267756908147, -0.37745703724418522, -2.6740921738817813]]],
[[[0.48734325359850583, 4.9758075524770824, -2.053696707710202, -1.2492068717010851], [-0.81009221983657476,
-0.032340552500626174, -2.7423954288910823, -4.1769441535542455], [-4.1686249915574001, 3.0106427920402847,
-3.5225347400306015, 3.9203298909772801]], [[-3.7843028776929879, 4.7534900290748308, 4.7905989355194496,
4.9295960701557782], [0.2236860439332089, 1.1116309427796969, -4.6113096924535757, 4.4416902722827007],
[-0.78621657417830626, 1.5380907655682377, 4.5160456196047676,
-3.7405412441349561]]]])+(1.-msk_arg0)*numpy.array([[[[4.3843342392813938, -1.6479745021651206, 1.0928655318928948,
-0.27867216892680435], [-1.8534416490446235, -0.60606977099904125, 3.7896814633860565, 1.6774705245027342],
[-2.1712907977922269, -0.9731023323951602, -2.2363323585193129, 3.0179076547691572]], [[3.5949000961561293,
-4.4826056152295042, 4.4507316837878168, -0.14917077276962942], [2.865875878248751, 0.65613479495564864,
-3.5114334284145352, -2.2657040605205969], [-3.3885348683628269, -4.1955144800795416, 0.19874114120673436,
0.77237878029658802]]], [[[3.6482369572987956, 4.1192696915374132, 4.7177908974828782, 3.267869957103418],
[-3.9817588302026605, 3.9472020063214721, -2.2489247733823725, 0.35633644710087964], [0.30626607261116678,
1.1534627023800335, 2.6743275744867319, 2.9389573367973432]], [[1.9302124685612068, -3.5741196455396427,
0.86533807292409826, 3.713340528874534], [-0.75399962875646054, -2.6842573138405124, -3.0230491895158327,
1.9153830313206104], [1.1778385254967532, 0.11792305960293703, 4.5139117168026992, 2.8139119131046897]]],
[[[-4.417619341991335, -4.8560564315233137, 4.580720581716939, -0.80247960909749949], [-3.4934853556245651,
1.0770893472402108, 3.378803127896246, 2.2030520956731934], [3.9240010232090405, -0.580967720625301,
-1.2967054817990578, 1.780069501914376]], [[-0.19831352998526164, -3.5200058893139854, 0.76880268223223958,
-3.5461945196094549], [2.6005628531204348, 4.7941479926695827, 4.9792519595550839, -2.3841553080010258],
[0.78378064155146721, 0.72888520432430148, -0.39840096436977745, 2.3135172058068862]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[0.6868381582863794, 0.54627711634776066, -3.2186962400562225, -1.0774604195563131],
[-1.8950462499252776, -3.9532997580259446, 0.94174368416758636, -1.45380459144517], [2.0866672501914945,
4.3633150084832497, -4.2883759710766967, 1.1324613195015356]], [[-1.6473475256931112, -2.4240842974365195,
3.0575003656751942, -0.88291292348152517], [-0.26167034327218541, -0.11708643329371515, 2.69348684719085,
-4.9379016274841749], [0.9686171163405044, 4.8295378077612252, 3.7003121978510229, 4.056897346655898]]],
[[[-1.6908031207422525, 4.8230189206260157, 0.82952788955472112, 3.4446585748455014], [3.3176619789094879,
3.0037317679930418, 1.4804656022305664, 1.0055708904902172], [0.88162105416707792, -0.50313800229601746,
3.9994903610131427, 4.5365056130602301]], [[-4.8084467422206547, -0.19193357971699321, -3.9848748508964316,
-3.0854097037447001], [-1.4115589742338477, 1.453415069972718, 3.991034445603626, -4.9809560423233554],
[0.17116217177604565, 3.3177075206574909, 1.7537041099136621, -1.9103533234598826]]], [[[2.5308867325681046,
-1.042247247233361, -1.1846149285407979, 3.7060742981010737], [-1.297359352173022, 4.2498337462445868,
1.493118867126439, 3.1157447558047586], [0.15917981571003992, -4.2811882943532051, -2.892893263308518,
-0.15525299846753171]], [[0.70528939883089059, -3.5415574610175469, 0.91408910363181572, -4.9096466754450141],
[3.8808985862028376, -2.4998339203436348, -0.7493422147604818, -2.7415281675633221], [-2.6740604084639994,
1.5889649415442406, -3.0729275401598812, -3.7916787182652412]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.0090822894427993, -2.7124858421931139, -0.96836475835178126, 0.3238978397067413],
[4.7641057027263036, 15.356469115209322, -2.061662642117656, -6.562625456773401], [-4.6668582162192029,
8.0903141424262, -1.1189299299060254, -1.2665823552745537]], [[-3.0070734760004947, -9.4342525180294281,
-9.2584252273842349, -2.7114134793590052], [0.84855946551469918, -0.23571754026667907, 5.3813997896526438,
-20.606830883627179], [-0.64931485257841604, -9.6991197608586983, -17.715009990674201, 16.634756638713682]]],
[[[1.9876848644362788, -8.8461294682679377, 2.0599533040300941, 3.0738352724669484], [-5.7627329081984522,
5.4922551255024175, -0.40496650907269482, 4.8500525280070113], [0.087742741052748138, -0.61050231454407089,
-18.674538525238951, -20.332033570982652]], [[-18.736980336078339, 0.090224051608984959, -7.8265826975130048,
-14.918547731118739], [-3.0484834062368544, 4.9989235409822532, 11.35952640798204, 8.9719649100247967],
[0.071710601287329825, -14.224428057977713, -0.66194795753096181, 5.108460871613123]]], [[[1.2334105747390316,
-5.1860217243322069, 2.4328397786485927, -4.629653480222637], [1.050980717527584, -0.13744197138935588,
-4.0947223559985781, -13.014291841725987], [-0.66356095792037362, -12.889128679761718, 10.190317019204748,
-0.60864297055611438]], [[-2.6690287016020968, -16.834758078342482, 4.3790342868285039, -24.202574957127123],
[0.86810285165369627, -2.7788927376642603, 3.4554490178896384, -12.177018993055025], [2.1023906134884078,
2.4439723034008711, -13.877480957101886, 14.182930630179902]]]])+(1.-msk_ref)*numpy.array([[[[3.0113280542199465,
-0.90025075885739858, -3.5176021783907041, 0.30025823205054236], [3.5123576464773363, 2.3959754790373493,
3.5689085831507947, -2.4387143505360127], [-4.5307613983952022, -4.2459520117298588, 9.5902339496154969,
3.4176636848536646]], [[-5.922049778516727, 10.86621388347861, 13.608113750703422, 0.13170480308403179],
[-0.74991472483682653, -0.076824482901260024, -9.4579997542208236, 11.187823767842159], [-3.282192872812852,
-20.262395804553826, 0.73540426902211176, 3.1334614243985466]]], [[[-6.1684504326080232, 19.867315661446234,
3.9135391265494457, 11.25669626921629], [-13.210129880150486, 11.856336061073677, -3.3294557689967741,
0.35832155842535174], [0.27001061779106766, -0.58035091979845577, 10.695947356351342, 13.332596454925692]],
[[-9.2813238562468232, 0.6859935779052545, -3.4482639243184217, -11.457176901097965], [1.0643149425401714,
-3.9013400316202884, -12.065093446111812, -9.5404386832200192], [0.20160140002551957, 0.39123422170360572,
7.9160655295443281, -5.3755659751229006]]], [[[-11.180494182182111, 5.0612114481650323, -5.426389984575974,
-2.9740490540264393], [4.5323058977990254, 4.5774506556220018, 5.0449546985677118, 6.8641480138584354],
[0.62462175972042311, 2.4872322049381017, 3.7512305527917205, -0.27636112765281257]], [[-0.13986843034333699,
12.46630312012565, 0.70275415467140356, 17.410562133681889], [10.092520700006713, -11.98457377122277,
-3.7311636912234754, 6.5362289327304204], [-2.095876782493292, 1.1581730360816256, 1.2242572954381448,
-8.7721139535984367]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(3.43231999068,self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-1.4540852929)
sub=res.substitute({arg1:s1})
ref=Data(-2.36046675352,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-1.99888672106,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-1.0166253896000677, 4.6638233509673803])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.966197914693572, -0.42859400338170606]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(1.75603219348,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1= | numpy.array([[-1.9661246795683085, -1.3570192095877909, -1.8523576170966627, 0.73214584413366701,
4.0725024727547723], [1.8898911955656281, 3.704542947914863, -3.7748480598358505, -2.2414248653554889,
4.3999361819787648], [-3.981378759357713, 0.34597746243312777, 2.0496740598398322, -2.7919624238592222,
3.7661073743193256], [-2.3423350580789757, -0.53058380320099552, -0.56646103984835783, 1.7717941659157441,
-4.6013674001294715]]) | numpy.array |
import pylab as plt
import numpy as np
import pandas as pd
import math
import json
from numpy.random import random, normal, uniform, randint
from scipy.interpolate import interp1d
from astropy_healpix import HEALPix
from astropy.coordinates import ICRS, SkyCoord
from astropy import units as u
from timeit import default_timer as timer
start = timer()
N = 1000 ##Change to alter the number of loops the code runs for
placement = np.zeros(N)
placement2 = np.zeros(N)
placement3 = np.zeros(N)
placement4 = np.zeros(N)
placement5 = np.zeros(N)
placement6 = np.zeros(N)
placement7 = np.zeros(N)
placement8 = np.zeros(N)
placement9 = np.zeros(N)
placement10 = np.zeros(N)
placement11 = np.zeros(N)
placement12 = np.zeros(N)
placement13 = np.zeros(N)
placement14 = np.zeros(N)
placement15 = np.zeros(N)
placement16 = np.zeros(N)
placement17 = np.zeros(N)
placement18 = np.zeros(N)
placement19 = np.zeros(N)
placement20 = np.zeros(N)
placement21 = np.zeros(N)
placement22 = np.zeros(N)
placement23 = np.zeros(N)
percentages = np.zeros(N)
percentages2 = np.zeros(N)
percentages3 = np.zeros(N)
percentages4 = np.zeros(N)
percentages5 = np.zeros(N)
percentages6 = np.zeros(N)
percentages7 = np.zeros(N)
percentages8 = np.zeros(N)
percentages9 = np.zeros(N)
percentages10 = np.zeros(N)
percentages11 = np.zeros(N)
percentages12 = np.zeros(N)
percentages13 = np.zeros(N)
percentages14 = np.zeros(N)
percentages15 = np.zeros(N)
percentages16 = np.zeros(N)
percentages17 = np.zeros(N)
percentages18 = np.zeros(N)
percentages19 = np.zeros(N)
percentages20 = np.zeros(N)
percentages21 = np.zeros(N)
percentages22 = np.zeros(N)
percentages23 = np.zeros(N)
no_se_func = []
ras_dex = np.zeros(shape = (N, 2))
test_case = np.zeros(shape = (N, 2))
def Ang_Dist(ra1, ra2, dec1, dec2):## Calculates the angular distance between apparent position and galaxy
ra1 *= (np.pi/180); ra2 *= (np.pi/180)
dec1 *= (np.pi/180); dec2 *= (np.pi/180)
return (180/np.pi) * np.arccos(np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(ra1 - ra2))
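# Note (added): the expression above is the spherical law of cosines; np.arccos loses
# precision for very small separations, where a haversine-style form would be numerically safer.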
#################################################################
"""
def rank(theta, sigma, d_lum, luminosity, luminosity_probability): ## Normal
## Implements a ranking statistic defined in report
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability #* Colour_factor
def rank2(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank3(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity Distance
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank4(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank5(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank6(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_Lum, Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank7(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_lum, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank8(theta, sigma, d_lum, luminosity, luminosity_probability): ## All
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank9(theta, sigma, d_lum, luminosity, luminosity_probability): ## Angular Distance
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank10(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, D_Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank11(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank12(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum_Prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank13(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Ang_Dist
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank14(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank15(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except d_lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank16(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum_prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank17(theta, sigma, d_lum, luminosity, luminosity_probability): ## No angular Distance
return np.exp(0 * -(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank18(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Distance
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability
def rank19(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity**0)[:, 0] * luminosity_probability**2
def rank20(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Probability
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**0
def rank21(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 1
return np.exp(-(2 * theta**2/((sigma)**2))) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
def rank22(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2) * (sigma**2))/(2)) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
def rank23(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2)**100/(2 * (sigma)**2))) * (1/d_lum**8 * luminosity)[:, 0] * luminosity_probability**2
"""
#################################################################
#Dave's old functions before I fixed them
def rank(theta, sigma, d_lum, luminosity, luminosity_probability): ## Normal
## Implements a ranking statistic defined in report
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability #* Colour_factor
def rank2(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank3(theta, sigma, d_lum, luminosity, luminosity_probability): ## Luminosity Distance
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank4(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank5(theta, sigma, d_lum, luminosity, luminosity_probability): ## Lum_prob, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank6(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_Lum, Lum_prob
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank7(theta, sigma, d_lum, luminosity, luminosity_probability): ## D_lum, Lum
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank8(theta, sigma, d_lum, luminosity, luminosity_probability): ## All
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank9(theta, sigma, d_lum, luminosity, luminosity_probability): ## Angular Distance
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank10(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, D_Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability
def rank11(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability
def rank12(theta, sigma, d_lum, luminosity, luminosity_probability): ## Ang_Dist, Lum_Prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**2
def rank13(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Ang_Dist
return np.exp(-(theta**2/(2 *(sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability**2
def rank14(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity)[:, 0] * luminosity_probability**2
def rank15(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except d_lum
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum * luminosity**2)[:, 0] * luminosity_probability**2
def rank16(theta, sigma, d_lum, luminosity, luminosity_probability): ## All except Lum_prob
return np.exp(-(theta**2/((sigma)**2))) * (1/d_lum**2 * luminosity**2)[:, 0] * luminosity_probability
def rank17(theta, sigma, d_lum, luminosity, luminosity_probability): ## No angular Distance
return np.exp(0 * -(theta**2/(2 *(sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability
def rank18(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Distance
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability
def rank19(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity**0)[:, 0] * luminosity_probability**2
def rank20(theta, sigma, d_lum, luminosity, luminosity_probability): ## No Luminosity Probability
return np.exp(-(theta**2/(2 * (sigma)**2))) * (1/d_lum * luminosity)[:, 0] * luminosity_probability**0
def rank21(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 1
return np.exp(-(theta**2/(2 * (sigma)**2)))**(4) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
def rank22(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-(theta**2/(2 * (sigma)**2)))**(sigma**4) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
def rank23(theta, sigma, d_lum, luminosity, luminosity_probability): ## Optimise 2
return np.exp(-((theta**2)**100/(2 * (sigma)**2))) * (1/d_lum**0 * luminosity)[:, 0] * luminosity_probability**2
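# Note (added): rank2-rank23 are variants of the same statistic with individual factors
# (angular weight, luminosity distance, luminosity, luminosity probability) boosted,
# suppressed, or removed, so their influence on the host ranking can be compared.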
#################################################################
def convert(h, m, s): #Converts sexagesimal (hours or degrees, minutes, seconds) to a decimal value (more for applied code than here)
return h + (m/60) + (s/3600)
#################################################################
def Luminosity_Handling(magnitude): ##Converts Absolute B Magnitude to Luminosity
solar_b = 4.74
solar_l = 1 #3.846e26 W
return solar_l * 10**(0.4 * (solar_b - magnitude)) ## Gives an array in terms of solar luminosity
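# Example (added, illustrative only): an absolute B magnitude of about -20.5 gives
# 10**(0.4 * (4.74 + 20.5)) ~ 1e10 in these solar-luminosity units.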
###########################################################
def spherical_convert(ra, dec): ##Test ##Converts ra and dec to an xyz array
r = 1
#ra = phi
#dec = theta
##Convert to radians
ra = ra * np.pi/180
dec = dec * np.pi/180
x = np.cos(ra) * np.cos(dec)
y = np.sin(ra) * np.cos(dec)
z = np.sin(dec)
return np.array([x, y, z])
############################################################
def rotation(x, angle):##Test #Rotation about the z axis
    #need angle in radians
    rotation = np.array([[np.cos(angle), -np.sin(angle), 0],
                         [np.sin(angle), np.cos(angle), 0],
                         [0, 0, 1]])
    return np.dot(rotation, x) ## Matrix product; elementwise x * rotation would not rotate the vector
############################################################
def back_convert(axyz): ##Test ## Converts xyz coordinates to ra and dec
x = axyz[0]
y = axyz[1]
z = axyz[2]
r = modulus(axyz)
arg1 = float(y/x)
arg2 = float(z/r)
phi = np.arctan(arg1)
theta = np.arccos(arg2)
return (180/np.pi) * phi, (90 - theta * (180/np.pi))## Returns ra, dec in that order in degrees
#################################################################
def modulus(array): ##Test ##Finds the modulus of a matrix/array
return np.sqrt(array[0]**2 + array[1]**2 + array[2]**2)
#################################################################
def find_nearest(array, value): #Kind of a hash and not exactly interpolation, but for this point, should be okay
array = np.asarray(array) - value
truey = [i for i, val in enumerate(array) if val >= 0]
idx = truey[0]#(np.abs(array - value)).argmin()
return idx
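# Note (added): for a sorted cumulative array, int(np.searchsorted(np.asarray(array), value))
# gives the same first index with array[idx] >= value; unlike the loop above, it returns
# len(array) instead of raising IndexError when no element reaches `value`.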
#################################################################
def reduction(RA_dec, Dec_dec, df_master): ##Reduces the df_master by considering angular distance
#host = df_master.iloc[current_i]
#RA_dec = ra_prime[0]#host[["RA"]].values.tolist()[0]
#Dec_dec = dec_prime[0]#host[["dec"]].values.tolist()[0]
## Testing purposes only (hashed out lines)
RA = df_master[["RA"]].values.tolist()
ra_arry = np.isclose(RA, RA_dec, atol = error_radius)
res_ra = [i for i, val in enumerate(ra_arry) if val == False] ##Something up here - removing too many items
DEC = df_master[["dec"]].values.tolist()
dec_arry = np.isclose(DEC, Dec_dec, atol = error_radius)
res_dec = [i for i, val in enumerate(dec_arry) if val == False]
indices_to_keep = set(range(df_master.shape[0])) - set(res_ra) - set(res_dec)
df_sliced = pd.DataFrame.take(df_master, list(indices_to_keep), axis = 0)
ra = df_sliced[["RA"]].values
dec = df_sliced[["dec"]].values
return np.array(ra[:, 0]), np.array(dec[:, 0]), df_sliced
#################################################################
def Luminosity_back_convert(L_given, d_L): # ##Converts luminosity to luminosity at source
#L = L0/4 *np.pi * d_l**2
return (L_given) * (4 * np.pi * (3.086e22 * d_L)**2)
def Luminosity_for_convert(L_given, d_L): # ##Converts luminosity at source to apparent luminosity
return(L_given)/(4 * np.pi * (3.086e22 * d_L)**2)
#################################################################
def L_func(L_test, c, d_L): ## ##Takes an input and returns a probability based on the broken power law
L_star = np.log10(4.61e51 * 1e7) ##All from Guetta/Piran 2005
del_1 = 30
del_2 = 10
alpha = 0.5
beta = 1.5
L = np.zeros(len(d_L))
SGR_test = np.zeros(len(d_L))
for j in range(len(d_L)): ## Slightly inefficient, but on the scales of reduced catalog, not too drastic
L[j] = np.log10(Luminosity_back_convert(L_test, d_L[j]))
L_prob = np.zeros(len(L))
for i in range(len(L)):
if L[i] < L_star and (L_star/del_1) < L[i]:
L_prob[i] = c * (L[i]/L_star)**-alpha
elif L[i] > L_star and L[i] < del_2 * L_star:
L_prob[i] = c * (L[i]/L_star)**-beta
elif L[i] < (L_star/del_1):
L_prob[i] = 0 ## What to do when the values fall outside the range that defines the power law?
SGR_test[i] = 1 ##Creates a flag for if the luminosity at source would be low enough to be considered an SGR
else:
L_prob[i] = 0
return L_prob, SGR_test
#################################################################
def L_func1(L): ## ##Builds the broken power law based on a log scale from 52 to 59
L_star = np.log10(4.61e51 * 1e7)
del_1 = 30
del_2 = 10
alpha = 0.5
beta = 1.5
N = len(L)
L2 = np.zeros(N)
summ = 0
sum1 = np.zeros(N)
for i in range(N):
if L[i] < L_star and (L_star/del_1) < L[i]:
L2[i] = (L[i]/L_star)**-alpha
elif L[i] > L_star and L[i] < del_2 * L_star:
L2[i] = (L[i]/L_star)**-beta
else:
L2[i] = L_star
summ += L2[i]
c = 1/(summ)
sum1[i] = summ
L2 *= c
return L2, c
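# Note (added): L_func1 appears to normalise the tabulated broken power law so that its
# values sum to 1, returning the normalised distribution together with the constant c.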
#################################################################
def cumulative(array): ### #Builds cumulative distributions
N = array.shape[0]
summing = np.zeros(N + 1)
#array = L2
for i in range(1, N + 1):
df = pd.DataFrame(array[:i])
summing[i] = df.sum().values.tolist()[0]
return summing# /= summing[-1]
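# Note (added): for a 1-D input the loop above is equivalent to
# np.concatenate(([0.0], np.cumsum(array))), without rebuilding a DataFrame on every pass.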
##If you have N galaxies
##########################################################################################
def axis_rotation(axis, point, angle): ## Rotation about an axis function
init_matrix = np.array([[0, -1 * axis[2], axis[1]],
[axis[2], 0, -1 * axis[0]],
[-1 * axis[1], axis[0], 0]])
matrix_2 = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
term_2 = np.sin(angle) * init_matrix
rot_matrix = (1 - np.cos(angle)) * np.dot(init_matrix, init_matrix) + term_2 + matrix_2
rotated_point = np.dot(rot_matrix, point)
return rotated_point
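# Note (added): axis_rotation implements Rodrigues' rotation formula,
# R = I + sin(angle) * K + (1 - cos(angle)) * (K @ K), with K the skew-symmetric
# cross-product matrix of `axis` (assumed to be a unit vector).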
def Sector_find(RA_grb, Dec_grb, err_radius):
'''
Give coordinates of the grb location and an error in the position, this function
will use cone_search to find all sky sectors that the cone intersects and
will read the corresponding csv files and compile them into one dataframe
'''
#corrects for if the rotations of the galaxy coords puts the GRB in an invalid position
if abs(Dec_grb) > 90:
x = RA_grb
parity = Dec_grb/abs(Dec_grb)
Dec_grb = (180 - abs(Dec_grb))*parity
RA_grb = RA_grb + 180
if RA_grb > 360:
RA_grb = x - 180
elif RA_grb < 0:
RA_grb = 360 + RA_grb
#making the sky coordinates
coords = SkyCoord(RA_grb, Dec_grb, unit = "deg")
#finding intersecting sectors
sectors = hp.cone_search_skycoord(coords, radius = err_radius*u.degree)
#making the empty dataframe
df_container = pd.DataFrame()
for i in sectors:
'''
loop over the intersecting sectors to read the files and append to
the df_container
'''
        name = "Sector_{}".format(i)
holder = pd.read_csv("Data Files/GLADE_Sectioned/{}.csv".format(name),\
delimiter = ",", index_col = 0)
df_container = df_container.append(holder)
return df_container
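# Example usage (added; the GRB position below is made up for illustration):
# nearby_galaxies = Sector_find(RA_grb=193.4, Dec_grb=-5.8, err_radius=2 * 2.62)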
#########################################################################################
#########################################################################################
df_master = pd.read_csv("Data Files/GLADE_Master.csv", delimiter = ",", low_memory = False) ##GLADE_Master.csv previously defined
L1 = np.linspace(56, 59, 101) #In J now
L2, c = L_func1(L1) # ##Builds broken power law
cumuL = cumulative(L2) ##Luminosity Distribution
df_cumLum = pd.read_csv("Data Files/Cumulative Luminosity.csv")
df_cumLum.columns = ["NaN", "Cumulative Luminosity"]
normal_c = df_cumLum[["Cumulative Luminosity"]].values[-1][0]
L_rank = df_cumLum[["Cumulative Luminosity"]].values * 1/normal_c
df_cumLum = df_cumLum[["Cumulative Luminosity"]].values# ## This is all to do with building a usable and callable power law
lum_N = np.linspace(0, df_cumLum.shape[0], df_cumLum.shape[0])
df_dL = df_master[["Luminosity Distance"]]
#using HEALPix to split the sky into equal area sectors
hp = HEALPix(nside=16, order='ring', frame=ICRS())
tests = randint(0, 2, size = N) ## If tests[i] = 0, use test galaxy, or if = 1, choose random point beyond the catalog
dummies = random(N)
RandL = random(N)
gals = np.zeros(N) ## Picks out a luminosity
gal_index = np.zeros(N)
"""
aa = np.zeros(shape = (N, 5)) # Storing Angular distance
ab = np.zeros(shape = (N, 5)) # Storing Luminosity Distance
ac = np.zeros(shape = (N, 5)) # Storing B Luminosity
ad = np.zeros(shape = (N, 5)) # Storing Luminosity Probability
"""
lum_atsource = np.zeros(N)
received_luminosity = np.zeros(N)
cumul_N = np.zeros(N)
lum_list = list(L_rank)
df_dL = df_dL.values.tolist() ## Luminosity distance values for use
a = np.zeros(N) ## For storing temporary and unimportant values
b = np.zeros(N) ## For storing temporary and unimportant values
test_ra = df_master[["RA"]]
test_dec = df_master[["dec"]]
indices = list(np.arange(df_master.shape[0]))
error_radius = 2 * (2.62) ## Change as necessary - this is an example value from HEARSCH
percentages = np.zeros(N)
distances = np.zeros(N)
luminosity_i = np.zeros(N)
rank_host = np.zeros(N)
faulty = np.zeros(shape = (N, 5)) ## All of this used to store values
phi = 2 * np.pi * random(N) * (180/np.pi) ## Random positions for rotations
theta = np.arccos(2 * random(N) - 1) * (180/np.pi)
thph = spherical_convert(theta, phi)
mod = np.zeros(N)
for i in range(N):
mod[i] = modulus(thph[:, i])
thph[:, i] /= mod[i]
xyz = np.zeros(shape = (N, 3))
m = np.zeros(shape = (N, 3))
ra_prime = np.zeros(N); dec_prime = | np.zeros(N) | numpy.zeros |
"""
For now, not going to worry about lags, don't have enough data
ISCH is a misnomer, SAR3 is a better definition, although not quite
Documentation from noro:
===============
=== Purpose ===
===============
Intercept-Sin-Cos-Holiday regression. It predicts wILI in some region on some
epiweek using ordinary regression. There are 6 covariates (7 if you count the
intercept term):
- 4 indicator (0/1) variables for holiday weeks (50 or 51 through 01)
- 2 timing variables: sin and cos of the epiweek
When producing retrospective predictions, great care is taken to only use
'valid' data: values that would have actually been available at the time.
However, unstable wILI is only available for recent years and for only some of
the regions (i.e. not in census regions). During training, ISCH will fall back
to stable data if unstable data is unavailable; however, during prediction,
ISCH will raise an Exception if unstable data is unavailable.
Note that the epiweek parameter represents the most recently published issue.
The returned value is a prediction for the following week.
See also:
- arch.py: another system that generates 1-week-ahead predictions
=================
=== Changelog ===
=================
2016-04-11
* allow predictions using invalid (stable) data
2016-04-06
+ initial version
"""
# standard library
import argparse
# third party
import numpy as np
# first party
from delphi.epidata.client.delphi_epidata import Epidata
import delphi.operations.secrets as secrets
import delphi.utils.epiweek as EW
from delphi.nowcast_dengue.util.dengue_data_source import DengueDataSource
from delphi.nowcast_dengue.util.cumulative_to_weekly import cum_to_week
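# Illustrative sketch (not part of this module): the covariate row described in the
# docstring for a single epiweek. The 52-week period and the exact holiday-week
# encoding (weeks 50, 51, 52, 01) are assumptions made only for demonstration.
def _example_isch_covariates(week, num_weeks=52):
    offset = 2 * np.pi * week / num_weeks
    holidays = [1 if week == w else 0 for w in (50, 51, 52, 1)]
    return [1.0, np.sin(offset), np.cos(offset)] + holidays  # intercept + 6 covariates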
def mutate_rows_as_if_lagged(rows, lag):
for row in rows:
row.update({'lag': lag})
return rows
class ISCH:
@staticmethod
def dot(*Ms):
N = Ms[0]
for M in Ms[1:]:
N = | np.dot(N, M) | numpy.dot |
import os
import os.path as osp
import sys
import numpy as np
import pandas as pd
import math
import argparse
import xml.etree.ElementTree as ET
import json
import matplotlib.pyplot as plt
def main() -> None:
argparser = argparse.ArgumentParser()
argparser.add_argument("-f", "--file", help="Path to .mo file to simulate", type=str, default="")
argparser.add_argument("-w", "--wdir", help="Working directory of the .mo", type=str, default="")
argparser.add_argument("-d", "--dir", help="Source directory path", type=str, default="")
argparser.add_argument("-c", "--csv", help="Enable csv calcola", action="store_true")
argparser.add_argument("-p", "--plot", help="Enable plotting", action="store_true")
args = argparser.parse_args()
mofile = args.file
wdir = args.wdir
directory = args.dir
csv = args.csv
plot = args.plot
if (not (osp.isdir(osp.abspath(wdir)) and osp.isfile(osp.join(osp.abspath(wdir), mofile))) and mofile != "" and wdir != "") or \
not osp.isdir(osp.abspath(directory)):
print("Error ...")
sys.exit(1)
files = [(mofile, wdir)] if not mofile == "" else []
if directory != "":
for f in os.listdir(osp.abspath(directory)):
new_path = osp.join(osp.abspath(directory), f)
if osp.isdir(new_path):
for mo in os.listdir(new_path):
if mo.endswith(".mo") and "package" not in mo:
files.append((mo, f))
files.sort(key=lambda x: x[1])
if len(files) == 1:
success = run_for_single(mofile, wdir, csv, plot)
if not success:
print("Error")
return
print("No error")
return
for x, y in files[66: 66 + 10]:
print(f"Validating {y} -> {x}")
# Starting do things
success = run_for_single(x, y, csv, plot)
if not success:
print("There was an error ...")
return
print("No error")
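# Example invocations (illustrative; the script name, model file and directory names
# below are placeholders, not taken from the original project):
#   python validate_models.py --file model.mo --wdir case01 --plot
#   python validate_models.py --dir ./testsuite --csv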
def run_for_single(mofile: str, wdir: str, csv: bool, plot: bool) -> bool:
try:
current_dir = os.getcwd()
os.chdir(osp.abspath(wdir))
file_mode = "x" if not osp.isfile(osp.join(os.getcwd(), "build.mos")) else "w"
if not csv:
with open("build.mos", mode=file_mode) as stream:
stream.write(create_build_mos(mofile, wdir))
# Build the model
result = os.system("omc build.mos")
if result != 0: return False
# Getting settings
settings = get_settings(wdir)
if not csv:
# Setting settings
tree = ET.parse(f"{wdir}.{mofile[:-3]}_init.xml")
root = tree.getroot()
child = list(root)[0]
child.attrib['stopTime'] = str(settings['stopTime'])
child.attrib['numberOfIntervals'] = str(settings['numberOfIntervals'])
child.attrib['stepSize'] = str(settings['stopTime'] / settings['numberOfIntervals'])
child.attrib['startTime'] = "0.0"
child.attrib['outputFormat'] = "csv"
child.attrib['solver'] = 'dassl'
tree.write(f"{wdir}.{mofile[:-3]}_init.xml")
# Simulate the model
os.system("./{0}".format(f"{wdir}.{mofile[:-3]}"))
# Clean up all the unneeded build artifacts
os.system(f"rm *.log *.libs *_init.xml *.o *.c *.h *.json *.makefile {wdir}.{mofile[:-3]}")
if not plot:
result = calcola(mofile, wdir, settings)
save_json(current_dir, result, wdir)
else:
_plot(mofile, wdir, settings)
os.chdir(current_dir)
return True
except Exception as e:
raise e
return False
def _plot(mofile, wdir, settings: dict) -> None:
curr_wdir = wdir.replace("case", "")
true_csv = pd.read_csv(f"{curr_wdir}-results.csv")
stimated_csv = pd.read_csv(f"{wdir}.{mofile[:-3]}_res.csv")
to_conc = []
heads = []
for head in true_csv.head():
suffix = ""
if head != "time" and head in settings['amounts'] and (head.startswith("S") or head.startswith("X") or head.startswith("T")) and head != "":
suffix = "_amount"
heads.append(head)
elif head != "time" and head in settings['concentrations'] and (head.startswith("S") or head.startswith("X") or head.startswith("T")) and head != "":
suffix = "_conc"
heads.append(head)
if suffix != "" or head == "time": # or head == "S1":
to_conc.append(np.array(stimated_csv[f"{head}{suffix}"]).reshape(-1, 1))
#heads += ["S1"]
#print(len(to_conc), heads)
stimated_np = np.concatenate(tuple(to_conc), axis=1) if len(to_conc) > 1 else to_conc[0]
true_np = np.array(true_csv.values[:, :])
plt.figure(figsize=[15.0, 8.0])
for i in range(1, len(stimated_np[0])):
plt.plot(stimated_np[:, 0], stimated_np[:, i], label=heads[i - 1])
plt.xlabel("Time [s]")
plt.ylabel("Amount")
plt.legend(loc="upper right")
plt.savefig("{0}_plot.png".format(wdir))
plt.close()
plt.figure(figsize=[15.0, 8.0])
for i in range(1, len(stimated_np[0])):
plt.plot(stimated_np[:-1, 0], true_np[:, i], label=heads[i - 1])
plt.xlabel("Time [s]")
plt.ylabel("Amount")
plt.legend(loc="upper right")
plt.savefig("testsuite_{0}_plot.png".format(wdir))
plt.close()
def save_json(curr_dir: str, result: dict, wdir: str) -> None:
curr_test = json.load(open(osp.join(curr_dir, "tests.json"), mode="r"))
curr_test[wdir] = result
with open(osp.join(curr_dir, "tests.json"), mode="w") as stream:
json.dump(curr_test, stream)
def calcola(mofile, wdir, settings: dict) -> dict:
# Open the results and apply the error formule
curr_wdir = wdir.replace("case", "")
true_csv = pd.read_csv(f"{curr_wdir}-results.csv")
stimated_csv = pd.read_csv(f"{wdir}.{mofile[:-3]}_res.csv")
to_conc = []
for head in true_csv.head():
suffix = ""
if head != "time" and head in settings['amounts'] and (head.startswith("S") or head.startswith("X") or head.startswith("T")) and head != "":
suffix = "_amount"
elif head != "time" and head in settings['concentrations'] and (head.startswith("S") or head.startswith("X") or head.startswith("T")) and head != "":
suffix = "_conc"
if head != "time":
to_conc.append( | np.array(stimated_csv[f"{head}{suffix}"]) | numpy.array |
# coding: utf-8
'''
Module to be used for static analysis
'''
import numpy as np
import sympy as sp
import scipy
import matplotlib.pyplot as plt
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
def simple_support():
L = 15
P = 5
Ploc = 5
plt.rcParams['figure.figsize'] = (10, 8) # (width, height)
fig1 = plt.figure()
ax1 = fig1.add_subplot(311) # , aspect='equal')
def add_beam():
# plt.subplot(3,1,1)
# ax = plt.gca()
plt.xlim([-1, L + 1])
plt.ylim([-1, P * 2])
# add rigid ground
rectangle = plt.Rectangle((-1, -2), L + 2, 2, hatch='//', fill=False)
ax1.add_patch(rectangle)
# add rigid rollers
# circle = plt.Circle((0, 5), radius=1, fc='g')
# ax.add_patch(circle)
e1 = patches.Ellipse((0, 2), L / 20, 4, angle=0, linewidth=2, fill=False, zorder=2)
ax1.add_patch(e1)
# add triangle
points = [[L, 4], [L - L / 40, 0], [L + L / 40, 0]]
polygon = plt.Polygon(points, fill=False)
ax1.add_patch(polygon)
# add beam
rectangle = plt.Rectangle((0, 4), L, 4, fill=False)
ax1.add_patch(rectangle)
def point_load():
# point load shear
x = np.linspace(0, L, 100)
y = np.ones(len(x)) * P / 2
y[x > Ploc] = y[x > Ploc] - P
x[0] = 0
x[-1] = 0
plt.subplot(3, 1, 2)
plt.ylabel('Shear, V')
plt.title('Shear Diagram')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# point load bending
x = np.linspace(-L / 2, L / 2, 100)
y = -(x ** 2) + (np.max(x ** 2))
x = np.linspace(0, L, 100)
plt.subplot(3, 1, 3)
plt.title('Bending Diagram')
plt.ylabel('Moment, M')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# add point load
plt.subplot(3, 1, 1)
plt.annotate('P=%i' % P, ha='center', va='bottom',
xytext=(Ploc, 15), xy=(Ploc, 7.5),
arrowprops={'facecolor': 'black', 'shrink': 0.05})
plt.title('Free Body Diagram')
plt.axis('off') # removes axis and labels
# # add point load
# ax1.arrow(3, 11+L/10, 0, -3, head_width=L*0.02, head_length=L*0.1, fc='k', ec='k')
# plt.title('Free Body Diagram')
# plt.axis('off') # removes axis and labels
# #ax1.set_yticklabels('')
def dist_load():
# add distributed load
plt.subplot(3, 1, 1)
for k in np.linspace(0, L, 20):
ax1.arrow(k, 11 + L / 10, 0, -3, head_width=L * 0.01, head_length=L * 0.1, fc='k', ec='k')
plt.title('Free Body Diagram')
plt.axis('off') # removes axis and labels
# ax1.set_yticklabels('')
# dist load shear
x = [0, 0, L, L]
y = [0, 5, -5, 0]
plt.subplot(3, 1, 2)
plt.ylabel('Shear, V')
plt.title('Shear Diagram')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# dist load bending
x = np.linspace(-L / 2, L / 2, 100)
y = -(x ** 2) + (np.max(x ** 2))
x = np.linspace(0, L, 100)
plt.subplot(3, 1, 3)
plt.title('Bending Diagram')
plt.ylabel('Moment, M')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
add_beam()
dist_load()
# point_load()
plt.tight_layout()
plt.show()
def moment_calc():
fig = plt.figure()
ax = plt.axes(projection='3d')
# bar
x = [0, 0, 4, 4]
y = [0, 5, 5, 5]
z = [0, 0, 0, -2]
# Applied Forces
X = [0, 0, 4]
Y = [5, 5, 5]
Z = [0, 0, -2]
U = [-60, 0, 80]
V = [40, 50, 40]
W = [20, 0, -30]
ax.plot(x, y, z, '-b', linewidth=5)
ax.view_init(45, 45)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Hibbler pg 129 example')
ax.set_xlim([min(X) - 2, max(X) + 2])
ax.set_ylim([min(Y) - 5, max(Y) + 2])
ax.set_zlim([min(Z) - 2, max(Z) + 2])
# plt.tight_layout()
ax.quiver3D(X, Y, Z, U, V, W, pivot='tail');
rA = np.array([0, 5, 0]) # start of F1 and F2
rB = np.array([4, 5, -2]) # start of F3
F1 = np.array([-60, 40, 20])
F2 = np.array([0, 50, 0])
F3 = np.array([80, 40, -30])
M = np.cross(rA, F1) + | np.cross(rA, F2) | numpy.cross |
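# Worked step (using the vectors defined above): the moment of a force F applied at
# position r about the origin is the cross product r x F, and the contributions of
# several forces simply add. The first term evaluates to
# >>> np.cross(np.array([0., 5., 0.]), np.array([-60., 40., 20.]))
# array([100.,   0., 300.])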
# BSD 3-Clause License
#
# Copyright (c) 2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import numpy as np
from .type_aliases import NPF, NPI
__all__ = ["region", "split"]
def region(low: NPF, high: NPF) -> tuple[NPF, ...]:
"""Compute the hyper-rectangular region parameters from given limits of integration."""
# :::::::::::::::: Shapes ::::::::::::::::::
# {low, high}.shape [ domain_dim, events ]
# centers.shape [ domain_dim, regions_events ]
# halfwidth.shape [ domain_dim, regions_events ]
# vol.shape [ regions_events ]
if low.shape != high.shape:
raise RuntimeError(
"Vector limits of integration must be equivalent.", low.shape, high.shape
)
if low.ndim == 1:
low = np.expand_dims(low, 0)
high = np.expand_dims(high, 0)
if low.ndim != 2:
raise RuntimeError("Input limits shape not supported.")
centers = (high + low) * 0.5
halfwidth = (high - low) * 0.5
vol = np.prod(2 * halfwidth, axis=0)
return centers, halfwidth, vol
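# Illustrative usage (not part of the module): one 2-D region with limits
# [0, 1] x [-1, 1]; shapes follow the [domain_dim, events] convention noted above.
# >>> c, h, v = region(np.array([[0.0], [-1.0]]), np.array([[1.0], [1.0]]))
# >>> c.T, h.T, v
# (array([[0.5, 0. ]]), array([[0.5, 1. ]]), array([2.]))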
def split(centers: NPF, halfwidth: NPF, volumes: NPF, split_dim: NPI):
# centers.shape [ domain_dim, regions_events ]
# split_dim.shape [ 1, regions_events ]
if np.amin(split_dim) < 0 or np.amax(split_dim) >= (centers.shape[0]):
raise IndexError("split dimension invalid")
if split_dim.ndim < centers.ndim:
split_dim = np.expand_dims(split_dim, 0)
## {center, hwidth} [ domain_dim, (regions, events) ]
mask = | np.zeros_like(centers, dtype=np.bool_) | numpy.zeros_like |
import numpy as np
import gym
from gym import spaces
from numpy.random import default_rng
import pickle
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
from gym_flp import rewards
from IPython.display import display, clear_output
import anytree
from anytree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter
'''
v0.0.3
Significant changes:
08.09.2020:
- Dicrete option removed from spaces; only Box allowed
- Classes for quadtratic set covering and mixed integer programming (-ish) added
- Episodic tasks: no more terminal states (exception: max. no. of trials reached)
12.10.2020:
- mip added
- fbs added
'''
class qapEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance=None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb'))
self.transport_intensity = None
self.instance = instance
self.mode = mode
while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']):
print('Available Problem Sets:', self.DistanceMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.D = self.DistanceMatrices[self.instance]
self.F = self.FlowMatrices[self.instance]
# Determine problem size relevant for much stuff in here:
self.n = len(self.D[0])
# Action space has two option:
# 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n
# 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x!
# self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=np.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1)
self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1)
# If you are using images as input, the input values must be in [0, 255] as the observation is normalized (dividing by 255 to have values in [0, 1]) when using CNN policies.
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = np.uint8) # Image representation
elif self.mode == 'human':
self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.float32)
self.states = {} # Create an empty dictionary where states and their respective reward will be stored for future reference
self.actions = self.pairwiseExchange(self.n)
# Initialize Environment with empty state and action
self.action = None
self.state = None
self.internal_state = None
#Initialize moving target to incredibly high value. To be updated if reward obtained is smaller.
self.movingTargetReward = np.inf
self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards
def reset(self):
state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False)
#MHC, self.TM = self.MHC.compute(self.D, self.F, state)
self.internal_state = state.copy()
return state
def step(self, action):
# Create new State based on action
fromState = self.internal_state.copy()
swap = self.actions[action]
fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1]
newState = fromState.copy()
#MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation)
MHC, self.TM = self.MHC.compute(self.D, self.F, newState)
if self.mode == 'human':
self.states[tuple(fromState)] = MHC
if self.movingTargetReward == np.inf:
self.movingTargetReward = MHC
#reward = self.movingTargetReward - MHC
reward = -1 if MHC > self.movingTargetReward else 10
self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward
if self.mode == "rgb_array":
rgb = np.zeros((1,self.n,3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((fromState-np.min(fromState))/(np.max(fromState)-np.min(fromState))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(fromState):
rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]]
newState = np.array(rgb)
self.state = newState.copy()
self.internal_state = fromState.copy()
return newState, reward, False, {}
def render(self, mode=None):
if self.mode == "human":
SCALE = 1 # Scale size of pixels for displayability
img_h, img_w = SCALE, (len(self.internal_state))*SCALE
data = np.zeros((img_h, img_w, 3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((self.internal_state-np.min(self.internal_state))/(np.max(self.internal_state)-np.min(self.internal_state))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(self.internal_state):
data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]]
img = Image.fromarray(data, 'RGB')
if self.mode == 'rgb_array':
img = Image.fromarray(self.state, 'RGB')
plt.imshow(img)
plt.axis('off')
plt.show()
return img
def close(self):
pass
def pairwiseExchange(self, x):
actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j]
actions.append((1,1))
return actions
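# Illustrative check (not part of the environment): for n = 4 facilities the
# pairwise-exchange action space has 1 + (n**2 - n) / 2 = 7 actions, the last one
# being the identity swap (1, 1):
# >>> qapEnv.pairwiseExchange(None, 4)
# [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4), (1, 1)]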
class fbsEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance = None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
self.mode = mode
self.instance = instance
while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
print('Available Problem Sets:', self.FlowMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.F = self.FlowMatrices[self.instance]
self.n = self.problems[self.instance]
self.AreaData = self.sizes[self.instance]
# Obtain size data: FBS needs a length and area
self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
'''
Nomenclature:
W --> Width of Plant (y coordinate)
L --> Length of Plant (x coordinate)
w --> Width of facility/bay (x coordinate)
l --> Length of facility/bay (y coordinate)
A --> Area of Plant
a --> Area of facility
Point of origin analoguous to numpy indexing (top left corner of plant)
beta --> aspect ratios (as alpha is reserved for learning rate)
'''
#if self.l is None or self.w is None:
# self.l = np.random.randint(max(self.min_side_length, np.min(self.a)/self.min_side_length), max(self.min_side_length, np.min(self.a)/self.min_side_length), size=(self.n,))
# self.l = np.sqrt(self.A/self.aspect_ratio)
# self.w = np.round(self.a/self.l)
# Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
self.W = int(self.LayoutWidths[self.instance])
else:
self.A = np.sum(self.a)
# Design a squared plant layout
self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
self.W = self.L
# Design a layout with l = 1,5 * w
#self.L = divisor(int(self.A))
#self.W = self.A/self.L
# These values need to be set manually, e.g. according to data from the literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy a bay alone.
self.aspect_ratio = int(max(self.beta)) if not self.beta is None else 1
self.min_length = np.min(self.a) / self.L
self.min_width = | np.min(self.a) | numpy.min |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class NonMaxSuppression(Base):
@staticmethod
def export_nonmaxsuppression_suppress_by_IOU(): # type: () -> None
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],
outputs=['selected_indices']
)
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
expect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU')
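    # Illustrative arithmetic (not part of the ONNX test suite): the corner-format IoU
    # that the 0.5 threshold above is compared against. Boxes [0, 0, 1, 1] and
    # [0, 0.1, 1, 1.1] intersect over an area of 1 * 0.9 = 0.9 and each has area 1, so
    # IoU = 0.9 / (1 + 1 - 0.9) ~ 0.82 > 0.5, which is why box 1 is suppressed while
    # the higher-scoring boxes 3 and 0 are kept.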
@staticmethod
def export_nonmaxsuppression_suppress_by_IOU_and_scores(): # type: () -> None
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],
outputs=['selected_indices']
)
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.4]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
expect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU_and_scores')
@staticmethod
def export_nonmaxsuppression_flipped_coordinates(): # type: () -> None
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],
outputs=['selected_indices']
)
boxes = np.array([[
[1.0, 1.0, 0.0, 0.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, 0.9, 1.0, -0.1],
[0.0, 10.0, 1.0, 11.0],
[1.0, 10.1, 0.0, 11.1],
[1.0, 101.0, 0.0, 100.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
expect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_flipped_coordinates')
@staticmethod
def export_nonmaxsuppression_limit_output_size(): # type: () -> None
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],
outputs=['selected_indices']
)
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
expect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_limit_output_size')
@staticmethod
def export_nonmaxsuppression_single_box(): # type: () -> None
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],
outputs=['selected_indices']
)
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[[0.9]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = | np.array([0.5]) | numpy.array |
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
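# Illustrative note (not part of the test suite): geomspace keeps a constant ratio
# between consecutive samples, e.g.
# >>> geomspace(1, 1000, 4)
# array([   1.,   10.,  100., 1000.])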
class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
t2 = linspace(0, 1, 1).dtype
t3 = linspace(0, 1, 2).dtype
assert_equal(t1, t2)
assert_equal(t2, t3)
def test_dtype(self):
y = linspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = linspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = linspace(lim1[0], lim1[1], 5)
t2 = linspace(lim2[0], lim2[1], 5)
t3 = linspace(lim3[0], lim3[1], 5)
t4 = linspace(-120.0, 100.0, 5)
t5 = linspace(120.0, -100.0, 5)
t6 = linspace(1200.0, 1000.0, 5)
assert_equal(t1, t4)
assert_equal(t2, t5)
assert_equal(t3, t6)
def test_start_stop_array(self):
start = array([-120, 120], dtype="int8")
stop = array([100, -100], dtype="int8")
t1 = | linspace(start, stop, 5) | numpy.linspace |
import os
import time
import numpy as np
import scipy.io as sio
import scipy.stats as st
import tensorflow as tf
from models.CifarNet import CifarNetModel
from datasets.cifar10_dataset import Cifar10Dataset
from datasets.hadamard import load_hadamard_matrix
# quantizers
import quantizers.cs_quantizer as np_csq
import quantizers.qsg_quantizer as np_qsg
import quantizers.dithered_transform_quantizer as np_dtq
import quantizers.onebit_quantizer as np_obq
import quantizers.topK_sgd as np_topK
import quantizers.atomo as np_atomo
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
training_algorithm = 'GD'
nn_settings = {
'initial_w': None, # initial weights
'initial_b': None, # initial bias
'training_alg': training_algorithm, # training algorithm
'learning_rate': 0.2, # learning rate
'decay_rate': 0.98, # decay rate
'decay_step': 500, # decay step
'compute_gradients': True, # compute gradients for use in distribtued training
}
db_params = {
'database-dir': 'Database/CIFAR10/raw',
'one-hot': False,
'output-dimension': (24, 24),
'augment-training': True,
}
output_folder = 'QuantizedCS/Quantizer/Cifarnet/mse'
num_evals = 20
batch_size = 256
layer_index = 2
db = Cifar10Dataset(db_settings=db_params)
graph = tf.Graph()
with graph.as_default():
db_images, db_labels, initializer_op = db.create_dataset(['train', 'test'], batch_size, 16)
db_sess = tf.Session(graph=graph)
db_sess.run(initializer_op['train'])
def train_base_model(w0=None, b0=None):
# training is done using batch-size=256
nn_settings['initial_w'] = w0
nn_settings['initial_b'] = b0
nn = CifarNetModel()
nn.create_network(nn_settings)
for _ in range(150):
x, y = db_sess.run([db_images, db_labels])
nn.train(x, y)
w0, b0 = nn.get_weights()
return w0, b0
def evaluate_qsg(nn, bucket_size, fname):
w0, b0 = nn.get_weights()
input_bits = w0[layer_index].size * 32
Q = np.arange(1, 50)
err = np.zeros((len(Q), num_evals))
compression_gain = np.zeros(len(Q))
for nq, q in enumerate(Q):
quantizer = np_qsg.qsg_quantizer(bucket_size, q)
# compute compression gain
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
v, s = quantizer.quantize(gw, reconstructed=False)
compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)
# compute error
for n in range(num_evals):
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
gwh = quantizer.quantize(gw, reconstructed=True)
err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)
sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})
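# Illustrative arithmetic (not taken from the experiments above): with q = 4 each
# quantized gradient entry costs log2(2 * 4 + 1) ~ 3.17 bits, so a layer of 10,000
# weights split into 100 buckets (one 32-bit scale each) shrinks from 320,000 bits
# to roughly 32 * 100 + 3.17 * 10,000 ~ 34,900 bits -- a compression gain of about 9x.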
def evaluate_topksg(nn, fname):
w0, b0 = nn.get_weights()
input_bits = w0[layer_index].size * 32
maxK = w0[layer_index].size // 5
K = np.arange(1, maxK, 10)
compression_gain = np.zeros(len(K))
err = np.zeros((len(K), num_evals))
for nk, k in enumerate(K):
quantizer = np_topK.topk_quantizer(k)
# compute compression gain
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
ind, v = quantizer.quantize(gw, reconstructed=False)
compression_gain[nk] = input_bits / (8 * (ind[0].size + ind[1].size) + 32 * v.size)
# compute error
for n in range(num_evals):
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
gwh = quantizer.quantize(gw, reconstructed=True)
err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)
sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'K': K})
def evaluate_atomo(nn, fname):
w0, b0 = nn.get_weights()
input_bits = w0[layer_index].size * 32
maxR = np.prod(w0[layer_index].shape[:-1])
R = np.arange(1, maxR, 10)
compression_gain = np.zeros(len(R))
err = np.zeros((len(R), num_evals))
for nk, k in enumerate(R):
quantizer = np_atomo.atomo_quantizer(k, True)
# compute compression gain
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
u, v, s = quantizer.quantize(gw, reconstructed=False)
compression_gain[nk] = input_bits / (32 * (u.size + v.size + s.size))
# compute error
for n in range(num_evals):
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
gwh = quantizer.quantize(gw, reconstructed=True)
err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)
sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'R': R})
def evaluate_dqtsg(nn, H, fname):
w0, b0 = nn.get_weights()
input_bits = w0[layer_index].size * 32
Q = np.arange(1, 50)
err = np.zeros((len(Q), num_evals))
compression_gain = np.zeros(len(Q))
for nq, q in enumerate(Q):
quantizer = np_dtq.dt_quantizer(H, q)
# compute compression gain
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
v, s = quantizer.quantize(gw, reconstructed=False)
compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)
# compute error
for n in range(num_evals):
x, y = db_sess.run([db_images, db_labels])
gw, _ = nn.get_gradients(x, y)
gw = gw[layer_index]
gwh = quantizer.quantize(gw, reconstructed=True)
err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)
sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})
def evaluate_qcssg(nn, H, fname):
w0, b0 = nn.get_weights()
input_bits = w0[layer_index].size * 32
maxK = H.shape[0]
K = np.arange(1, maxK, 10)
Q = np.arange(1, 5)
compression_gain = np.zeros((len(K), len(Q)))
err = np.zeros((len(K), len(Q), num_evals))
for nk, k in enumerate(K):
print(k/maxK, flush=True)
Hk = H[:, -k:] * | np.sqrt(maxK) | numpy.sqrt |
# Written by <NAME>
#
# Based on:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import numpy as np
import pycocotools.mask as mask_util
from torch.autograd import Variable
import torch
import ipdb
import math
from core.config import cfg
from utils.timer import Timer
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils.fpn as fpn_utils
import utils.image as image_utils
import utils.keypoints as keypoint_utils
from roi_data.hoi_data import get_hoi_blob_names
from roi_data.hoi_data_union import get_hoi_union_blob_names, generate_union_mask, generate_joints_heatmap
from roi_data.hoi_data_union import generate_pose_configmap, generate_part_box_from_kp, generate_part_box_from_kp17
from datasets import json_dataset
import torch.nn.functional as F
import time
def im_detect_all(model, im, box_proposals=None, timers=None, entry=None):
"""Process the outputs of model for testing
Args:
model: the network module
im: BGR image (numpy array) to run detection on
box_proposals: optional precomputed box proposals (None when RPN proposals are used)
timers: dict of Timer objects recording the cost of the different steps
entry: roidb entry for the image (consumed by the HOI branch)
"""
if timers is None:
timers = defaultdict(Timer)
timers['im_detect_bbox'].tic()
if cfg.TEST.BBOX_AUG.ENABLED:
# boxes is in origin img size
scores, boxes, im_scale, blob_conv = im_detect_bbox_aug(
model, im, box_proposals)
else:
scores, boxes, im_scale, blob_conv = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals)
timers['im_detect_bbox'].toc()
im_info = np.array([im.shape[:2]+(im_scale[0],)])
# score and boxes are from the whole image after score thresholding and nms
# (they are not separated by class) (numpy.ndarray)
# cls_boxes boxes and scores are separated by class and in the format used
# for evaluating results
timers['misc_bbox'].tic()
scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
timers['misc_bbox'].toc()
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes, im_scale, blob_conv)
else:
masks = im_detect_mask(model, im_scale, boxes, blob_conv)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(cls_boxes, masks, boxes, im.shape[0], im.shape[1])
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes, blob_conv)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
vcoco_heatmaps = None
if cfg.MODEL.VCOCO_ON:
if cfg.VCOCO.KEYPOINTS_ON:
# ipdb.set_trace()
vcoco_heatmaps, vcoco_heatmaps_np = im_detect_keypoints_vcoco(model, im_scale[0], cls_boxes[1][:, :4], blob_conv)
vcoco_cls_keyps = keypoint_results_vcoco(cls_boxes, vcoco_heatmaps_np)
else:
vcoco_cls_keyps = None
hoi_res = im_detect_hoi_union(model, boxes, scores, cls_boxes[1].shape[0],
im_info, blob_conv, entry,
vcoco_heatmaps)
else:
hoi_res = None
vcoco_cls_keyps = None
return cls_boxes, cls_segms, cls_keyps, hoi_res, vcoco_cls_keyps
def im_detect_all_precomp_box(model, im, timers=None, entry=None, mode='val', category_id_to_contiguous_id=None):
"""Process the outputs of model for testing
Args:
model: the network module
im: BGR image (numpy array) to run detection on
timers: dict of Timer objects recording the cost of the different steps
entry: roidb entry carrying the precomputed boxes, scores and categories
mode: dataset split flag ('val' by default) forwarded to the HOI branch
category_id_to_contiguous_id: mapping from dataset category ids to contiguous class indices
"""
if timers is None:
timers = defaultdict(Timer)
blob_conv, im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
im_info = np.array([im.shape[:2] + (im_scale[0],)])
scores, boxes, cates, cls_boxes = im_detect_bbox_precomp_box(entry, category_id_to_contiguous_id)
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes, im_scale, blob_conv)
else:
masks = im_detect_mask(model, im_scale, boxes, blob_conv)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(cls_boxes, masks, boxes, im.shape[0], im.shape[1])
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes, blob_conv)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
vcoco_heatmaps = None
vcoco_cls_keyps = None
loss = None
if cfg.MODEL.VCOCO_ON:
hoi_res, loss = im_detect_hoi_union(model, boxes, scores, cates, cls_boxes[1].shape[0],
im_info, blob_conv, entry, mode,
vcoco_heatmaps)
else:
hoi_res = None
vcoco_cls_keyps = None
return cls_boxes, cls_segms, cls_keyps, hoi_res, vcoco_cls_keyps, loss
def im_conv_body_only(model, im, target_scale, target_max_size):
inputs, im_scale = _get_blobs(im, None, target_scale, target_max_size)
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = Variable(torch.from_numpy(inputs['data']), volatile=True).cuda()
else:
inputs['data'] = torch.from_numpy(inputs['data']).cuda()
inputs.pop('im_info')
blob_conv = model.module.convbody_net(**inputs)
return blob_conv, im_scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
"""Prepare the bbox for testing"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']), volatile=True)]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']), volatile=True)]
else:
inputs['data'] = [torch.from_numpy(inputs['data'])]
inputs['im_info'] = [torch.from_numpy(inputs['im_info'])]
time1 = time.time()
return_dict = model(**inputs)
time2 = time.time()
print('model_time:', time2-time1)
if cfg.MODEL.FASTER_RCNN:
rois = return_dict['rois'].data.cpu().numpy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scale
# cls prob (activations after softmax)
scores = return_dict['cls_score'].data.cpu().numpy().squeeze()
# In case there is 1 proposal
scores = scores.reshape([-1, scores.shape[-1]])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = return_dict['bbox_pred'].data.cpu().numpy().squeeze()
# In case there is 1 proposal
box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# (legacy) Optionally normalize targets by a precomputed mean and stdev
box_deltas = box_deltas.view(-1, 4) * cfg.TRAIN.BBOX_NORMALIZE_STDS \
+ cfg.TRAIN.BBOX_NORMALIZE_MEANS
pred_boxes = box_utils.bbox_transform(boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS)
pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, im_scale, return_dict['blob_conv']
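# Illustrative sketch (not part of Detectron; the RoIs and the 1/16 quantization are
# made-up values): the DEDUP_BOXES trick above collapses each quantized RoI row into
# a single scalar so np.unique can drop duplicate proposals, and inv_index later maps
# predictions back onto the full proposal set.
# >>> rois = np.array([[0., 16., 16., 32., 32.],
# ...                  [0., 16., 16., 32., 32.],
# ...                  [0., 48., 48., 96., 96.]])
# >>> hashes = np.round(rois * 0.0625).dot(np.array([1, 1e3, 1e6, 1e9, 1e12]))
# >>> _, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
# >>> index, inv_index
# (array([0, 2]), array([0, 0, 1]))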
def im_detect_bbox_precomp_box(entry, category_id_to_contiguous_id):
"""Prepare the bbox for testing"""
# box in origin image
pred_boxes = entry['precomp_boxes']
scores = entry['precomp_score']
cates = entry['precomp_cate'].astype(np.int32)
contiguous_cate = list()
for cls in cates:
# ipdb.set_trace()
if category_id_to_contiguous_id.get(cls) is None:
contiguous_cate.append(80)
else:
contiguous_cate.append(category_id_to_contiguous_id[cls])
cates = np.array(contiguous_cate, dtype=cates.dtype)
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
box_sc = np.concatenate([pred_boxes, scores[:, None]], 1)
unique_cates = np.unique(cates)
for c in unique_cates:
if category_id_to_contiguous_id.get(c) is not None:
inds = np.where(cates == c)
cls_boxes[category_id_to_contiguous_id[c]] = box_sc[inds]
if len(cls_boxes[1]) == 0:
cls_boxes[1] = np.empty((0,5), dtype=np.float32)
return scores, pred_boxes, cates, cls_boxes
def im_detect_bbox_aug(model, im, box_proposals=None):
"""Performs bbox detection with test-time augmentations.
Function signature is the same as for im_detect_bbox.
"""
assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
'Coord heuristic must be union whenever score heuristic is union'
assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Score heuristic must be union whenever coord heuristic is union'
assert not cfg.MODEL.FASTER_RCNN or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Union heuristic must be used to combine Faster RCNN predictions'
# Collect detections computed under different transformations
scores_ts = []
boxes_ts = []
def add_preds_t(scores_t, boxes_t):
scores_ts.append(scores_t)
boxes_ts.append(boxes_t)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals
)
add_preds_t(scores_hf, boxes_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
scores_scl, boxes_scl = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals
)
add_preds_t(scores_scl, boxes_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals, hflip=True
)
add_preds_t(scores_scl_hf, boxes_scl_hf)
# Perform detection at different aspect ratios
for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals
)
add_preds_t(scores_ar, boxes_ar)
if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals, hflip=True
)
add_preds_t(scores_ar_hf, boxes_ar_hf)
# Compute detections for the original image (identity transform) last to
# ensure that the Caffe2 workspace is populated with blobs corresponding
# to the original image on return (postcondition of im_detect_bbox)
scores_i, boxes_i, im_scale_i, blob_conv_i = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
add_preds_t(scores_i, boxes_i)
# Combine the predicted scores
if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
scores_c = scores_i
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
scores_c = np.mean(scores_ts, axis=0)
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
scores_c = np.vstack(scores_ts)
else:
raise NotImplementedError(
'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
)
# Combine the predicted boxes
if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
boxes_c = boxes_i
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
boxes_c = np.mean(boxes_ts, axis=0)
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
boxes_c = np.vstack(boxes_ts)
else:
raise NotImplementedError(
'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
)
return scores_c, boxes_c, im_scale_i, blob_conv_i
def im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=None):
"""Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
# Compute predictions on the flipped image
im_hf = im[:, ::-1, :]
im_width = im.shape[1]
if not cfg.MODEL.FASTER_RCNN:
box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width)
else:
box_proposals_hf = None
scores_hf, boxes_hf, im_scale, _ = im_detect_bbox(
model, im_hf, target_scale, target_max_size, boxes=box_proposals_hf
)
# Invert the detections computed on the flipped image
boxes_inv = box_utils.flip_boxes(boxes_hf, im_width)
return scores_hf, boxes_inv, im_scale
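# Worked arithmetic (assuming the usual Detectron flip convention of
# x1' = W - x2 - 1 and x2' = W - x1 - 1): a box [10, 5, 30, 25] in an image 100 px
# wide maps to [69, 5, 89, 25] after the horizontal flip, and the same mapping
# inverts the detections back to the original image above.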
def im_detect_bbox_scale(
model, im, target_scale, target_max_size, box_proposals=None, hflip=False):
"""Computes bbox detections at the given scale.
Returns predictions in the original image space.
"""
if hflip:
scores_scl, boxes_scl, _ = im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=box_proposals
)
else:
scores_scl, boxes_scl, _, _ = im_detect_bbox(
model, im, target_scale, target_max_size, boxes=box_proposals
)
return scores_scl, boxes_scl
def im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals=None, hflip=False):
"""Computes bbox detections at the given width-relative aspect ratio.
Returns predictions in the original image space.
"""
# Compute predictions on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
if not cfg.MODEL.FASTER_RCNN:
box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio)
else:
box_proposals_ar = None
if hflip:
scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals_ar
)
else:
scores_ar, boxes_ar, _, _ = im_detect_bbox(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=box_proposals_ar
)
# Invert the detected boxes
boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
return scores_ar, boxes_inv
def im_detect_mask(model, im_scale, boxes, blob_conv):
"""Infer instance segmentation masks. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
blob_conv (Variable): base features from the backbone network.
Returns:
pred_masks (ndarray): R x K x M x M array of class specific soft masks
output by the network (must be processed by segm_results to convert
into hard masks in the original image coordinate space)
"""
M = cfg.MRCNN.RESOLUTION
if boxes.shape[0] == 0:
pred_masks = np.zeros((0, M, M), np.float32)
return pred_masks
inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'mask_rois')
pred_masks = model.module.mask_net(blob_conv, inputs)
pred_masks = pred_masks.data.cpu().numpy().squeeze()
if cfg.MRCNN.CLS_SPECIFIC_MASK:
pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
else:
pred_masks = pred_masks.reshape([-1, 1, M, M])
return pred_masks
def im_detect_mask_aug(model, im, boxes, im_scale, blob_conv):
"""Performs mask detection with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
im_scale (list): image blob scales as returned by im_detect_bbox
blob_conv (Tensor): base features from the backbone network.
Returns:
masks (ndarray): R x K x M x M array of class specific soft masks
"""
assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
# Collect masks computed under different transformations
masks_ts = []
# Compute masks for the original image (identity transform)
masks_i = im_detect_mask(model, im_scale, boxes, blob_conv)
masks_ts.append(masks_i)
# Perform mask detection on the horizontally flipped image
if cfg.TEST.MASK_AUG.H_FLIP:
masks_hf = im_detect_mask_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
masks_ts.append(masks_hf)
# Compute detections at different scales
for scale in cfg.TEST.MASK_AUG.SCALES:
max_size = cfg.TEST.MASK_AUG.MAX_SIZE
masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
masks_ts.append(masks_scl)
if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
masks_scl_hf = im_detect_mask_scale(
model, im, scale, max_size, boxes, hflip=True
)
masks_ts.append(masks_scl_hf)
# Compute masks at different aspect ratios
for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
masks_ts.append(masks_ar)
if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
masks_ar_hf = im_detect_mask_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
masks_ts.append(masks_ar_hf)
# Combine the predicted soft masks
if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
masks_c = np.mean(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
masks_c = np.amax(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
def logit(y):
return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
logit_masks = [logit(y) for y in masks_ts]
logit_masks = np.mean(logit_masks, axis=0)
masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
)
return masks_c
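# LOGIT_AVG in closed form: with logit(y) = log(y / (1 - y)), the soft masks are
# averaged in logit space and pushed back through the sigmoid,
#   masks_c = sigmoid(mean_t(logit(masks_t))),
# which intuitively weights the transforms multiplicatively rather than additively.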
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
"""Performs mask detection on the horizontally flipped image.
Function signature is the same as for im_detect_mask_aug.
"""
# Compute the masks for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils.flip_boxes(boxes, im.shape[1])
blob_conv, im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
masks_hf = im_detect_mask(model, im_scale, boxes_hf, blob_conv)
# Invert the predicted soft masks
masks_inv = masks_hf[:, :, :, ::-1]
return masks_inv
def im_detect_mask_scale(
model, im, target_scale, target_max_size, boxes, hflip=False):
"""Computes masks at the given scale."""
if hflip:
masks_scl = im_detect_mask_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
blob_conv, im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
masks_scl = im_detect_mask(model, im_scale, boxes, blob_conv)
return masks_scl
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
"""Computes mask detections at the given width-relative aspect ratio."""
# Perform mask detection on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)
if hflip:
masks_ar = im_detect_mask_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
blob_conv, im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
masks_ar = im_detect_mask(model, im_scale, boxes_ar, blob_conv)
return masks_ar
def im_detect_keypoints_vcoco(model, im_scale, human_boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if human_boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return None, pred_heatmaps
# project boxes to re-sized image size
human_boxes = np.hstack((np.zeros((human_boxes.shape[0], 1), dtype=human_boxes.dtype),
human_boxes * im_scale))
inputs = {'human_boxes': human_boxes}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'human_boxes')
pred_heatmaps = model.module.vcoco_keypoint_net(blob_conv, inputs)
np_pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
    # In case there is only one instance: squeeze() above also dropped the instance dimension, so restore it
if np_pred_heatmaps.ndim == 3:
np_pred_heatmaps = np.expand_dims(np_pred_heatmaps, axis=0)
return pred_heatmaps, np_pred_heatmaps
def keypoint_results_vcoco(cls_boxes, pred_heatmaps):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils.get_person_class_index()
xy_preds = keypoint_utils.heatmaps_to_keypoints(pred_heatmaps, cls_boxes[person_idx])
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils.nms_oks(xy_preds, cls_boxes[person_idx], 0.3)
xy_preds = xy_preds[keep, :, :]
# ref_boxes = ref_boxes[keep, :]
# pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def im_detect_keypoints(model, im_scale, boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'keypoint_rois')
pred_heatmaps = model.module.keypoint_net(blob_conv, inputs)
pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
    # In case there is only one instance: squeeze() above also dropped the instance dimension, so restore it
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
def im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv):
"""Computes keypoint predictions with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
im_scale (list): image blob scales as returned by im_detect_bbox
blob_conv (Tensor): base features from the backbone network.
Returns:
heatmaps (ndarray): R x J x M x M array of keypoint location logits
"""
# Collect heatmaps predicted under different transformations
heatmaps_ts = []
# Tag predictions computed under downscaling and upscaling transformations
ds_ts = []
us_ts = []
def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
heatmaps_ts.append(heatmaps_t)
ds_ts.append(ds_t)
us_ts.append(us_t)
# Compute the heatmaps for the original image (identity transform)
heatmaps_i = im_detect_keypoints(model, im_scale, boxes, blob_conv)
add_heatmaps_t(heatmaps_i)
# Perform keypoints detection on the horizontally flipped image
if cfg.TEST.KPS_AUG.H_FLIP:
heatmaps_hf = im_detect_keypoints_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_hf)
# Compute detections at different scales
for scale in cfg.TEST.KPS_AUG.SCALES:
ds_scl = scale < cfg.TEST.SCALE
us_scl = scale > cfg.TEST.SCALE
heatmaps_scl = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
heatmaps_scl_hf = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
)
add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
# Compute keypoints at different aspect ratios
for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
heatmaps_ar = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes
)
add_heatmaps_t(heatmaps_ar)
if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
add_heatmaps_t(heatmaps_ar_hf)
# Select the heuristic function for combining the heatmaps
if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
np_f = np.mean
elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
np_f = np.amax
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
)
def heur_f(hms_ts):
return np_f(hms_ts, axis=0)
# Combine the heatmaps
if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
heatmaps_c = combine_heatmaps_size_dep(
heatmaps_ts, ds_ts, us_ts, boxes, heur_f
)
else:
heatmaps_c = heur_f(heatmaps_ts)
return heatmaps_c
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
"""Computes keypoint predictions on the horizontally flipped image.
Function signature is the same as for im_detect_keypoints_aug.
"""
# Compute keypoints for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils.flip_boxes(boxes, im.shape[1])
blob_conv, im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
heatmaps_hf = im_detect_keypoints(model, im_scale, boxes_hf, blob_conv)
# Invert the predicted keypoints
heatmaps_inv = keypoint_utils.flip_heatmaps(heatmaps_hf)
return heatmaps_inv
def im_detect_keypoints_scale(
model, im, target_scale, target_max_size, boxes, hflip=False):
"""Computes keypoint predictions at the given scale."""
if hflip:
heatmaps_scl = im_detect_keypoints_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
blob_conv, im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
heatmaps_scl = im_detect_keypoints(model, im_scale, boxes, blob_conv)
return heatmaps_scl
def im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=False):
"""Detects keypoints at the given width-relative aspect ratio."""
    # Perform keypoint detection on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)
if hflip:
heatmaps_ar = im_detect_keypoints_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
blob_conv, im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
heatmaps_ar = im_detect_keypoints(model, im_scale, boxes_ar, blob_conv)
return heatmaps_ar
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
"""Combines heatmaps while taking object sizes into account."""
assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
'All sets of hms must be tagged with downscaling and upscaling flags'
# Classify objects into small+medium and large based on their box areas
areas = box_utils.boxes_area(boxes)
sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH
l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH
# Combine heatmaps computed under different transformations for each object
hms_c = np.zeros_like(hms_ts[0])
for i in range(hms_c.shape[0]):
hms_to_combine = []
for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):
# Discard downscaling predictions for small and medium objects
if sm_objs[i] and ds_t:
continue
# Discard upscaling predictions for large objects
if l_objs[i] and us_t:
continue
hms_to_combine.append(hms_t[i])
hms_c[i] = heur_f(hms_to_combine)
return hms_c
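# Size-dependent fusion note: instances whose box area is below
# cfg.TEST.KPS_AUG.AREA_TH ignore heatmaps produced under downscaling, larger
# instances ignore heatmaps produced under upscaling, and the surviving heatmaps
# are fused per instance with heur_f (mean or max over transforms).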
def box_results_with_nms_and_limit(scores, boxes): # NOTE: support single-batch
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
    `scores` has shape (#detections, #classes), where each row represents a list
    of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]` corresponds to the
    box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(np.float32, copy=False)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils.soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.05,
# score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils.nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils.box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -1] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-1]
scores = im_results[:, -1]
return scores, boxes, cls_boxes
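# Output layout: cls_boxes[j] is an (n_j, 5) float32 array of
# [x1, y1, x2, y2, score] rows for class j (index 0, the background class, stays
# empty), while `scores` and `boxes` hold the same detections stacked over all
# foreground classes.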
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w):
num_classes = cfg.MODEL.NUM_CLASSES
cls_segms = [[] for _ in range(num_classes)]
mask_ind = 0
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
M = cfg.MRCNN.RESOLUTION
scale = (M + 2.0) / M
ref_boxes = box_utils.expand_boxes(ref_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
# skip j = 0, because it's the background class
for j in range(1, num_classes):
segms = []
for _ in range(cls_boxes[j].shape[0]):
if cfg.MRCNN.CLS_SPECIFIC_MASK:
padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
else:
padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
ref_box = ref_boxes[mask_ind, :]
w = (ref_box[2] - ref_box[0] + 1)
h = (ref_box[3] - ref_box[1] + 1)
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, im_w)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]), (x_0 - ref_box[0]):(x_1 - ref_box[0])]
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(np.array(im_mask[:, :, np.newaxis], order='F'))[0]
# For dumping to json, need to decode the byte string.
# https://github.com/cocodataset/cocoapi/issues/70
rle['counts'] = rle['counts'].decode('ascii')
segms.append(rle)
mask_ind += 1
cls_segms[j] = segms
assert mask_ind == masks.shape[0]
return cls_segms
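# Padding arithmetic (e.g. with M = 28): scale = (28 + 2) / 28 ~ 1.071, so the
# reference box grows just enough that the 1-pixel zero border added around the
# M x M mask falls outside the original box after resizing, avoiding the
# "top hat" border artifact described above.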
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils.get_person_class_index()
xy_preds = keypoint_utils.heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils.nms_oks(xy_preds, ref_boxes, 0.3)
xy_preds = xy_preds[keep, :, :]
ref_boxes = ref_boxes[keep, :]
pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
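# Example (single-level case): with im_scale = 2.0 an RoI [10, 20, 30, 40] is
# projected to [20, 40, 60, 80] and emitted as the row [0, 20, 40, 60, 80],
# where the leading 0 is the pyramid level assigned by _project_im_rois.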
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
    rois = im_rois.astype(np.float64, copy=False) * scales
    levels = np.zeros((im_rois.shape[0], 1), dtype=np.int64)
return rois, levels
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
# -------------------------- HOI ----------------------------
def im_detect_hoi(model, boxes, scores, human_count, im_info, blob_conv, entry=None, vcoco_heatmaps=None):
hoi_blob_in = get_hoi_blob_names(is_training=False)
# im_info.shape = (1, 3) h, w, scale
im_scale = im_info[0, 2]
# project boxes to re-sized image size
hoi_blob_in['boxes'] = np.hstack((np.zeros((boxes.shape[0], 1), dtype=boxes.dtype),
boxes * im_scale))
hoi_blob_in['scores'] = scores
human_index = np.arange(boxes.shape[0])[:human_count]
object_index = np.arange(boxes.shape[0])[human_count:]
interaction_human_inds, interaction_target_object_inds \
= np.repeat(human_index, object_index.size), np.tile(object_index - human_count, human_index.size)
hoi_blob_in['human_index'] = human_index
hoi_blob_in['target_object_index'] = object_index
hoi_blob_in['interaction_human_inds'] = interaction_human_inds
hoi_blob_in['interaction_target_object_inds'] = interaction_target_object_inds
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(hoi_blob_in, 'boxes')
    # if no human box is detected, do not run the hoi_head, just return nan
if human_index.size > 0:
hoi_blob_out = model.module.hoi_net(blob_conv, hoi_blob_in, im_info, vcoco_heatmaps)
# ipdb.set_trace()
# if entry:
# test_hoi_fill_hoi_blob_from_gt(hoi_blob_out, entry, im_scale)
hoi_res = hoi_res_gather(hoi_blob_out, im_scale, entry)
else:
# ToDo: any problem here?
hoi_res = dict(
agents=np.full((1, 4 + cfg.VCOCO.NUM_ACTION_CLASSES), np.nan),
roles=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
)
return hoi_res
def hoi_res_gather(hoi_blob, im_scale, entry=None):
'''
Convert predicted score and location to triplets
:param hoi_blob:
:param im_scale:
:param entry:
:return:
'''
# ToDo: modify comments
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
human_action_score = F.sigmoid(hoi_blob['human_action_score']).cpu().numpy()
human_action_bbox_pred = hoi_blob['human_action_bbox_pred'].cpu().numpy()
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy()
human_score = hoi_blob['scores'][hoi_blob['human_index']]
object_score = hoi_blob['scores'][hoi_blob['target_object_index']]
# scale to original image size when testing
boxes = hoi_blob['boxes'][:, 1:] / im_scale
    # For actions that do not interact with an object, action_score is s_h * s^a_h
    # For triplets (actions that interact with objects), action_score is s_h * s_o * s^a_h * g^a_h,o
    # we use a mask to choose the appropriate score
action_mask = np.array(cfg.VCOCO.ACTION_MASK)
triplet_action_mask = np.tile(action_mask.transpose((1, 0)), (human_action_score.shape[0], 1, 1))
    # For actions that do not interact with any object (e.g., smile, run),
    # we rely on s^a_h and the interaction output s^a_h_o is not used,
human_action_pair_score = human_score[:, np.newaxis] * human_action_score
    # in case there are no role-objects
if hoi_blob['target_object_index'].size > 0:
# transform from (human num, object num, action_num) to
# (human_num*action_num*num_target_object_types, object_num)
interaction_action_score = \
interaction_action_score.reshape(human_score.size, object_score.size, -1).transpose(0, 2, 1)
interaction_action_score = np.repeat(interaction_action_score, num_target_object_types, axis=1
).reshape(-1, object_score.size)
# get target localization term g^a_h,o
target_localization_term = target_localization(boxes, hoi_blob['human_index'],
hoi_blob['target_object_index'], human_action_bbox_pred)
# find the object box that maximizes S^a_h,o
# `for each human / action pair we find the object box that maximizes S_h_o^a`
object_action_score = object_score * interaction_action_score * target_localization_term
choosed_object_inds = np.argmax(object_action_score, axis=-1)
# choose corresponding target_localization_term
target_localization_term = target_localization_term[np.arange(choosed_object_inds.size), choosed_object_inds]
# ToDo: choose top-50
# triplet score S^a_h,o
triplet_action_score = \
np.repeat(human_score, num_action_classes * num_target_object_types) * \
object_score[choosed_object_inds] * \
np.repeat(human_action_score, num_target_object_types, axis=1).ravel() * \
target_localization_term
# transform to (human_num, action_num, num_target_object_types)
triplet_action_score = triplet_action_score.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
# ToDo: thresh
# triplet_action_score[triplet_action_mask <= cfg.TEST.SCORE_THRESH] = np.nan
if entry:
# assert triplet_action_score.shape == entry['gt_role_id'][hoi_blob['human_index']].shape
for i in range(len(triplet_action_score.shape)):
pass
# assert np.all(np.where(triplet_action_score > 0.9)[i] ==
# np.where(entry['gt_role_id'][hoi_blob['human_index']] > -1)[i])
# choose appropriate score
# ToDo: any problem here?
        # As not every action defined to interact with objects will have
        # corresponding objects in one image, and triplet_action_score always
        # has an object box, should we set a threshold or some other method to choose
        # between human_action_pair_score and the triplet score?
        # Or will wrong results be excluded when calculating AP?
# action_score = np.zeros(human_action_score.shape)
# action_score[human_action_mask == 0] = human_action_pair_score[human_action_mask == 0]
# action_score[human_action_mask == 1] = np.amax(triplet_action_score, axis=-1)[human_action_mask == 1]
        # set triplet action scores that do not interact with an object to zero
# triplet_action_score[triplet_action_mask == 0] = np.nan
triplet_action_score[triplet_action_mask == 0] = -1
top_k_value = triplet_action_score.flatten()[
np.argpartition(triplet_action_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
triplet_action_score[triplet_action_score <= top_k_value] = np.nan
# get corresponding box of role-objects
choosed_object_inds = choosed_object_inds.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
choosed_objects = boxes[hoi_blob['target_object_index']][choosed_object_inds]
else:
        # if no object is predicted, the triplet action score won't be used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
action_score = human_action_pair_score
# ToDo: threshold
# action_score[action_score <= cfg.TEST.SCORE_THRESH] = np.nan
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
agents = np.hstack((boxes[hoi_blob['human_index']], action_score))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)
roles = np.stack([roles[:, :, i, :].reshape(-1, num_action_classes * 5) for i in range(num_target_object_types)], axis=-1)
return_dict = dict(
# image_id=i
agents=agents,
roles=roles
)
return return_dict
def target_localization(boxes, human_index, object_index, target_location):
"""
Target localization term in paper, g^a_h,o
Measure compatibility between human-object relative location and
target location, which is predicted by hoi-head
:param boxes:
:param human_index:
:param object_index:
:param target_location:
:return:
"""
human_boxes = boxes[human_index]
object_boxes = boxes[object_index]
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
# relative location between every human box and object box
# ToDo: add cfg.MODEL.BBOX_REG_WEIGHTS
relative_location = box_utils.bbox_transform_inv(
np.repeat(human_boxes, object_boxes.shape[0], axis=0),
np.tile(object_boxes, (human_boxes.shape[0], 1))
).reshape(human_boxes.shape[0], object_boxes.shape[0], 4)
# reshape target location same shape as relative location
target_location = target_location.reshape(-1, num_action_classes * num_target_object_types, 4)
# tile to human_num * (num_action_classes * num_target_object_types * object_num) * 4
relative_location, target_location = \
np.tile(relative_location, (1, num_action_classes * num_target_object_types, 1)), \
np.repeat(target_location, relative_location.shape[1], axis=1)
compatibility = np.sum(np.square((relative_location - target_location)), axis=-1)
    # It seems the paper makes a mistake here
compatibility = np.exp(-compatibility / (2 * cfg.VCOCO.TARGET_SIGMA ** 2))
# reshape to (human_num * num_action_classes * num_target_object_types, object_num)
compatibility = compatibility.reshape(human_index.size * num_action_classes * num_target_object_types,
object_index.size)
return compatibility
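# Closed form of the localization term: with delta(h, o) the bbox_transform_inv
# offsets (dx, dy, dw, dh) between a human box h and an object box o, and mu^a the
# predicted target location for action a,
#   g^a_{h,o} = exp(-||delta(h, o) - mu^a||^2 / (2 * sigma^2)),  sigma = cfg.VCOCO.TARGET_SIGMA,
# so objects lying where the action head expects its target score close to 1.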
# ------------------test interact net code ------------------
# ToDo: will be cleaned
def test_hoi_fill_hoi_blob_from_gt(hoi_blob, entry, im_scale):
"""['boxes', 'human_index', 'target_object_index', 'interaction_human_inds',
'interaction_target_object_inds', 'interaction_batch_idx', 'human_action_labels',
'human_action_targets', 'action_target_weights', 'interaction_action_labels',
'boxes_fpn2', 'boxes_fpn3', 'boxes_fpn4', 'boxes_fpn5', 'boxes_idx_restore_int32',
'human_action_score', 'human_action_bbox_pred', 'interaction_action_score']"""
hoi_blob['boxes'] = np.hstack((np.zeros((entry['boxes'].shape[0], 1), dtype=hoi_blob['boxes'].dtype),
entry['boxes'])) * im_scale
hoi_blob['scores'] = np.ones(entry['boxes'].shape[0])
human_index = np.where(entry['gt_actions'][:, 0] > -1)[0]
# all object could be target object
target_object_index = np.arange(entry['boxes'].shape[0], dtype=human_index.dtype)
interaction_human_inds, interaction_target_object_inds \
= np.repeat(np.arange(human_index.size), target_object_index.size), \
np.tile(np.arange(target_object_index.size), human_index.size)
hoi_blob['human_index'] = human_index
hoi_blob['target_object_index'] = target_object_index
hoi_blob['interaction_human_inds'] = interaction_human_inds
hoi_blob['interaction_target_object_inds'] = interaction_target_object_inds
human_action_score = entry['gt_actions'][human_index]
hoi_blob['human_action_score'] = torch.from_numpy(human_action_score).cuda()
action_label_mat = generate_action_mat(entry['gt_role_id'])
triplet_label = action_label_mat[human_index[interaction_human_inds],
target_object_index[interaction_target_object_inds]]
hoi_blob['interaction_action_score'] = torch.from_numpy(triplet_label).cuda()
human_action_bbox_pred, _ = \
_compute_action_targets(entry['boxes'][human_index], entry['boxes'],
entry['gt_role_id'][human_index])
hoi_blob['human_action_bbox_pred'] = torch.from_numpy(human_action_bbox_pred).cuda()
def generate_action_mat(gt_role_id):
'''
Generate a matrix to store action triplet
:param gt_role_id:
    :return: action_mat, first axis is person id, second axis is role-object id,
            third axis is action id, fourth axis is role type
'''
mat = np.zeros((gt_role_id.shape[0], gt_role_id.shape[0], cfg.VCOCO.NUM_ACTION_CLASSES, gt_role_id.shape[-1]), dtype=np.float32)
obj_ids = gt_role_id[np.where(gt_role_id > -1)]
human_ids, action_cls, role_cls = np.where(gt_role_id > -1)
assert role_cls.size == human_ids.size == action_cls.size == obj_ids.size
mat[human_ids, obj_ids, action_cls, role_cls] = 1
return mat
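# Example: if gt_role_id[i, a, r] == o, i.e. person i performs action a on object o
# in role slot r, then mat[i, o, a, r] = 1; indexing mat[person_id, object_id]
# therefore yields that pair's multi-label action / role targets.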
def _compute_action_targets(person_rois, gt_boxes, role_ids):
'''
Compute action targets
:param person_rois: rois assigned to gt acting-human, n * 4
:param gt_boxes: all gt boxes in one image
    :param role_ids: person_rois_num * action_cls_num * num_target_object_types, stores the role-object ids corresponding to each person roi
:return:
'''
assert person_rois.shape[0] == role_ids.shape[0]
# should use cfg.MODEL.BBOX_REG_WEIGHTS?
# calculate targets between every person rois and every gt_boxes
targets = box_utils.bbox_transform_inv(np.repeat(person_rois, gt_boxes.shape[0], axis=0),
np.tile(gt_boxes, (person_rois.shape[0], 1)),
(1., 1., 1., 1.)).reshape(person_rois.shape[0], gt_boxes.shape[0], -1)
# human action targets is (person_num: 16, action_num: 26, role_cls: 2, relative_location: 4)
    # don't use np.inf, so that actions without target_objects can be kept
human_action_targets = np.zeros((role_ids.shape[0], role_ids.shape[1],
role_ids.shape[2], 4), dtype=np.float32)
action_target_weights = np.zeros_like(human_action_targets, dtype=np.float32)
# get action targets relative location
human_action_targets[np.where(role_ids > -1)] = \
targets[np.where(role_ids > -1)[0], role_ids[np.where(role_ids > -1)].astype(int)]
action_target_weights[np.where(role_ids > -1)] = 1.
return human_action_targets.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4), \
action_target_weights.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4)
# ------------------------------- HOI union ------------------------------------
def im_detect_hoi_union(model, boxes, scores, cates, human_count, im_info, blob_conv, entry=None, mode='val', vcoco_heatmaps=None):
loss = dict(
interaction_action_loss=None,
interaction_action_accuray_cls=None)
hoi_blob_in = get_hoi_union_blob_names(is_training=False)
# im_info.shape = (1, 3)
im_scale = im_info[0, 2]
# project boxes to re-sized image size
scaled_boxes = np.hstack((np.zeros((boxes.shape[0], 1), dtype=boxes.dtype),
boxes * im_scale))
# ToDo: choose top 16 human boxes, top 64 target boxes??
# ToDo: lower nms thresh, triplet nms
human_inds = np.where(cates == 1)[0]
human_boxes = scaled_boxes[human_inds]
human_scores = scores[human_inds]
# human_boxes = scaled_boxes[:human_count]
# human_scores = scores[:human_count]
# keep_human_inds = np.where(human_scores >= cfg.VCOCO.TEST_HUMAN_SCORE_THRESH)[0][:16] # ToDo:
keep_human_inds = np.where(human_scores >= cfg.VCOCO.TEST_HUMAN_SCORE_THRESH)[0]
human_boxes = human_boxes[keep_human_inds]
human_scores = human_scores[keep_human_inds]
# select target objects boxes, all boxes are used as targets, including human
# ToDo: try different targets number
# keep_target_inds = np.where(scores >= cfg.VCOCO.TEST_TARGET_OBJECT_SCORE_THRESH)[0][:64]
keep_target_inds = np.where(scores >= cfg.VCOCO.TEST_TARGET_OBJECT_SCORE_THRESH)[0]
target_boxes = scaled_boxes[keep_target_inds]
target_scores = scores[keep_target_inds]
target_classes = cates[keep_target_inds]
interaction_human_inds, interaction_object_inds, union_boxes, spatial_info =\
generate_triplets(human_boxes, target_boxes)
target_cls_mat = np.zeros((target_boxes.shape[0], cfg.MODEL.NUM_CLASSES)).astype(np.float32)
target_cls_mat[:, target_classes] = 1.0
hoi_blob_in['human_boxes'] = human_boxes
hoi_blob_in['object_boxes'] = target_boxes
hoi_blob_in['object_classes'] = target_cls_mat
hoi_blob_in['union_boxes'] = union_boxes
hoi_blob_in['human_scores'] = human_scores
hoi_blob_in['object_scores'] = target_scores
hoi_blob_in['spatial_info'] = spatial_info
hoi_blob_in['interaction_human_inds'] = interaction_human_inds
hoi_blob_in['interaction_object_inds'] = interaction_object_inds
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(hoi_blob_in, 'human_boxes')
_add_multilevel_rois_for_test(hoi_blob_in, 'object_boxes')
_add_multilevel_rois_for_test(hoi_blob_in, 'union_boxes')
else:
blob_conv = blob_conv[-1]
    # if no human box is detected, do not run the hoi_head, just return nan
if human_boxes.size > 0:
rois_keypoints = entry['precomp_keypoints']
human_keypoints = rois_keypoints[human_inds[keep_human_inds]]
union_kps, part_boxes, flag = get_pred_keypoints(human_boxes, human_keypoints, interaction_human_inds, im_scale)
vcoco_heatmaps, union_mask, rescale_kps = generate_joints_heatmap(union_kps, union_boxes[:, 1:]/im_scale,
human_boxes[interaction_human_inds, 1:]/im_scale,
target_boxes[interaction_object_inds, 1:]/im_scale)
pose_configmap = generate_pose_configmap(union_kps, union_boxes[:, 1:]/im_scale,
human_boxes[interaction_human_inds, 1:]/im_scale,
target_boxes[interaction_object_inds, 1:]/im_scale)
hoi_blob_in['union_mask'] = union_mask
hoi_blob_in['rescale_kps'] = rescale_kps
hoi_blob_in['part_boxes'] = part_boxes
hoi_blob_in['flag'] = flag
hoi_blob_in['poseconfig'] = pose_configmap
# # Testing. Replace pred action with gt action
if cfg.DEBUG_TEST_WITH_GT and cfg.DEBUG_TEST_GT_ACTION and entry is not None:
hoi_blob_out = test_det_bbox_gt_action(hoi_blob_in, entry, im_info)
else:
hoi_blob_out = model.module.hoi_net(blob_conv, hoi_blob_in, im_info, vcoco_heatmaps)
affinity_mat = None
if entry.get('affinity_mat') is not None:
affinity_mat = entry['affinity_mat']
affinity_mat = affinity_mat[human_inds[keep_human_inds]][:, keep_target_inds]
hoi_res, interaction_affinity_score = hoi_union_res_gather(hoi_blob_out, im_scale, affinity_mat, entry)
human_action_labels, interaction_action_labels, interaction_affinity_label, \
total_action_num, recall_action_num, total_affinity_num, recall_affinity_num = \
get_gt_labels(entry, human_boxes, target_boxes, interaction_human_inds.shape[0], im_scale)
hoi_blob_out['human_action_labels'] = human_action_labels
hoi_blob_out['interaction_action_labels'] = interaction_action_labels
hoi_blob_out['interaction_affinity'] = interaction_affinity_label
interaction_action_loss, interaction_affinity_loss, \
interaction_action_accuray_cls, interaction_affinity_cls = model.module.HOI_Head.loss(hoi_blob_out)
loss = dict(
interaction_action_loss=float(interaction_action_loss.cpu()),
interaction_action_accuray_cls=float(interaction_action_accuray_cls.cpu()),
interaction_affinity_loss=float(interaction_affinity_loss),
interaction_affinity_cls=float(interaction_affinity_cls),
interaction_affinity_label=interaction_affinity_label,
interaction_affinity_score=interaction_affinity_score,
total_action_num = total_action_num,
total_affinity_num = total_affinity_num,
recall_action_num = recall_action_num,
recall_affinity_num = recall_affinity_num)
else:
# ToDo: any problem here?
hoi_res = dict(
agents=np.full((1, 4 + cfg.VCOCO.NUM_ACTION_CLASSES), np.nan),
roles=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
roles1=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
)
return hoi_res, loss
def get_maxAgent(agents_i, separated_parts_o):
'''
    given agents_i, choose the best agents by predicted score
N x (4+26) x 3
'''
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
#num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
agents_i = agents_i.reshape((-1, 4 + num_action_classes, separated_parts_o))
boxes = agents_i[:,:4, 0]
scores = agents_i[:, 4:, :] # N x 26 x o
choose_id = np.argmax(scores, axis=-1) # N x 26
choose_id_ = choose_id.reshape(-1) #
scores_ = scores.reshape((-1, separated_parts_o)) #
assert scores_.shape[0] == len(choose_id_)
choosed_score = scores_[np.arange(len(choose_id_)), choose_id_]
choosed_score = choosed_score.reshape(scores.shape[:-1])
return np.hstack((boxes, choosed_score)) # N x 30
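# Output note: the returned array has shape (N, 4 + 26): 4 box coordinates
# followed by the 26 per-action scores, where each action score is taken from
# whichever of the separated_parts_o prediction branches scored it highest.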
def get_maxRole(roles_i, separated_parts_o):
'''
given roles_i, choose best roles by pred score
'''
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
roles_i = roles_i.reshape((-1, num_action_classes, 5, num_target_object_types, separated_parts_o))
role_score = roles_i[:,:,-1:] # N x 26 x 1 x 2 x o
choose_id = np.argmax(role_score, axis=-1) # N x 26 x 1 x 2
choose_id = np.tile(choose_id, (1,1,5,1)) # N x 26 x 5 x 2
choose_id_ = choose_id.reshape(-1)
roles_i_ = roles_i.reshape((-1, separated_parts_o))
assert roles_i_.shape[0] == len(choose_id_)
outs = roles_i_[np.arange(len(choose_id_)), choose_id_] # N
return outs.reshape((roles_i.shape[0], num_action_classes*5, num_target_object_types))
def hoi_union_res_gather(hoi_blob, im_scale, interaction_affinity_score=None, entry=None):
'''
Convert predicted score and location to triplets
:param hoi_blob:
:param im_scale:
:param entry:
:return:
'''
# ToDo: modify comments
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
# (1) interaction_affinity_score
interaction_affinity_score = F.sigmoid(hoi_blob['interaction_affinity_score']).cpu().numpy() ##N*1
# interaction_affinity_score = 1 / (np.exp(-interaction_affinity_score)+1)
# (2) interaction_action_score
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy() ## N*24
## combine interaction_action_score and interaction_affinity_score (1+3)
interaction_action_score1 = interaction_action_score * interaction_affinity_score
human_score = hoi_blob['human_scores']
object_score = hoi_blob['object_scores']
## use LIS to human_score
# human_score = LIS(human_score)
# object_score = LIS(object_score)
# scale to original image size when testing
human_boxes = hoi_blob['human_boxes'][:, 1:] / im_scale
object_boxes = hoi_blob['object_boxes'][:, 1:] / im_scale
# we use mask to choose appropriate score
action_mask = np.array(cfg.VCOCO.ACTION_MASK).T
# triplet_action_mask = np.tile(action_mask, (human_action_score.shape[0], 1, 1))
# For actions that do not interact with any object (e.g., smile, run),
# we rely on s^a_h and the interaction output s^a_h_o is not used,
# human_action_pair_score = human_score[:, np.newaxis] * human_action_score
# ToDo: try just use human score as human action pair score
# we can get better results for `pred bbox and gt action`
# human_action_pair_score = human_score[:, np.newaxis]
interaction_score_list = [interaction_action_score, interaction_action_score1]
role_list = []
for inter_idx in range((len(interaction_score_list))):
        # in case there are no role-objects
if hoi_blob['object_boxes'].size > 0:
# triplets score
triplet_action_score = interaction_score_list[inter_idx] * \
human_score[hoi_blob['interaction_human_inds']][:, np.newaxis] * \
object_score[hoi_blob['interaction_object_inds']][:, np.newaxis]
# ToDo: try just use interaction_action_score, better results for `pred bbox and gt action`
# triplet_action_score = interaction_action_score
# transform from (human num, object num, action_num) to
# (human_num*action_num*num_target_object_types, object_num)
triplet_action_score_tmp = np.zeros(
(triplet_action_score.shape[0], num_action_classes, num_target_object_types),
dtype=triplet_action_score.dtype)
triplet_action_score_tmp[:, np.where(action_mask > 0)[0], np.where(action_mask > 0)[1]] = \
triplet_action_score
triplet_action_score = triplet_action_score_tmp
# interaction_action_score = interaction_action_score_tmp.reshape(human_score.size, object_score.size, -1)
# interaction_action_score = interaction_action_score.transpose(0, 2, 1).reshape(-1, object_score.size)
triplet_action_score = triplet_action_score.reshape(human_score.size, object_score.size,
num_action_classes, num_target_object_types)
"""********** remove misgrouping case before hard nms ************"""
# triplet_action_score_mask = remove_mis_group(hoi_blob, entry, im_scale)
# if triplet_action_score_mask is not None:
# # ipdb.set_trace()
# triplet_action_score = triplet_action_score * triplet_action_score_mask[:,:,None,None]
# ToDo: one person one action one object
# ToDo: or one pair three action
choosed_object_inds = np.argmax(triplet_action_score, axis=1)
triplet_action_score = np.max(triplet_action_score, axis=1)
# triplet_action_score[triplet_action_score < 0.3] = -1
# triplet_action_score[triplet_action_mask == 0] = -1
"""********* keep top k value **********"""
# top_k_value = triplet_action_score.flatten()[
# np.argpartition(triplet_action_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
# triplet_action_score[triplet_action_score <= top_k_value] = np.nan
choosed_objects = object_boxes[choosed_object_inds]
else:
            # if no object is predicted, the triplet action score won't be used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
# action_score = human_action_pair_score
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
# agents = np.hstack((human_boxes, action_score))
agents = np.hstack((human_boxes, np.zeros((human_boxes.shape[0], num_action_classes))))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)
roles = np.stack([roles[:, :, i, :].reshape(-1, num_action_classes * 5) for i in range(num_target_object_types)], axis=-1)
role_list.append(roles)
return_dict = dict(
agents=agents,
roles=role_list[0],
roles1=role_list[1],
)
return return_dict, interaction_affinity_score
def hoi_union_res_gather_action_first(hoi_blob, im_scale, entry=None):
"""
    A rough attempt to mitigate the invisible-targets problem; role_ap1 can reach 41.5
:param hoi_blob:
:param im_scale:
:param entry:
:return:
"""
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
human_action_score = F.sigmoid(hoi_blob['human_action_score']).cpu().numpy()
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy()
human_score = hoi_blob['human_scores']
object_score = hoi_blob['object_scores']
# scale to original image size when testing
human_boxes = hoi_blob['human_boxes'][:, 1:] / im_scale
object_boxes = hoi_blob['object_boxes'][:, 1:] / im_scale
# we use mask to choose appropriate score
action_mask = np.array(cfg.VCOCO.ACTION_MASK).T
triplet_action_mask = np.tile(action_mask, (human_action_score.shape[0], 1, 1))
# For actions that do not interact with any object (e.g., smile, run),
# we rely on s^a_h and the interaction output s^a_h_o is not used,
human_action_pair_score = human_action_score * human_score[:, np.newaxis]
    # in case there are no role-objects
if hoi_blob['object_boxes'].size > 0:
# triplets score
triplet_action_score = interaction_action_score * \
human_score[hoi_blob['interaction_human_inds']][:, np.newaxis] * \
object_score[hoi_blob['interaction_object_inds']][:, np.newaxis]
# transform from (human num, object num, action_num) to
# (human_num*action_num*num_target_object_types, object_num)
triplet_action_score_tmp = np.zeros(
(triplet_action_score.shape[0], num_action_classes, num_target_object_types),
dtype=triplet_action_score.dtype)
triplet_action_score_tmp[:, np.where(action_mask > 0)[0], np.where(action_mask > 0)[1]] = \
triplet_action_score
triplet_action_score = triplet_action_score_tmp
# interaction_action_score = interaction_action_score_tmp.reshape(human_score.size, object_score.size, -1)
# interaction_action_score = interaction_action_score.transpose(0, 2, 1).reshape(-1, object_score.size)
triplet_action_score = triplet_action_score.reshape(human_score.size, object_score.size,
num_action_classes, num_target_object_types)
choosed_object_inds = np.argmax(triplet_action_score, axis=1)
triplet_action_score = np.max(triplet_action_score, axis=1)
# triplet_action_score[triplet_action_score < 0.3] = -1
triplet_topN = cfg.VCOCO.KEEP_TOP_NUM
# only keep top 7 target bboxes
triplet_object_topN = 7
# triplet_action_score[triplet_action_mask == 0] = -1
top_k_value = triplet_action_score.flatten()[
np.argpartition(triplet_action_score, -triplet_topN, axis=None)[-triplet_topN]]
triplet_action_score[triplet_action_score <= top_k_value] = np.nan
top_k_value_for_object = triplet_action_score.flatten()[
np.argpartition(triplet_action_score, -triplet_object_topN, axis=None)[-triplet_object_topN]]
choosed_objects = object_boxes[choosed_object_inds]
        # objects outside the top 7 are set to nan
choosed_objects[triplet_action_score < top_k_value_for_object] = np.nan
        # mix the human action pair score with the triplet score
        # for some actions whose targets are invisible, the human action score is better than the triplet score
        # stack to reshape to (human number, action_classes, target classes)
human_centric_score = np.stack((human_action_pair_score, human_action_pair_score), axis=2)
# Top 25
human_top_k_value = human_centric_score.flatten()[
np.argpartition(human_centric_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
human_centric_score[human_centric_score <= human_top_k_value] = 0
        # human action score is the product of two items (human score, action score)
        # multiply by 0.75 to bring it closer to the triplet score scale
human_centric_score *= 0.75
# select maximum score
triplet_action_score = np.maximum(human_centric_score, triplet_action_score)
triplet_action_score[triplet_action_score == 0] = np.nan
else:
        # if no object is predicted, the triplet action score won't be used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
action_score = human_action_pair_score
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
agents = np.hstack((human_boxes, action_score))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)
roles = np.stack([roles[:, :, i, :].reshape(-1, num_action_classes * 5) for i in range(num_target_object_types)], axis=-1)
return_dict = dict(
# image_id=i
agents=agents,
roles=roles
)
return return_dict
def generate_triplets(human_boxes, object_boxes):
human_inds, object_inds = np.meshgrid(np.arange(human_boxes.shape[0]),
np.arange(object_boxes.shape[0]), indexing='ij')
human_inds, object_inds = human_inds.reshape(-1), object_inds.reshape(-1)
union_boxes = box_utils.get_union_box(human_boxes[human_inds][:, 1:],
object_boxes[object_inds][:, 1:])
union_boxes = np.hstack((np.zeros((union_boxes.shape[0], 1), dtype=union_boxes.dtype), union_boxes))
spatial_info = box_utils.bbox_transform_inv(human_boxes[human_inds][:, 1:],
object_boxes[object_inds][:, 1:])
return human_inds, object_inds, union_boxes, spatial_info
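# Pairing note: with H kept human boxes and O kept target boxes this enumerates
# all H * O ordered pairs; union_boxes[k] is the tightest box covering both
# members of pair k (prefixed with a batch index of 0), and spatial_info[k]
# holds their relative-location offsets from bbox_transform_inv.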
# --------------------- Check bottleneck ---------------------------
def test_det_bbox_gt_action(hoi_blob_in, entry, im_info):
# check interaction branch, bbox res from test, interaction from gt
gt_human_inds = np.where(entry['gt_classes'] == 1)[0]
gt_human_boxes = entry['boxes'][gt_human_inds]
pred_human_boxes = hoi_blob_in['human_boxes']/im_info[0, 2]
human_pred_gt_overlaps = box_utils.bbox_overlaps(
pred_human_boxes[:, 1:].astype(dtype=np.float32, copy=False),
gt_human_boxes.astype(dtype=np.float32, copy=False))
human_pred_to_gt_inds = np.argmax(human_pred_gt_overlaps, axis=1)
human_ious = human_pred_gt_overlaps.max(axis=1)[:, None]
human_score = np.zeros(human_ious.shape)
human_score[np.where(human_ious > 0.5)] = 1
    # assign gt interactions to the matched pred bboxes
human_action = entry['gt_actions'][gt_human_inds[human_pred_to_gt_inds]]
    # multiply iou into the human action scores: better localization gives a better action score
# human_action = human_ious * human_action
human_action = human_score * human_action
# ------------------------------- Targets -----------------------------------
# ipdb.set_trace()
pred_target_boxes = hoi_blob_in['object_boxes']/im_info[0, 2]
target_pred_gt_overlaps = box_utils.bbox_overlaps(
pred_target_boxes[:, 1:].astype(dtype=np.float32, copy=False),
entry['boxes'].astype(dtype=np.float32, copy=False))
target_pred_to_gt_inds = np.argmax(target_pred_gt_overlaps, axis=1)
target_ious = target_pred_gt_overlaps.max(axis=1)[:, None]
target_score = np.zeros(target_ious.shape)
target_score[np.where(target_ious > 0.5)] = 1
gt_action_mat = generate_action_mat(entry['gt_role_id'])
    # ToDo: there is a problem, here we ignore `interaction triplets` whose
    # targets are invisible
action_labels = gt_action_mat[gt_human_inds[human_pred_to_gt_inds[hoi_blob_in['interaction_human_inds']]],
target_pred_to_gt_inds[hoi_blob_in['interaction_object_inds']]]
# triplet_ious = human_ious[hoi_blob_in['interaction_human_inds']] * \
# target_ious[hoi_blob_in['interaction_object_inds']]
# # multiply iou
# action_labels = triplet_ious[:, None] * action_labels
triplet_scores = human_score[hoi_blob_in['interaction_human_inds']] * \
target_score[hoi_blob_in['interaction_object_inds']]
action_labels = triplet_scores[:, None] * action_labels
# convert to 24-class
interaction_action_mask = np.array(cfg.VCOCO.ACTION_MASK).T
    action_labels = action_labels[:, np.where(interaction_action_mask > 0)[0], np.where(interaction_action_mask > 0)[1]]
"""
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
import json
import warnings
from gradio.component import Component
import numpy as np
import PIL
from gradio import processing_utils, test_data
import pandas as pd
from ffmpy import FFmpeg
import math
import tempfile
import csv
class InputComponent(Component):
"""
Input Component. All input components subclass this.
"""
def __init__(self, label, requires_permissions=False):
self.set_interpret_parameters()
super().__init__(label, requires_permissions)
def preprocess(self, x):
"""
Any preprocessing needed to be performed on function input.
"""
return x
def serialize(self, x, called_directly):
"""
        Convert from a human-readable version of the input (path of an image, URL of a video, etc.) to a serialized version (e.g. base64) to pass into an API. May do different things if the interface is called() vs. used via GUI.
Parameters:
x (Any): Input to interface
called_directly (bool): if true, the interface was called(), otherwise, it is being used via the GUI
"""
return x
def preprocess_example(self, x):
"""
Any preprocessing needed to be performed on an example before being passed to the main function.
"""
return x
def set_interpret_parameters(self):
'''
Set any parameters for interpretation.
'''
return self
def get_interpretation_neighbors(self, x):
'''
Generates values similar to input to be used to interpret the significance of the input in the final output.
Parameters:
x (Any): Input to interface
Returns: (neighbor_values, interpret_kwargs, interpret_by_removal)
neighbor_values (List[Any]): Neighboring values to input x to compute for interpretation
interpret_kwargs (Dict[Any]): Keyword arguments to be passed to get_interpretation_scores
interpret_by_removal (bool): If True, returned neighbors are values where the interpreted subsection was removed. If False, returned neighbors are values where the interpreted subsection was modified to a different value.
'''
pass
def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
'''
Arrange the output values from the neighbors into interpretation scores for the interface to render.
Parameters:
x (Any): Input to interface
neighbors (List[Any]): Neighboring values to input x used for interpretation.
scores (List[float]): Output value corresponding to each neighbor in neighbors
kwargs (Dict[str, Any]): Any additional arguments passed from get_interpretation_neighbors.
Returns:
(List[Any]): Arrangement of interpretation scores for interfaces to render.
'''
pass
def generate_sample(self):
"""
Returns a sample value of the input that would be accepted by the api. Used for api documentation.
"""
pass
class Textbox(InputComponent):
"""
Component creates a textbox for user to enter input. Provides a string as an argument to the wrapped function.
Input type: str
Demos: hello_world, diff_texts
"""
def __init__(self, lines=1, placeholder=None, default="", numeric=False, type="str", label=None):
"""
Parameters:
lines (int): number of line rows to provide in textarea.
placeholder (str): placeholder hint to provide behind textarea.
default (str): default text to provide in textarea.
numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
type (str): DEPRECATED. Type of value to be returned by component. "str" returns a string, "number" returns a float value. Use Number component in place of number type.
label (str): component name in interface.
"""
self.lines = lines
self.placeholder = placeholder
self.default = default
if numeric or type == "number":
warnings.warn(
"The 'numeric' type has been deprecated. Use the Number input component instead.", DeprecationWarning)
self.type = "number"
else:
self.type = type
if default == "":
self.test_input = {
"str": "the quick brown fox jumped over the lazy dog",
"number": 786.92,
}.get(type)
else:
self.test_input = default
self.interpret_by_tokens = True
super().__init__(label)
def get_template_context(self):
return {
"lines": self.lines,
"placeholder": self.placeholder,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {},
"textbox": {"lines": 7},
}
def preprocess(self, x):
"""
Parameters:
x (str): text input
"""
if self.type == "str":
return x
elif self.type == "number":
return float(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'str', 'number'.")
def preprocess_example(self, x):
"""
Returns:
(str): Text representing function input
"""
return x
def set_interpret_parameters(self, separator=" ", replacement=None):
"""
Calculates interpretation score of characters in input by splitting input into tokens, then using a "leave one out" method to calculate the score of each token by removing each token and measuring the delta of the output value.
Parameters:
separator (str): Separator to use to split input into tokens.
replacement (str): In the "leave one out" step, the text that the token should be replaced with.
"""
self.interpretation_separator = separator
self.interpretation_replacement = replacement
return self
def tokenize(self, x):
"""
Tokenizes an input string by dividing into "words" delimited by self.interpretation_separator
"""
tokens = x.split(self.interpretation_separator)
leave_one_out_strings = []
for index in range(len(tokens)):
leave_one_out_set = list(tokens)
if self.interpretation_replacement is None:
leave_one_out_set.pop(index)
else:
leave_one_out_set[index] = self.interpretation_replacement
leave_one_out_strings.append(
self.interpretation_separator.join(leave_one_out_set))
return tokens, leave_one_out_strings, None
def get_masked_inputs(self, tokens, binary_mask_matrix):
"""
Constructs partially-masked sentences for SHAP interpretation
"""
masked_inputs = []
for binary_mask_vector in binary_mask_matrix:
masked_input = np.array(tokens)[np.array(
binary_mask_vector, dtype=bool)]
masked_inputs.append(
self.interpretation_separator.join(masked_input))
return masked_inputs
def get_interpretation_scores(self, x, neighbors, scores, tokens, masks=None):
"""
Returns:
(List[Tuple[str, float]]): Each tuple set represents a set of characters and their corresponding interpretation score.
"""
result = []
for token, score in zip(tokens, scores):
result.append((token, score))
result.append((self.interpretation_separator, 0))
return result
def generate_sample(self):
return "Hello World"
class Number(InputComponent):
"""
Component creates a field for user to enter numeric input. Provides a number as an argument to the wrapped function.
Input type: float
Demos: tax_calculator, titanic_survival
"""
def __init__(self, default=None, label=None):
'''
Parameters:
default (float): default value.
label (str): component name in interface.
'''
self.default = default
self.test_input = default if default is not None else 1
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"number": {},
}
def preprocess(self, x):
"""
Parameters:
x (number): numeric input
Returns:
(float): number representing function input
"""
return float(x)
def preprocess_example(self, x):
"""
Returns:
(float): Number representing function input
"""
return x
def set_interpret_parameters(self, steps=3, delta=1, delta_type="percent"):
"""
Calculates interpretation scores of numeric values close to the input number.
Parameters:
steps (int): Number of nearby values to measure in each direction (above and below the input number).
delta (float): Size of step in each direction between nearby values.
delta_type (str): "percent" if delta step between nearby values should be a calculated as a percent, or "absolute" if delta should be a constant step change.
"""
self.interpretation_steps = steps
self.interpretation_delta = delta
self.interpretation_delta_type = delta_type
return self
def get_interpretation_neighbors(self, x):
x = float(x)
neighbors = []
if self.interpretation_delta_type == "percent":
delta = 1.0 * self.interpretation_delta * x / 100
elif self.interpretation_delta_type == "absolute":
delta = self.interpretation_delta
negatives = (x + np.arange(-self.interpretation_steps, 0)
* delta).tolist()
positives = (x + np.arange(1, self.interpretation_steps+1)
* delta).tolist()
return negatives + positives, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[Tuple[float, float]]): Each tuple set represents a numeric value near the input and its corresponding interpretation score.
"""
interpretation = list(zip(neighbors, scores))
interpretation.insert(int(len(interpretation) / 2), [x, None])
return interpretation
def generate_sample(self):
return 1.0
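# Illustrative sketch, not part of the original module: the neighbor values
# produced by Number.get_interpretation_neighbors in "percent" mode. The step
# is delta percent of the input, so x=100, steps=3, delta=1 gives
# [97, 98, 99] below and [101, 102, 103] above the input. The helper name is
# hypothetical; np is the module's existing numpy import.
def _number_neighbors_sketch(x, steps=3, delta=1.0):
    step = delta * x / 100.0
    below = (x + np.arange(-steps, 0) * step).tolist()
    above = (x + np.arange(1, steps + 1) * step).tolist()
    return below + above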
class Slider(InputComponent):
"""
Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
Input type: float
Demos: sentence_builder, generate_tone, titanic_survival
"""
def __init__(self, minimum=0, maximum=100, step=None, default=None, label=None):
'''
Parameters:
minimum (float): minimum value for slider.
maximum (float): maximum value for slider.
step (float): increment between slider values.
default (float): default value.
label (str): component name in interface.
'''
self.minimum = minimum
self.maximum = maximum
if step is None:
difference = maximum - minimum
power = math.floor(math.log10(difference) - 2)
step = 10 ** power
self.step = step
self.default = minimum if default is None else default
self.test_input = self.default
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"minimum": self.minimum,
"maximum": self.maximum,
"step": self.step,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"slider": {},
}
def preprocess(self, x):
"""
Parameters:
x (number): numeric input
Returns:
(number): numeric input
"""
return x
def preprocess_example(self, x):
"""
Returns:
(float): Number representing function input
"""
return x
def set_interpret_parameters(self, steps=8):
"""
Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider.
Parameters:
steps (int): Number of neighboring values to measure between the minimum and maximum values of the slider range.
"""
self.interpretation_steps = steps
return self
def get_interpretation_neighbors(self, x):
return np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(), {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the score corresponding to an evenly spaced range of inputs between the minimum and maximum slider values.
"""
return scores
def generate_sample(self):
return self.maximum
class Checkbox(InputComponent):
"""
Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
Input type: bool
Demos: sentence_builder, titanic_survival
"""
def __init__(self, default=False, label=None):
"""
Parameters:
label (str): component name in interface.
default (bool): default value.
"""
self.test_input = True
self.default = default
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"checkbox": {},
}
def preprocess(self, x):
"""
Parameters:
x (bool): boolean input
Returns:
(bool): boolean input
"""
return x
def preprocess_example(self, x):
"""
Returns:
(bool): Boolean representing function input
"""
return x
def set_interpret_parameters(self):
"""
Calculates interpretation score of the input by comparing the output against the output when the input is the inverse boolean value of x.
"""
return self
def get_interpretation_neighbors(self, x):
return [not x], {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(Tuple[float, float]): The first value represents the interpretation score if the input is False, and the second if the input is True.
"""
if x:
return scores[0], None
else:
return None, scores[0]
def generate_sample(self):
return True
class CheckboxGroup(InputComponent):
"""
Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
Input type: Union[List[str], List[int]]
Demos: sentence_builder, titanic_survival, fraud_detector
"""
def __init__(self, choices, default=[], type="value", label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
default (List[str]): default selected list of options.
type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
label (str): component name in interface.
'''
self.choices = choices
self.default = default
self.type = type
self.test_input = self.choices
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (List[str]): list of selected choices
Returns:
(Union[List[str], List[int]]): list of selected choices as strings or indices within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return [self.choices.index(choice) for choice in x]
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice in the input by comparing the output against the outputs when each choice in the input is independently either removed or added.
"""
return self
def get_interpretation_neighbors(self, x):
leave_one_out_sets = []
for choice in self.choices:
leave_one_out_set = list(x)
if choice in leave_one_out_set:
leave_one_out_set.remove(choice)
else:
leave_one_out_set.append(choice)
leave_one_out_sets.append(leave_one_out_set)
return leave_one_out_sets, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[Tuple[float, float]]): For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True.
"""
final_scores = []
for choice, score in zip(self.choices, scores):
if choice in x:
score_set = [score, None]
else:
score_set = [None, score]
final_scores.append(score_set)
return final_scores
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (List[str]])
"""
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
def generate_sample(self):
return self.choices
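# Illustrative sketch, not part of the original module: the neighbor sets that
# CheckboxGroup.get_interpretation_neighbors builds. Each available choice is
# toggled exactly once -- removed if currently selected, added if not -- giving
# one neighbor per choice. The helper name is hypothetical.
def _checkboxgroup_neighbors_sketch(selected, choices):
    neighbor_sets = []
    for choice in choices:
        toggled = list(selected)
        if choice in toggled:
            toggled.remove(choice)
        else:
            toggled.append(choice)
        neighbor_sets.append(toggled)
    return neighbor_sets
# Example: selected=["a"], choices=["a", "b"] yields [[], ["a", "b"]].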
class Radio(InputComponent):
"""
Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: Union[str, int]
Demos: sentence_builder, tax_calculator, titanic_survival
"""
def __init__(self, choices, type="value", default=None, label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
default (str): default value.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
self.default = default if default is not None else self.choices[0]
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): selected choice
Returns:
(Union[str, int]): selected choice as string or index within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected.
"""
return self
def get_interpretation_neighbors(self, x):
choices = list(self.choices)
choices.remove(x)
return choices, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the interpretation score corresponding to each choice.
"""
scores.insert(self.choices.index(x), None)
return scores
def generate_sample(self):
return self.choices[0]
class Dropdown(InputComponent):
"""
Component creates a dropdown menu from which only one entry can be selected. Provides a string representing the selected choice as an argument to the wrapped function.
Input type: Union[str, int]
Demos: sentence_builder, filter_records, titanic_survival
"""
def __init__(self, choices, type="value", default=None, label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
default (str): default value.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
self.default = default if default is not None else self.choices[0]
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): selected choice
Returns:
(Union[str, int]): selected choice as string or index within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected.
"""
return self
def get_interpretation_neighbors(self, x):
choices = list(self.choices)
choices.remove(x)
return choices, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the interpretation score corresponding to each choice.
"""
scores.insert(self.choices.index(x), None)
return scores
def generate_sample(self):
return self.choices[0]
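# Illustrative sketch, not part of the original module: how Radio and Dropdown
# arrange interpretation scores. Scores are computed for the alternative
# choices only, and None is re-inserted at the position of the originally
# selected choice. The helper name is hypothetical.
def _choice_scores_sketch(selected, choices, alternative_scores):
    scores = list(alternative_scores)
    scores.insert(choices.index(selected), None)
    return scores
# Example: choices=["a", "b", "c"], selected="b", alternative_scores=[0.3, -0.1]
# yields [0.3, None, -0.1].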
class Image(InputComponent):
"""
Component creates an image upload box with editing capabilities.
Input type: Union[numpy.array, PIL.Image, file-object]
Demos: image_classifier, image_mod, webcam, digit_classifier
"""
def __init__(self, shape=None, image_mode='RGB', invert_colors=False, source="upload", tool="editor", type="numpy", label=None, optional=False):
'''
Parameters:
shape (Tuple[int, int]): (width, height) shape to crop and resize image to; if None, matches input image size.
image_mode (str): "RGB" if color, or "L" if black and white.
invert_colors (bool): whether to invert the image as a preprocessing step.
source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
tool (str): Tools used for editing. "editor" allows a full screen editor, "select" provides a cropping and zoom tool.
type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name, "filepath" returns the path directly.
label (str): component name in interface.
optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None.
'''
self.shape = shape
self.image_mode = image_mode
self.source = source
requires_permissions = source == "webcam"
self.tool = tool
self.type = type
self.optional = optional
self.invert_colors = invert_colors
self.test_input = test_data.BASE64_IMAGE
self.interpret_by_tokens = True
super().__init__(label, requires_permissions)
@classmethod
def get_shortcut_implementations(cls):
return {
"image": {},
"webcam": {"source": "webcam"},
"sketchpad": {"image_mode": "L", "source": "canvas", "shape": (28, 28), "invert_colors": True},
}
def get_template_context(self):
return {
"image_mode": self.image_mode,
"shape": self.shape,
"source": self.source,
"tool": self.tool,
"optional": self.optional,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): base64 url data
Returns:
(Union[numpy.array, PIL.Image, file-object]): image in requested format
"""
if x is None:
return x
im = processing_utils.decode_base64_to_image(x)
fmt = im.format
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)
if self.shape is not None:
im = processing_utils.resize_and_crop(im, self.shape)
if self.invert_colors:
im = PIL.ImageOps.invert(im)
if self.type == "pil":
return im
elif self.type == "numpy":
return np.array(im)
elif self.type == "file" or self.type == "filepath":
file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=(
"."+fmt.lower() if fmt is not None else ".png"))
im.save(file_obj.name)
if self.type == "file":
warnings.warn(
"The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.", DeprecationWarning)
return file_obj
else:
return file_obj.name
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'numpy', 'pil', 'filepath'.")
def preprocess_example(self, x):
return processing_utils.encode_file_to_base64(x)
def serialize(self, x, called_directly=False):
# if called directly, can assume it's a URL or filepath
if self.type == "filepath" or called_directly:
return processing_utils.encode_url_or_file_to_base64(x)
elif self.type == "file":
return processing_utils.encode_url_or_file_to_base64(x.name)
elif self.type in ("numpy", "pil"):
if self.type == "numpy":
x = PIL.Image.fromarray(np.uint8(x)).convert('RGB')
fmt = x.format
file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=(
"."+fmt.lower() if fmt is not None else ".png"))
x.save(file_obj.name)
return processing_utils.encode_url_or_file_to_base64(file_obj.name)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'numpy', 'pil', 'filepath'.")
def set_interpret_parameters(self, segments=16):
"""
Calculates interpretation score of image subsections by splitting the image into subsections, then using a "leave one out" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value.
Parameters:
segments (int): Number of interpretation segments to split image into.
"""
self.interpretation_segments = segments
return self
def _segment_by_slic(self, x):
"""
Helper method that segments an image into superpixels using slic.
Parameters:
x: base64 representation of an image
"""
x = processing_utils.decode_base64_to_image(x)
if self.shape is not None:
x = processing_utils.resize_and_crop(x, self.shape)
resized_and_cropped_image = np.array(x)
try:
from skimage.segmentation import slic
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Error: running this interpretation for images requires scikit-image, please install it first.")
try:
segments_slic = slic(
resized_and_cropped_image, self.interpretation_segments, compactness=10,
sigma=1, start_label=1)
except TypeError: # For skimage 0.16 and older
segments_slic = slic(
resized_and_cropped_image, self.interpretation_segments, compactness=10,
sigma=1)
return segments_slic, resized_and_cropped_image
def tokenize(self, x):
"""
Segments image into tokens, masks, and leave-one-out-tokens
Parameters:
x: base64 representation of an image
Returns:
tokens: list of tokens, used by the get_masked_input() method
leave_one_out_tokens: list of left-out tokens, used by the get_interpretation_neighbors() method
masks: list of masks, used by the get_interpretation_neighbors() method
"""
segments_slic, resized_and_cropped_image = self._segment_by_slic(x)
tokens, masks, leave_one_out_tokens = [], [], []
replace_color = np.mean(resized_and_cropped_image, axis=(0, 1))
for (i, segment_value) in enumerate( | np.unique(segments_slic) | numpy.unique |
import numpy, pylab, os, sys, csv
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
pylab.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
dp='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/combinedfom.txt'
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/parsedresults/paperfom'
elkeys=['Ni', 'Fe', 'Co', 'Ce']
ellabels=elkeys
compvertsp=numpy.array([[.5, .5, 0, 0], [.5, 0, .5, 0], [0, 0, .1, .9]])
critdistp=.05
compverts=numpy.array([[.5, .37, .13, 0], [.25, 0, .25, .5]])
critdist=.05
betweenbool=True
invertbool=False
pointsize=20
opacity=.6
view_azim=-159
view_elev=18
f=open(dp, mode='r')
dr=csv.DictReader(f, delimiter='\t')
dropd={}
for l in dr:
for kr in l.keys():
k=kr.strip()
if not k in dropd.keys():
dropd[k]=[]
dropd[k]+=[myeval(l[kr].strip())]
for k in dropd.keys():
dropd[k]=numpy.array(dropd[k])
f.close()
dropd['compositions']=numpy.array([dropd[elkey] for elkey in elkeys]).T
comps=numpy.array([dropd[elkey] for elkey in elkeys]).T
gridi=30
comps30=[(a*1./gridi, b*1./gridi, c*1./gridi, (gridi-a-b-c)*1./gridi) for a in numpy.arange(0,1+gridi) for b in numpy.arange(0,1+gridi-a) for c in | numpy.arange(0,1+gridi-a-b) | numpy.arange |
from distutils.version import LooseVersion
from io import StringIO
from itertools import product
from string import ascii_lowercase
import struct
import sys
import types
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy
from scipy import stats
from scipy.optimize import OptimizeResult
import statsmodels.regression.linear_model as smlm
import statsmodels.tools as smtools
from arch.data import sp500
from arch.typing import Literal
from arch.univariate.base import ARCHModelForecast, ARCHModelResult, _align_forecast
from arch.univariate.distribution import (
GeneralizedError,
Normal,
SkewStudent,
StudentsT,
)
from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model
from arch.univariate.volatility import (
APARCH,
ARCH,
EGARCH,
FIGARCH,
GARCH,
HARCH,
ConstantVariance,
EWMAVariance,
FixedVariance,
MIDASHyperbolic,
RiskMetrics2006,
)
from arch.utility.exceptions import ConvergenceWarning, DataScaleWarning
USE_CYTHON = False
try:
import arch.univariate.recursions
USE_CYTHON = True
except ImportError:
import arch.univariate.recursions_python # noqa
if USE_CYTHON:
rec: types.ModuleType = arch.univariate.recursions
else:
rec = arch.univariate.recursions_python
try:
import matplotlib.pyplot # noqa
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
RTOL = 1e-4 if struct.calcsize("P") < 8 else 1e-6
DISPLAY: Literal["off"] = "off"
SP_LT_14 = LooseVersion(scipy.__version__) < LooseVersion("1.4")
SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()
@pytest.fixture(scope="module", params=[True, False])
def simulated_data(request):
rs = np.random.RandomState(1)
zm = ZeroMean(volatility=GARCH(), distribution=Normal(seed=rs))
sim_data = zm.simulate(np.array([0.1, 0.1, 0.88]), 1000)
return np.asarray(sim_data.data) if request.param else sim_data.data
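# Illustrative sketch, not part of the original test suite: the simulate-then-fit
# round trip these fixtures rely on. A zero-mean GARCH(1,1) series is simulated
# with the same [omega, alpha, beta] values as the fixture above and then
# re-estimated with arch_model. The helper name is hypothetical.
def _simulate_and_fit_sketch():
    rs = np.random.RandomState(0)
    zm = ZeroMean(volatility=GARCH(), distribution=Normal(seed=rs))
    sim = zm.simulate(np.array([0.1, 0.1, 0.88]), 1000)
    am = arch_model(sim.data, mean="Zero", vol="GARCH", p=1, q=1)
    return am.fit(disp="off")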
class TestMeanModel(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(1234)
cls.T = 1000
cls.resids = cls.rng.standard_normal(cls.T)
zm = ZeroMean()
zm.volatility = GARCH()
seed = 12345
random_state = np.random.RandomState(seed)
zm.distribution = Normal(seed=random_state)
sim_data = zm.simulate(np.array([0.1, 0.1, 0.8]), 1000)
with pytest.raises(ValueError):
zm.simulate(np.array([0.1, 0.1, 0.8]), 1000, initial_value=3.0)
date_index = pd.date_range("2000-12-31", periods=1000, freq="W")
cls.y = sim_data.data.values
cls.y_df = pd.DataFrame(
cls.y[:, None], columns=["LongVariableName"], index=date_index
)
cls.y_series = pd.Series(
cls.y, name="VeryVeryLongLongVariableName", index=date_index
)
x = cls.resids + cls.rng.standard_normal(cls.T)
cls.x = x[:, None]
cls.x_df = pd.DataFrame(cls.x, columns=["LongExogenousName"])
cls.resid_var = np.var(cls.resids)
cls.sigma2 = np.zeros_like(cls.resids)
cls.backcast = 1.0
def test_constant_mean(self):
cm = ConstantMean(self.y)
parameters = np.array([5.0, 1.0])
cm.simulate(parameters, self.T)
assert_equal(cm.num_params, 1)
with pytest.raises(ValueError):
cm.simulate(parameters, self.T, x=np.array(10))
bounds = cm.bounds()
assert_equal(bounds, [(-np.inf, np.inf)])
assert_equal(cm.constant, True)
a, b = cm.constraints()
assert_equal(a, np.empty((0, 1)))
assert_equal(b, np.empty((0,)))
assert isinstance(cm.volatility, ConstantVariance)
assert isinstance(cm.distribution, Normal)
assert cm.lags is None
res = cm.fit(disp=DISPLAY)
expected = np.array([self.y.mean(), self.y.var()])
assert_almost_equal(res.params, expected)
forecasts = res.forecast(horizon=20, start=20, reindex=False)
direct = pd.DataFrame(
index= | np.arange(self.y.shape[0]) | numpy.arange |
import pytest
import os
import numpy as np
from rail.estimation.estimator import Estimator
from tables_io.ioUtils import initializeHdf5Write, writeDictToHdf5Chunk
from tables_io.ioUtils import finalizeHdf5Write
import yaml
# this is temporary until unit test uses a definite test data set and creates
# the yaml file on the fly
test_base_yaml = 'tests/base.yaml'
def test_init_with_dict():
# test we can init with a dict we have already loaded
d = yaml.safe_load(open(test_base_yaml))['base_config']
_ = Estimator(d)
def test_initialization():
# test handling of a nonexistent config input file
with pytest.raises(FileNotFoundError):
_ = Estimator(base_config='non_existent.yaml')
# assert correct instantiation based on a yaml file
_ = Estimator(base_config=test_base_yaml)
def test_loading():
assert True
def test_train_not_implemented():
fakedata = {'u': 99., 'g': 99., 'r': 99.}
with pytest.raises(NotImplementedError):
instance = Estimator(base_config=test_base_yaml)
instance.inform(fakedata)
def test_estimate_not_implemented():
fake_data = {'u': 99., 'g': 99., 'r': 99.}
with pytest.raises(NotImplementedError):
instance = Estimator(base_config=test_base_yaml)
instance.estimate(fake_data)
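# Illustrative sketch, not part of the original tests: a minimal concrete
# estimator showing how the inform/estimate hooks are meant to be overridden.
# The class name and its trivial bodies are placeholders, not a real estimator.
class _DummyEstimator(Estimator):
    def inform(self, training_data):
        # A real estimator would train a model here.
        self.model = None
    def estimate(self, test_data):
        # A real estimator would evaluate p(z); a flat placeholder is returned here.
        self.zgrid = np.arange(0, 1, 0.2)
        self.pz_pdf = np.ones(len(self.zgrid))
        return self.pz_pdf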
def test_writing(tmpdir):
instance = Estimator(test_base_yaml)
instance.zmode = 0
instance.zgrid = np.arange(0, 1, 0.2)
instance.pz_pdf = | np.ones(5) | numpy.ones |
from objects.backbone import Backbone
import numpy as np
def test_backbone_tangent_normal_binormal():
cp = np.array([[0, 0, 0], [0, 10, 0], [0, 20, 0], [0, 30, 0], [10, 30, 0], [20, 30, 0], [30, 30, 0]])
backbone = Backbone(cp, reparameterize=True)
# TODO: Figure out why binormal(0) results in nan. First you need to enable debugging breakpoints in other files.
t = np.linspace(0, 1, 100)
T = backbone.T(t)
N = backbone.N(t)
B = backbone.B(t)
assert np.all(T[0] == [0, 1, 0])
assert np.all(N[0] == [-1, 0, 0])
assert | np.all(B[0] == [0, 0, 1]) | numpy.all |
import sys, os
curr_path = os.getcwd()
if os.path.basename(curr_path) not in sys.path:
sys.path.append(os.path.dirname(os.getcwd()))
from time import sleep
from threading import Lock
import numpy as np
from scipy.spatial.transform import Rotation
from carla_msgs.msg import CarlaStatus
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Pose
from quad_sim_python_msgs.msg import QuadMotors, QuadWind, QuadState
import rclpy # https://docs.ros2.org/latest/api/rclpy/api/node.html
from rclpy.node import Node
from rclpy.time import Time, Duration
from tf2_ros import TransformException
from tf2_ros.buffer import Buffer
from tf2_ros.transform_listener import TransformListener
from quad_sim_python import Quadcopter
from rclpy_param_helper import Dict2ROS2Params, ROS2Params2Dict
quad_params = {}
# Moments of inertia:
# (e.g. from Bifilar Pendulum experiment https://arc.aiaa.org/doi/abs/10.2514/6.2007-6822)
Ixx = 0.0123
Iyy = 0.0123
Izz = 0.0224
IB = np.array([[Ixx, 0, 0 ],
[0, Iyy, 0 ],
[0, 0, Izz]]) # Inertial tensor (kg*m^2)
IRzz = 2.7e-5 # Rotor moment of inertia (kg*m^2)
quad_params["mB"] = 1.2 # mass (kg)
quad_params["g"] = 9.81 # gravity (m/s^2)
quad_params["dxm"] = 0.16 # arm length (m) - between CG and front
quad_params["dym"] = 0.16 # arm length (m) - between CG and right
quad_params["dzm"] = 0.05 # motor height (m)
quad_params["IB"] = IB
quad_params["IRzz"] = IRzz
quad_params["Cd"] = 0.1 # https://en.wikipedia.org/wiki/Drag_coefficient
quad_params["kTh"] = 1.076e-5 # thrust coeff (N/(rad/s)^2) (1.18e-7 N/RPM^2)
quad_params["kTo"] = 1.632e-7 # torque coeff (Nm/(rad/s)^2) (1.79e-9 Nm/RPM^2)
quad_params["minThr"] = 0.1*4 # Minimum total thrust
quad_params["maxThr"] = 9.18*4 # Maximum total thrust
quad_params["minWmotor"] = 75 # Minimum motor rotation speed (rad/s)
quad_params["maxWmotor"] = 925 # Maximum motor rotation speed (rad/s)
quad_params["tau"] = 0.015 # Value for second order system for Motor dynamics
quad_params["kp"] = 1.0 # Value for second order system for Motor dynamics
quad_params["damp"] = 1.0 # Value for second order system for Motor dynamics
quad_params["motorc1"] = 8.49 # w (rad/s) = cmd*c1 + c0 (cmd in %)
quad_params["motorc0"] = 74.7
# Select whether to use gyroscopic precession of the rotors in the quadcopter dynamics
# ---------------------------
# Set to False if rotor inertia isn't known (gyro precession has negligible effect on drone dynamics)
quad_params["usePrecession"] = False
quad_params["Ts"] = 1/200 # state calculation time step (current ode settings run faster using a smaller value)
quad_params["Tp"] = 1/25 # period it publishes the current pose
quad_params["Tfs"] = 1/50 # period it publishes the full state
quad_params["orient"] = "ENU"
quad_params["target_frame"] = 'flying_sensor'
quad_params["map_frame"] = 'map'
class QuadSim(Node):
def __init__(self):
super().__init__('quadsim',
allow_undeclared_parameters=True, # necessary for using set_parameters
automatically_declare_parameters_from_overrides=True) # allows command line parameters
self.t = None
self.w_cmd_lock = Lock()
self.wind_lock = Lock()
self.sim_pub_lock = Lock()
# pos[3], quat[4], rpy[3], vel[3], vel_dot[3], omega[3], omega_dot[3]
self.curr_state = np.zeros(22, dtype='float64')
self.wind = [0,0,0]
self.prev_wind = [0,0,0]
self.get_carlastatus = self.create_subscription(
CarlaStatus,
'/carla/status',
self.get_carlastatus_cb,
1)
def get_carlastatus_cb(self, msg):
self.destroy_subscription(self.get_carlastatus) # we don't need this subscriber anymore...
# Read ROS2 parameters the user may have set
# E.g. (https://docs.ros.org/en/foxy/How-To-Guides/Node-arguments.html):
# --ros-args -p init_pose:=[0,0,0,0,0,0])
# --ros-args --params-file params.yaml
read_params = ROS2Params2Dict(self, 'quadsim', list(quad_params.keys()) + ["init_pose"])
for k,v in read_params.items():
# Update local parameters
quad_params[k] = v
# Update ROS2 parameters
Dict2ROS2Params(self, quad_params) # the controller needs to read some parameters from here
# Timer for the tf
# I couldn't find a way to receive it without using a timer
# to allow me to call lookup_transform after rclpy.spin(quad_node)
self.tf_trials = 5
self.tf_buffer = Buffer()
self.tf_listener = TransformListener(self.tf_buffer, self)
self.tf_timer = self.create_timer(1.0, self.on_tf_init_timer)
def get_tf(self, t=0.0, timeout=1.0):
try:
now = Time(nanoseconds=t)
trans = self.tf_buffer.lookup_transform(
quad_params["map_frame"],
quad_params["target_frame"],
now,
timeout=Duration(seconds=timeout))
self.get_logger().info(f'TF received {trans}')
curr_pos = [trans.transform.translation.x,
trans.transform.translation.y,
trans.transform.translation.z]
curr_quat = [trans.transform.rotation.x,
trans.transform.rotation.y,
trans.transform.rotation.z,
trans.transform.rotation.w]
s = trans.header.stamp.sec
ns = trans.header.stamp.nanosec
return (s + ns/1E9), curr_pos, curr_quat
except TransformException as ex:
self.get_logger().error(f'Could not transform {quad_params["map_frame"]} to {quad_params["target_frame"]}: {ex}')
def on_tf_init_timer(self):
res = self.get_tf()
if res:
self.t, init_pos, init_quat = res
init_rpy = Rotation.from_quat(init_quat).as_euler('xyz')
else:
return
if "init_pose" not in quad_params:
quad_params["init_pose"] = np.concatenate((init_pos,init_rpy))
# Update ROS2 parameters
Dict2ROS2Params(self, {"init_pose": quad_params["init_pose"]}) # the controller needs to read some parameters from here
else:
self.destroy_timer(self.tf_timer)
self.start_sim()
def on_tf_timer(self):
res = self.get_tf()
if res:
if self.sim_pub_lock.acquire(blocking=False):
self.res = res
self.sim_pub_lock.release()
def start_sim(self):
params = ROS2Params2Dict(self, 'quadsim', quad_params.keys())
init_pose = np.array(params['init_pose']) # x0, y0, z0, phi0, theta0, psi0
init_twist = np.array([0,0,0,0,0,0]) # xdot, ydot, zdot, p, q, r
init_states = | np.hstack((init_pose,init_twist)) | numpy.hstack |
import os
import numpy as np
import shutil
from src.interpolation import beta_interpolate, mass_interpolate, age_interpolate
def retrieval(mass, age, beta, retrieval_input_dir, retrieval_scratch_dir, retrieval_output_dir):
# check if mass is outside [0.1, 10.0]. also if age is outside of [0,1]
if not (0.1 <= mass <= 10.0):
raise Exception('ERROR: mass must be in [0.1, 10.0]')
if not (0.0 <= age <= 1.0):
raise Exception('ERROR: age must be in [0, 1]')
# check if already have this in ../retrieval/,
if os.path.exists(retrieval_output_dir):
dirs = [d for d in os.listdir(retrieval_output_dir) if not d.startswith('.')]
m_array_temp = [float(d.split('_')[0][1:]) for d in dirs]
t_array_temp = [float(d.split('_')[1][1:]) for d in dirs]
for i, (m, t) in enumerate(zip(m_array_temp, t_array_temp)):
if (mass == m) & (age == t):
beta_files = [f for f in os.listdir(retrieval_output_dir + dirs[i]) if not f.startswith('.')]
b_array = [float(f.split('.dat')[0]) for f in beta_files]
beta_files = | np.array(beta_files) | numpy.array |
import os
from params import args
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from models.model_factory import make_model
from os import path, mkdir, listdir
import numpy as np
| np.random.seed(1) | numpy.random.seed |
"""Processes raw features extracted from MediaPipe, and selects
the specified features for visualization.
Methods
-------
_load_json
_calc_delta
_add_delta_col
select_features
"""
import json
import random
import argparse
import numpy as np
import pandas as pd
from scipy import interpolate
from scipy.spatial.distance import cdist
def _load_json(json_file: str) -> dict:
"""Load JSON file TODO: Remove and use src.utils.load_json.
Parameters
----------
json_file : str
File path of JSON to be loaded.
Returns
-------
data : dict
Data loaded from JSON.
"""
with open(json_file, 'r') as data_file:
data = json.loads(data_file.read())
return data
def _calc_delta(col: pd.Series) -> np.ndarray:
"""Calculates delta between consecutive rows of a given column.
Parameters
----------
col : pd.Series
Column for which to calculate delta.
Returns
-------
np.ndarray
Delta of rows.
"""
return np.concatenate([[0], col[1:].values - col[:-1].values])
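# Illustrative example, not part of the original module: for a column with
# values [1, 3, 6], _calc_delta returns [0, 2, 3] -- a leading zero followed by
# the frame-to-frame differences. The helper name is hypothetical.
def _calc_delta_example():
    col = pd.Series([1.0, 3.0, 6.0])
    return _calc_delta(col)  # array([0., 2., 3.])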
def _add_delta_col(df: pd.DataFrame, col: str) -> pd.DataFrame:
"""Calculate delta for a column and add it as a new column.
Parameters
----------
df : pd.DataFrame
DataFrame containing features.
col : str
Column for which to calculate delta.
Returns
-------
return_val : pd.DataFrame
DataFrame containing features with new delta column.
"""
df['delta_{}'.format(col)] = _calc_delta(df[col])
return df
def landmark_box_dist(landmark: list, hand: list) -> float:
curr_landmark = np.reshape(landmark, (3,21))
total_dist = 0
for point in curr_landmark:
hand_point = [[hand[0], hand[1]]]
landmark_point = [[point[0], point[1]]]
total_dist += cdist(hand_point, landmark_point)[0]
return total_dist/(21)
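# Illustrative sketch, not part of the original module: the greedy matching
# pattern used inside select_features below. Candidate (track, detection)
# pairs are visited in order of increasing distance and a pair is accepted
# only if neither member has been assigned yet. The helper name is hypothetical.
def _greedy_assign_sketch(track_points, detection_points):
    distances = {(i, j): cdist([t], [d])[0][0]
                 for i, t in enumerate(track_points)
                 for j, d in enumerate(detection_points)}
    track_used = {i: False for i in range(len(track_points))}
    det_used = {j: False for j in range(len(detection_points))}
    matches = []
    for (i, j), _ in sorted(distances.items(), key=lambda kv: kv[1]):
        if not track_used[i] and not det_used[j]:
            track_used[i] = det_used[j] = True
            matches.append((i, j))
    return matches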
def select_features(input_filepath: str, features_to_extract: list,
interpolation_method: str = 'spline', order: int = 3,
center_on_face: bool = False, is_2d: bool = True,
scale: int = 10, drop_na: bool = True, do_interpolate: bool = False) -> pd.DataFrame:
"""Processes raw features extracted from MediaPipe/Kinect, and
selects the specified features for use during training of HMMs.
Parameters
----------
input_filepath : str
File path of raw feature data to be processed and used for
selection.
features_to_extract : list
Names of columns to be selected after processing features.
interpolation_method : str, optional, by default 'spline'
Interpolation method used to fill missing values.
order : int, optional, by default 3
Hyperparameter needed for certain interpolation methods.
center_on_face : bool, optional, by default True
Whether to center the features on the main face.
is_2d : bool, optional, by default True
Whether data is 2-dimensional.
scale : int, optional, by default 10
Raw features are scaled from 0-1, which can cause issues with
very small means and variances in HTK. Used to scale up
features.
Returns
-------
df : pd.DataFrame
Selected features
"""
data = _load_json(input_filepath)
if not data:
return None
data = {int(key): value for key, value in data.items()}
n_frames = len(data)
hands = np.zeros((2, n_frames, 5))
landmarks = np.zeros((2, n_frames, 63))
faces = np.zeros((1, n_frames, 12))
for frame in sorted(data.keys()):
if data[frame]['boxes'] is not None:
visible_hands = np.array(sorted([data[frame]['boxes'][str(i)] for i in range(len(data[frame]['boxes']))], key= lambda x:x[0]))
distances = {(i, j): cdist([hand[frame-1][:2]], [visible_hand[:2]])
for i, hand
in enumerate(hands)
for j, visible_hand
in enumerate(visible_hands)}
if len(visible_hands) == 1:
if frame == 0:
for idx in range(len(hands)):
hands[idx][frame] = visible_hands[0][:5]
else:
sorted_distances, _ = sorted(distances.items(), key=lambda t: t[1])
prev_new_hand = sorted_distances[0][0]
prev_keep_hand = prev_new_hand ^ 0b1
new_hands = sorted([visible_hands[0][:5], hands[prev_keep_hand][frame-1]], key=lambda x: x[0])
hands[:,frame,:] = new_hands
else:
visible_hand_assigned = {n: False for n in range(len(visible_hands))}
hand_assigned = {n: False for n in range(len(hands))}
new_hands = []
for grouping, _ in sorted(distances.items(), key=lambda t: t[1]):
hand, visible_hand = grouping
if not hand_assigned[hand] and not visible_hand_assigned[visible_hand]:
hand_assigned[hand] = True
visible_hand_assigned[visible_hand] = True
new_hands.append(visible_hands[visible_hand][:5])
hands[:,frame,:] = sorted(new_hands, key=lambda x: x[0])
if data[frame]['landmarks'] is not None:
if data[frame]['boxes'] is None:
raise Exception('Red Alert: Our assumption that landmarks are only provided when we have boxes is incorrect')
else:
visible_landmarks = []
for i in range(len(data[frame]['landmarks'])):
for j in range(len(data[frame]['landmarks'][str(i)])):
visible_landmarks += data[frame]['landmarks'][str(i)][str(j)]
visible_landmarks = np.array(visible_landmarks).reshape(-1, 63)
curr_hands = hands[:,frame,:]
distances = {(i, j): landmark_box_dist(landmark, hand)
for i, hand in enumerate(curr_hands)
for j, landmark in enumerate(visible_landmarks)}
if len(visible_landmarks) == 1:
if frame == 0:
for idx in range(len(landmarks)):
landmarks[idx][frame] = visible_landmarks[0]
else:
sorted_distances, _ = sorted(distances.items(), key=lambda t: t[1])
prev_new_landmark = sorted_distances[0][0]
prev_keep_landmark = prev_new_landmark ^ 0b1
landmarks[prev_new_landmark,frame,:] = visible_landmarks[0]
landmarks[prev_keep_landmark,frame,:] = landmarks[prev_keep_landmark,frame-1,:]
else:
visible_landmark_assigned = {n: False for n in range(len(visible_hands))}
curr_hand_assigned = {n: False for n in range(len(hands))}
for grouping, _ in sorted(distances.items(), key=lambda t: t[1]):
hand, visible_landmark = grouping
if not curr_hand_assigned[hand] and not visible_landmark_assigned[visible_landmark]:
curr_hand_assigned[hand] = True
visible_landmark_assigned[visible_landmark] = True
landmarks[hand, frame, :] = visible_landmarks[visible_landmark]
# if data[frame]['landmarks'] is not None:
# visible_landmarks = []
# for i in range(len(data[frame]['landmarks'])):
# for j in range(len(data[frame]['landmarks'][str(i)])):
# visible_landmarks += data[frame]['landmarks'][str(i)][str(j)]
# visible_landmarks = np.array(visible_landmarks).reshape(-1, 63)
# if len(visible_landmarks) == 1:
# landmarks[:, frame] = visible_landmarks[0]
# distances = {(i, j): cdist([landmark[frame-1]], [visible_landmark])
# for i, landmark
# in enumerate(landmarks)
# for j, visible_landmark
# in enumerate(visible_landmarks)}
# visible_landmark_assigned = {n: False for n in range(len(visible_landmarks))}
# landmark_assigned = {n: False for n in range(len(landmarks))}
# for grouping, _ in sorted(distances.items(), key=lambda t: t[1]):
# landmark, visible_landmark = grouping
# if not landmark_assigned[landmark] and not visible_landmark_assigned[visible_landmark]:
# landmark_assigned[landmark] = True
# visible_landmark_assigned[visible_landmark] = True
# landmarks[landmark][frame] = visible_landmarks[visible_landmark]
if data[frame]['faces'] is not None:
means = np.array(np.mean(np.ma.masked_equal(faces, 0), axis=1))
visible_faces = []
for i in range(len(data[frame]['faces'])):
for j in range(len(data[frame]['faces'][str(i)])):
visible_faces += data[frame]['faces'][str(i)][str(j)]
visible_faces = np.array(visible_faces).reshape(-1, 12)
for visible_face in visible_faces:
if len(faces) == 1 and not np.any(means):
faces[0, frame] = visible_face
else:
if not np.any(np.all(np.abs(means - visible_face) < 0.04, axis=1)):
new_face = np.zeros((1, n_frames, 12))
new_face[0, frame] = visible_face
faces = | np.concatenate([faces, new_face], axis=0) | numpy.concatenate |
from __future__ import print_function
# modiofied based on PythTB python tight binding module.
import numpy as np
from builtins import range, zip
from pyglib.iface.wannierio import get_wannier_data
def tb_wigner_seitz(ngrid,lat):
deg_ws = []
rvec_ws = []
ndiff = np.zeros(3)
for n0 in range(-ngrid[0], ngrid[0]+1):
for n1 in range(-ngrid[1], ngrid[1]+1):
for n2 in range(-ngrid[2], ngrid[2]+1):
dist_list = []
for i0 in [0,1,2,-1,-2]:
ndiff[0] = n0 - i0*ngrid[0]
for i1 in [0,1,2,-1,-2]:
ndiff[1] = n1 - i1*ngrid[1]
for i2 in [0,1,2,-1,-2]:
ndiff[2] = n2 - i2*ngrid[2]
dist_list.append(np.linalg.norm(ndiff.dot(lat)))
dist_list = np.asarray(dist_list)
dist_min = np.min(dist_list)
if np.abs(dist_list[0]-dist_min) < 1.e-7:
deg_ws.append(np.count_nonzero(\
np.abs(dist_list-dist_min) < 1.e-7))
rvec_ws.append(np.array([n0,n1,n2]))
# sum-rule check
deg_ws = np.array(deg_ws)
rvec_ws = np.asarray(rvec_ws)
tot1 = np.sum(1./deg_ws)
tot2 = np.prod(ngrid)
if np.abs(tot1 - tot2) > 1.e-7:
raise ValueError("error in finding wigner-seitz points {} vs {}".\
format(tot1, tot2))
return deg_ws, rvec_ws
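# Illustrative usage sketch, not part of the original module: for a simple
# cubic lattice and a 4x4x4 k-grid, the Wigner-Seitz search above returns
# degeneracy weights whose 1/deg sum equals 4*4*4 = 64, the sum rule already
# enforced inside tb_wigner_seitz. The helper name is hypothetical.
def _wigner_seitz_sketch():
    deg_ws, rvec_ws = tb_wigner_seitz([4, 4, 4], np.eye(3))
    assert abs(np.sum(1.0 / deg_ws) - 64.0) < 1.e-7
    return deg_ws, rvec_ws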
def get_tb_hr(kpoints,rpoints,wfwans,evals):
phase_mat = np.exp(-2.j*np.pi*np.asarray(kpoints).dot(rpoints.T)) \
/len(kpoints)
hk_list = [[wfwansk1.T.conj().dot(np.diag(evalsk1)).dot(wfwansk1) \
for wfwansk1, evalsk1 in zip(wfwans1,evals1)]\
for wfwans1, evals1 in zip(wfwans,evals)]
hk_list = np.array(hk_list).swapaxes(1,2).swapaxes(2,3)
hr_list = np.tensordot(hk_list, phase_mat, axes=(3,0))
return hr_list
class tb_model(object):
r"""
This is the main class of the PythTB package which contains all
information for the tight-binding model.
:param lat: Array containing lattice vectors in Cartesian
coordinates (in arbitrary units). In example the below, the first
lattice vector has coordinates [1.0,0.5] while the second
one has coordinates [0.0,2.0]. By default, lattice vectors
are an identity matrix.
"""
def __init__(self,lat,deg_ws,rpoints,hr_list):
self._dim_k = 3
self._dim_r = 3
self._lat=np.array(lat,dtype=float)
if self._lat.shape != (self._dim_r,self._dim_r):
raise Exception("\nWrong lat array dimensions")
# check that volume is not zero and that have right handed system
if np.abs(np.linalg.det(self._lat))<1.0E-6:
raise Exception(\
"\nLattice vectors length/area/volume too"+\
" close to zero, or zero.")
if np.linalg.det(self._lat)<0.0:
raise Exception(\
"\n\nLattice vectors need to form right handed system.")
self.deg_ws = np.asarray(deg_ws)
self.rpoints = np.asarray(rpoints)
self.hr_list = np.asarray(hr_list)
self._norb = self.hr_list.shape[2]
def _gen_ham(self,kpt,isp):
"""Generate Hamiltonian for a certain k-point,
which is given in reduced coordinates!"""
phase_mat = np.exp(-2.j*np.pi*self.rpoints.dot(kpt))/self.deg_ws
ham = np.tensordot(self.hr_list[isp],phase_mat,axes=(2,0))
return ham
def _sol_ham(self,ham,eig_vectors=False):
"""Solves Hamiltonian and returns eigenvectors, eigenvalues"""
# check that matrix is hermitian
if np.max(ham-ham.T.conj())>1.0E-9:
raise Exception("\n\nHamiltonian matrix is not hermitian?!")
#solve matrix
if eig_vectors==False: # only find eigenvalues
eval = np.linalg.eigvalsh(ham)
# sort eigenvalues and convert to real numbers
eval = _nicefy_eig(eval)
return np.array(eval,dtype=float)
else: # find eigenvalues and eigenvectors
eval,eig = np.linalg.eigh(ham)
# sort evectors, eigenvalues and convert to real numbers
eval,eig = _nicefy_eig(eval,eig)
# reshape eigenvectors if doing a spinfull calculation
return eval, eig
def k_uniform_mesh(self,mesh_size):
r"""
Returns a uniform grid of k-points that can be passed to
function :func:`pythtb.tb_model.solve_all`. This
function is useful for plotting density of states histogram
and similar.
Returned uniform grid of k-points always contains the origin.
:param mesh_size: Number of k-points in the mesh in each
periodic direction of the model.
:returns:
* **k_vec** -- Array of k-vectors on the mesh that can be
directly passed to function :func:`pythtb.tb_model.solve_all`.
Example usage::
# returns a 10x20x30 mesh of a tight binding model
# with three periodic directions
k_vec = my_model.k_uniform_mesh([10,20,30])
# solve model on the uniform mesh
my_model.solve_all(k_vec)
"""
# get the mesh size and checks for consistency
use_mesh=np.array(list(map(round,mesh_size)),dtype=int)
if use_mesh.shape!=(self._dim_k,):
print(use_mesh.shape)
raise Exception("\n\nIncorrect size of the specified k-mesh!")
if np.min(use_mesh)<=0:
raise Exception("\n\nMesh must have positive non-zero number of elements.")
# construct the mesh
if self._dim_k==1:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[1])
norm=norm.transpose([1,0])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,0]).reshape([use_mesh[0],1])
elif self._dim_k==2:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[2])
norm=norm.transpose([2,0,1])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,0]).reshape([use_mesh[0]*use_mesh[1],2])
elif self._dim_k==3:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1],0:use_mesh[2]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[3])
norm=norm.transpose([3,0,1,2])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,3,0]).reshape([use_mesh[0]*use_mesh[1]*use_mesh[2],3])
else:
raise Exception("\n\nUnsupported dim_k!")
return k_vec
def k_path(self,kpts,nk,report=True):
r"""
Interpolates a path in reciprocal space between specified
k-points. In 2D or 3D the k-path can consist of several
straight segments connecting high-symmetry points ("nodes"),
and the results can be used to plot the bands along this path.
The interpolated path that is returned contains as
equidistant k-points as possible.
:param kpts: Array of k-vectors in reciprocal space between
which interpolated path should be constructed. These
k-vectors must be given in reduced coordinates. As a
special case, in 1D k-space kpts may be a string:
* *"full"* -- Implies *[ 0.0, 0.5, 1.0]* (full BZ)
* *"fullc"* -- Implies *[-0.5, 0.0, 0.5]* (full BZ, centered)
* *"half"* -- Implies *[ 0.0, 0.5]* (half BZ)
:param nk: Total number of k-points to be used in making the plot.
:param report: Optional parameter specifying whether printout
is desired (default is True).
:returns:
* **k_vec** -- Array of (nearly) equidistant interpolated
k-points. The distance between the points is calculated in
the Cartesian frame, however coordinates themselves are
given in dimensionless reduced coordinates! This is done
so that this array can be directly passed to function
:func:`pythtb.tb_model.solve_all`.
* **k_dist** -- Array giving accumulated k-distance to each
k-point in the path. Unlike array *k_vec* this one has
dimensions! (Units are defined here so that for an
one-dimensional crystal with lattice constant equal to for
example *10* the length of the Brillouin zone would equal
*1/10=0.1*. In other words factors of :math:`2\pi` are
absorbed into *k*.) This array can be used to plot path in
the k-space so that the distances between the k-points in
the plot are exact.
* **k_node** -- Array giving accumulated k-distance to each
node on the path in Cartesian coordinates. This array is
typically used to plot nodes (typically special points) on
the path in k-space.
Example usage::
# Construct a path connecting four nodal points in k-space
# Path will contain 401 k-points, roughly equally spaced
path = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.5], [0.0, 0.0]]
(k_vec,k_dist,k_node) = my_model.k_path(path,401)
# solve for eigenvalues on that path
evals = tb.solve_all(k_vec)
# then use evals, k_dist, and k_node to plot bandstructure
# (see examples)
"""
# processing of special cases for kpts
if kpts=='full':
# full Brillouin zone for 1D case
k_list=np.array([[0.],[0.5],[1.]])
elif kpts=='fullc':
# centered full Brillouin zone for 1D case
k_list=np.array([[-0.5],[0.],[0.5]])
elif kpts=='half':
# half Brillouin zone for 1D case
k_list=np.array([[0.],[0.5]])
else:
k_list=np.array(kpts)
# in 1D case if path is specified as a vector, convert it to an (n,1) array
if len(k_list.shape)==1 and self._dim_k==1:
k_list=np.array([k_list]).T
# make sure that k-points in the path have correct dimension
if k_list.shape[1]!=self._dim_k:
print('input k-space dimension is',k_list.shape[1])
print('k-space dimension taken from model is',self._dim_k)
raise Exception("\n\nk-space dimensions do not match")
# must have more k-points in the path than number of nodes
if nk<k_list.shape[0]:
raise Exception("\n\nMust have more points in the path than number of nodes.")
# number of nodes
n_nodes=k_list.shape[0]
# extract the lattice vectors from the TB model
lat_per=np.copy(self._lat)
# compute k_space metric tensor
k_metric = np.linalg.inv(np.dot(lat_per,lat_per.T))
# Find distances between nodes and set k_node, which is
# accumulated distance since the start of the path
# initialize array k_node
k_node=np.zeros(n_nodes,dtype=float)
for n in range(1,n_nodes):
dk = k_list[n]-k_list[n-1]
dklen = np.sqrt(np.dot(dk,np.dot(k_metric,dk)))
k_node[n]=k_node[n-1]+dklen
# Find indices of nodes in interpolated list
node_index=[0]
for n in range(1,n_nodes-1):
frac=k_node[n]/k_node[-1]
node_index.append(int(round(frac*(nk-1))))
node_index.append(nk-1)
# initialize two arrays temporarily with zeros
# array giving accumulated k-distance to each k-point
k_dist= | np.zeros(nk,dtype=float) | numpy.zeros |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from easy21 import Easy21
GAMMA = 0.9
BATCH_SIZE = 128
LR = 0.01 # Learning rate
EPSILON = 0.1 # Greedy
N_STATES = 2
N_ACTIONS = 2
MEMORY_CAPACITY = 5000
TARGET_REPLACE_ITER = 100 # target update frequency
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(N_STATES, 20)
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(20, 50)
self.fc2.weight.data.normal_(0, 0.1)
self.out = nn.Linear(50, N_ACTIONS)
self.out.weight.data.normal_(0, 0.1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.out(x)
x = 2 * F.softmax(x, dim=1) - 1
return x
class DQN(object):
def __init__(self):
self.eval_net, self.target_net = Net().to(device), Net().to(device)
self.learn_step_counter = 0
self.memory_counter = 0
self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))
self.optimizer = torch.optim.RMSprop(self.eval_net.parameters(), lr=LR)
self.loss_func = nn.MSELoss()
def choose_action(self, s):
s = torch.unsqueeze(torch.FloatTensor(s), 0).to(device)
if np.random.random() < EPSILON:
# Exploration
return np.random.randint(0, 2)
# Greedy, exploitation
a_values = self.eval_net.forward(s)
a = torch.argmax(torch.squeeze(a_values))
return a
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, [a, r], s_))
#print(s, a, r, s_, transition)
# replace the old memory with new memory
index = self.memory_counter % MEMORY_CAPACITY
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
# target parameter update
if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
b_memory = self.memory[sample_index, :]
b_s = torch.FloatTensor(b_memory[:, :N_STATES]).to(device)
b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)).to(device)
b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]).to(device)
b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:]).to(device)
# q_eval w.r.t the action in experience
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)
q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate
q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)
loss = self.loss_func(q_eval, q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def save_model(self):
torch.save(self.eval_net.state_dict(), 'params.pkl')
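# Illustrative sketch, not part of the original script: the Bellman target that
# DQN.learn() computes per transition -- the reward plus GAMMA times the target
# network's maximum action value for the next state (terminal states are not
# treated specially here, matching learn()). The helper name is hypothetical.
def _bellman_target_sketch(reward, next_state_q_values, gamma=GAMMA):
    next_q = np.asarray(next_state_q_values, dtype=float)
    return reward + gamma * next_q.max()
# Example: reward=0, next-state Q-values [0.2, -0.4] give a target of 0.9*0.2 = 0.18.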
def DQN_learning(env):
num_episodes = 100000
realQvalue = np.load("q.npy")
dqn = DQN()
wins = 0
draws = 0
for i_episode in range(num_episodes + 1):
s, _, _ = env.reset()
ep_r = 0
while True:
a = dqn.choose_action(s)
# take action
s_, r, done = env.step(a)
dqn.store_transition(s, a, r, s_)
ep_r += r
if dqn.memory_counter > MEMORY_CAPACITY:
dqn.learn()
if done:
if ep_r == 1:
wins += 1
elif ep_r == 0:
draws += 1
if i_episode % 1000 == 0:
print('Ep: ', i_episode,
'| Ep_r: ', round(ep_r, 2), 'win:draw:lose={}:{}:{}'.format(wins, draws, 1000 - wins - draws))
wins = draws = 0
if done:
if i_episode % 10000 == 0:
M = torch.zeros((21, 10, 2)).to(device)
for i in range(21):
for j in range(10):
s = [i + 1, j + 1]
s = torch.unsqueeze(torch.FloatTensor(s), 0).to(device)
a_values = dqn.eval_net.forward(s).detach()
M[i][j] = torch.squeeze(a_values)
QvalueM = M.detach().to('cpu').numpy()
print('MSE: ', ((QvalueM - realQvalue) ** 2).mean())
if i_episode % 20000 == 0:
X = np.arange(1, 22)
Y = np.arange(1, 11)
XX, YY = X, Y = np.meshgrid(X, Y)
fig = plt.figure(dpi=200)
ax = fig.gca(projection='3d')
res = | np.max(QvalueM, axis=2) | numpy.max |
# This class defines key analytical routines for performing a 'gap-analysis'
# on EYA-estimated annual energy production (AEP) and that from operational data.
# Categories considered are availability, electrical losses, and long-term
# gross energy. The main output is a 'waterfall' plot linking the EYA-
# estimated and operational-estiamted AEP values.
import pandas as pd
import numpy as np
from tqdm import tqdm
import random
from operational_analysis.toolkits import met_data_processing
from operational_analysis.toolkits import filters
from operational_analysis.toolkits.power_curve import functions
from operational_analysis.toolkits import imputing
from operational_analysis.toolkits import timeseries
from operational_analysis import logged_method_call
from operational_analysis import logging
logger = logging.getLogger(__name__)
class TurbineLongTermGrossEnergy(object):
"""
A serial (Pandas-driven) implementation of calculating long-term gross energy
for each turbine in a wind farm. This module collects standard processing and
analysis methods for estimating this metric.
The method proceeds as follows:
1. Filter turbine data for normal operation
2. Calculate daily means of wind speed, wind direction, and air density from reanalysis products
3. Calculate daily sums of energy from each turbine
4. Fit daily data (features are atmospheric variables, response is turbine power) using a
generalized additive model (GAM)
5. Apply model results to long-term atmospheric variables to calculate long-term
gross energy for each turbine
A Monte Carlo approach is implemented to repeat the procedure multiple times
to get a distribution of results, from which uncertainty quantification
for the long-term gross energy estimate is derived.
The end result is a table of long-term gross energy values for each turbine in the wind farm. Note
that this gross energy metric does not back out losses associated with waking or turbine performance.
Rather, gross energy in this context is what the turbine would have produced under normal operation
(i.e. excluding downtime and underperformance).
Required schema of PlantData:
- _scada_freq
- reanalysis products ['merra2', 'erai', 'ncep2'] with columns ['time', 'u_ms', 'v_ms', 'windspeed_ms', 'rho_kgm-3']
- scada with columns: ['time', 'id', 'wmet_wdspd_avg', 'wtur_W_avg', 'energy_kwh']
"""
@logged_method_call
def __init__(self, plant, UQ = False, num_sim = 2000):
"""
Initialize turbine long-term gross energy analysis with data and parameters.
Args:
plant(:obj:`PlantData object`): PlantData object from which TurbineLongTermGrossEnergy should draw data.
UQ:(:obj:`bool`): whether (True) or not (False) to perform uncertainty quantification
num_sim:(:obj:`int`): number of Monte Carlo simulations. Please note that this script is somewhat computationally heavy so the default num_sim value has been adjusted accordingly.
"""
logger.info("Initializing TurbineLongTermGrossEnergy Object")
# Check that selected UQ is allowed
if UQ == True:
logger.info("Note: uncertainty quantification will be performed in the calculation")
self.num_sim = num_sim
elif UQ == False:
logger.info("Note: uncertainty quantification will NOT be performed in the calculation")
self.num_sim = None
else:
raise ValueError("UQ has to be either True (uncertainty quantification performed) or False (uncertainty quantification NOT performed, default)")
self.UQ = UQ
self._plant = plant # Set plant as attribute of analysis object
self._turbs = self._plant._scada.df['id'].unique() # Store turbine names
# Get start and end of POR days in SCADA
self._por_start = format(plant._scada.df.index.min(), '%Y-%m-%d')
self._por_end = format(plant._scada.df.index.max(), '%Y-%m-%d')
self._full_por = pd.date_range(self._por_start, self._por_end, freq = 'D')
# Define several dictionaries and data frames to be populated within this method
self._scada_dict = {}
self._daily_reanal_dict = {}
self._model_dict = {}
self._model_results = {}
self._turb_lt_gross = {}
self._scada_daily_valid = pd.DataFrame()
# Set number of 'valid' counts required when summing data to daily values
self._num_valid_daily = 60. / (pd.to_timedelta(self._plant._scada_freq).seconds/60) * 24
# Initially sort the different turbine data into dictionary entries
logger.info("Processing SCADA data into dictionaries by turbine (this can take a while)")
self.sort_scada_by_turbine()
@logged_method_call
def run(self,reanal_subset = ['erai', 'ncep2', 'merra2'],
uncertainty_scada=0.005, wind_bin_thresh=(1, 3),
max_power_filter=(0.8, 0.9), correction_threshold=(0.85, 0.95),
enable_plotting = False,
plot_dir = None):
"""
Perform pre-processing of data into an internal representation for which the analysis can run more quickly.
Args:
reanal_subset(:obj:`list`): Which reanalysis products to use for long-term correction
uncertainty_scada(:obj:`float`): uncertainty imposed to scada data (used in UQ = True case only)
max_power_filter(:obj:`tuple`): Maximum power threshold (fraction) to which the bin filter
should be applied (default 0.85). This should be a tuple in the UQ = True case,
a single value when UQ = False.
wind_bin_thresh(:obj:`tuple`): The filter threshold for each bin (default is 2 m/s).
This should be a tuple in the UQ = True case, a single value when UQ = False.
correction_threshold(:obj:`tuple`): The threshold (fraction) above which daily scada energy data
should be corrected (default is 0.90).
This should be a tuple in the UQ = True case, a single value when UQ = False.
enable_plotting(:obj:`boolean`): Indicate whether to output plots
plot_dir(:obj:`string`): Location to save figures
Returns:
(None)
"""
# Assign parameters as object attributes
self.enable_plotting = enable_plotting
self.plot_dir = plot_dir
self._reanal = reanal_subset # Reanalysis data to consider in fitting
# Check uncertainty types
vars = [wind_bin_thresh, max_power_filter, correction_threshold]
expected_type = float if self.UQ == False else tuple
for var in vars:
assert type(var) == expected_type, f"wind_bin_thresh, max_power_filter, correction_threshold must all be {expected_type} for UQ={self.UQ}"
# Define relevant uncertainties, to be applied in Monte Carlo sampling
self.uncertainty_wind_bin_thresh = np.array(wind_bin_thresh, dtype=np.float64)
self.uncertainty_max_power_filter = np.array(max_power_filter, dtype=np.float64)
self.uncertainty_correction_threshold = np.array(correction_threshold, dtype=np.float64)
if self.UQ == True:
self.uncertainty_scada = uncertainty_scada
self.setup_inputs()
# Loop through number of simulations, store TIE results
for n in tqdm(np.arange(self.num_sim)):
self._run = self._inputs.loc[n]
# MC-sampled parameter in this function!
logger.info("Filtering turbine data")
self.filter_turbine_data() # Filter turbine data
if self.enable_plotting:
logger.info("Plotting filtered power curves")
self.plot_filtered_power_curves(self.plot_dir)
# MC-sampled parameter in this function!
logger.info("Processing reanalysis data to daily averages")
self.setup_daily_reanalysis_data() # Setup daily reanalysis products
# MC-sampled parameter in this function!
logger.info("Processing scada data to daily sums")
self.filter_sum_impute_scada() # Setup daily scada data
logger.info("Setting up daily data for model fitting")
self.setup_model_dict() # Setup daily data to be fit using the GAM
# MC-sampled parameter in this function!
logger.info("Fitting model data")
self.fit_model() # Fit daily turbine energy to atmospheric data
logger.info("Applying fitting results to calculate long-term gross energy")
self.apply_model_to_lt(n) # Apply fitting result to long-term reanalysis data
if self.enable_plotting:
logger.info("Plotting daily fitted power curves")
self.plot_daily_fitting_result(self.plot_dir) # Plot daily fitted power curves
# Log the completion of the run
logger.info("Run completed")
def setup_inputs(self):
"""
Create and populate the data frame defining the simulation parameters.
This data frame is stored as self._inputs
Args:
(None)
Returns:
(None)
"""
if self.UQ == True:
reanal_list = list(np.repeat(self._reanal, self.num_sim)) # Create extra long list of reanalysis product names to sample from
inputs = {
"reanalysis_product": np.asarray(random.sample(reanal_list, self.num_sim)),
"scada_data_fraction": | np.random.normal(1, self.uncertainty_scada, self.num_sim) | numpy.random.normal |
#!/usr/bin/env python
## Copyright 2002 by PyMMLib Development Group, http://pymmlib.sourceforge.net/
## This code is part of the PyMMLib distribution and governed by
## its license. Please see the LICENSE_pymmlib file that should have been
## included as part of this package.
"""Symmetry operations as functions on vectors or arrays.
"""
import numpy
## 64 unique rotation matrices
Rot_Z_mY_X = numpy.array([[ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0], [ 1.0, 0.0, 0.0]], float)
Rot_Y_mX_mZ = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_XmY_X_mZ = numpy.array([[ 1.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mX_Y_mZ = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_X_mZ_Y = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0], [ 0.0, 1.0, 0.0]], float)
Rot_Y_mXY_Z = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Y_mX_Z = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_XmY_X_Z = numpy.array([[ 1.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mX_mXY_mZ = numpy.array([[-1.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_Y_Z_X = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0], [ 1.0, 0.0, 0.0]], float)
Rot_mY_mZ_X = numpy.array([[ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0], [ 1.0, 0.0, 0.0]], float)
Rot_X_Z_mY = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0]], float)
Rot_XmY_mY_Z = numpy.array([[ 1.0,-1.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Y_X_mZ = numpy.array([[ 0.0, 1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_Y_mZ_X = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0], [ 1.0, 0.0, 0.0]], float)
Rot_mXY_Y_Z = numpy.array([[-1.0, 1.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mX_mY_mZ = numpy.array([[-1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_X_Y_mZ = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mXY_mX_Z = numpy.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mZ_mY_mX = numpy.array([[ 0.0, 0.0,-1.0], [ 0.0,-1.0, 0.0], [-1.0, 0.0, 0.0]], float)
Rot_X_mZ_mY = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0], [ 0.0,-1.0, 0.0]], float)
Rot_X_Y_Z = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mY_mX_mZ = numpy.array([[ 0.0,-1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mY_X_Z = numpy.array([[ 0.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Z_X_Y = numpy.array([[ 0.0, 0.0, 1.0], [ 1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0]], float)
Rot_X_XmY_Z = numpy.array([[ 1.0, 0.0, 0.0], [ 1.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mY_X_mZ = numpy.array([[ 0.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mY_Z_mX = numpy.array([[ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], float)
Rot_mY_Z_X = numpy.array([[ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0], [ 1.0, 0.0, 0.0]], float)
Rot_mX_mZ_mY = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0], [ 0.0,-1.0, 0.0]], float)
Rot_mX_Z_Y = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0], [ 0.0, 1.0, 0.0]], float)
Rot_mZ_mX_mY = numpy.array([[ 0.0, 0.0,-1.0], [-1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0]], float)
Rot_X_XmY_mZ = numpy.array([[ 1.0, 0.0, 0.0], [ 1.0,-1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mY_XmY_mZ = numpy.array([[ 0.0,-1.0, 0.0], [ 1.0,-1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_Z_X_mY = numpy.array([[ 0.0, 0.0, 1.0], [ 1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0]], float)
Rot_mZ_mY_X = numpy.array([[ 0.0, 0.0,-1.0], [ 0.0,-1.0, 0.0], [ 1.0, 0.0, 0.0]], float)
Rot_X_Z_Y = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0], [ 0.0, 1.0, 0.0]], float)
Rot_Z_mX_mY = numpy.array([[ 0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0]], float)
Rot_mX_Z_mY = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0]], float)
Rot_X_mY_Z = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mY_mX_Z = numpy.array([[ 0.0,-1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Z_mY_mX = numpy.array([[ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0], [-1.0, 0.0, 0.0]], float)
Rot_mX_mY_Z = numpy.array([[-1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Z_Y_X = numpy.array([[ 0.0, 0.0, 1.0], [ 0.0, 1.0, 0.0], [ 1.0, 0.0, 0.0]], float)
Rot_mZ_Y_mX = numpy.array([[ 0.0, 0.0,-1.0], [ 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], float)
Rot_Y_Z_mX = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], float)
Rot_mY_XmY_Z = numpy.array([[ 0.0,-1.0, 0.0], [ 1.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mXY_Y_mZ = numpy.array([[-1.0, 1.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mZ_mX_Y = numpy.array([[ 0.0, 0.0,-1.0], [-1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0]], float)
Rot_mX_mZ_Y = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0], [ 0.0, 1.0, 0.0]], float)
Rot_mX_Y_Z = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_X_mY_mZ = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mZ_X_Y = numpy.array([[ 0.0, 0.0,-1.0], [ 1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0]], float)
Rot_Y_mZ_mX = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0], [-1.0, 0.0, 0.0]], float)
Rot_mY_mZ_mX = numpy.array([[ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0], [-1.0, 0.0, 0.0]], float)
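# Usage sketch (an illustration, not part of the original module): each matrix
# rotates a column vector by left-multiplication, or an (N, 3) array of row
# vectors by right-multiplying with its transpose.
def _demo_apply_symop(R=Rot_Z_mY_X):
    v = numpy.array([1.0, 2.0, 3.0])
    single = numpy.dot(R, v)            # one vector
    coords = numpy.arange(12.0).reshape(4, 3)
    many = numpy.dot(coords, R.T)       # every row of an (N, 3) array
    return single, many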
import functools
import itertools
import re
import sys
import warnings
import threading
import operator
import numpy as np
import unittest
from numba import typeof, njit
from numba.core import types, typing, utils
from numba.core.compiler import compile_isolated, Flags, DEFAULT_FLAGS
from numba.np.numpy_support import from_dtype
from numba import jit, vectorize
from numba.core.errors import LoweringError, TypingError
from numba.tests.support import TestCase, CompilationCache, MemoryLeakMixin, tag
from numba.core.typing.npydecl import supported_ufuncs, all_ufuncs
from numba.np import numpy_support
from numba.core.registry import cpu_target
from numba.core.base import BaseContext
from numba.np import ufunc_db
is32bits = tuple.__itemsize__ == 4
iswindows = sys.platform.startswith('win32')
# NOTE: to test the implementation of Numpy ufuncs, we disable rewriting
# of array expressions.
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
enable_pyobj_flags.no_rewrites = True
no_pyobj_flags = Flags()
no_pyobj_flags.no_rewrites = True
enable_nrt_flags = Flags()
enable_nrt_flags.nrt = True
enable_nrt_flags.no_rewrites = True
def _unimplemented(func):
"""An 'expectedFailure' like decorator that only expects compilation errors
caused by unimplemented functions that fail in no-python mode"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except TypingError:
raise unittest._ExpectedFailure(sys.exc_info())
raise unittest._UnexpectedSuccess
def _make_ufunc_usecase(ufunc):
ldict = {}
arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)])
func_str = 'def fn({0}):\n np.{1}({0})'.format(arg_str, ufunc.__name__)
exec(func_str, globals(), ldict)
fn = ldict['fn']
fn.__name__ = '{0}_usecase'.format(ufunc.__name__)
return fn
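# For example, _make_ufunc_usecase(np.add) builds (roughly):
#     def fn(a0, a1, a2):
#         np.add(a0, a1, a2)
# i.e. the trailing positional argument(s) are the explicit output array(s).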
def _make_unary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x):\n return {0}(x)".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_binary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x,y):\n return x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_inplace_ufunc_op_usecase(ufunc_op):
"""Generates a function to be compiled that performs an inplace operation
ufunc_op can be a string like '+=' or a function like operator.iadd
"""
if isinstance(ufunc_op, str):
ldict = {}
exec("def fn(x,y):\n x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
else:
def inplace_op(x, y):
ufunc_op(x, y)
fn = inplace_op
return fn
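# For example, _make_inplace_ufunc_op_usecase('+=') yields the equivalent of:
#     def fn(x, y):
#         x += y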
def _as_dtype_value(tyargs, args):
"""Convert python values into numpy scalar objects.
"""
return [np.dtype(str(ty)).type(val) for ty, val in zip(tyargs, args)]
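# e.g. _as_dtype_value([types.float32, types.int64], [0.5, 3])
#      -> [np.float32(0.5), np.int64(3)]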
class BaseUFuncTest(MemoryLeakMixin):
def setUp(self):
super(BaseUFuncTest, self).setUp()
self.inputs = [
(np.uint32(0), types.uint32),
(np.uint32(1), types.uint32),
(np.int32(-1), types.int32),
(np.int32(0), types.int32),
(np.int32(1), types.int32),
(np.uint64(0), types.uint64),
(np.uint64(1), types.uint64),
(np.int64(-1), types.int64),
(np.int64(0), types.int64),
(np.int64(1), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(0.0), types.float32),
(np.float32(0.5), types.float32),
(np.float64(-0.5), types.float64),
(np.float64(0.0), types.float64),
(np.float64(0.5), types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C')),
(np.array([0,1], dtype=np.int8), types.Array(types.int8, 1, 'C')),
(np.array([0,1], dtype=np.int16), types.Array(types.int16, 1, 'C')),
(np.array([0,1], dtype=np.uint8), types.Array(types.uint8, 1, 'C')),
(np.array([0,1], dtype=np.uint16), types.Array(types.uint16, 1, 'C')),
]
self.cache = CompilationCache()
def _determine_output_type(self, input_type, int_output_type=None,
float_output_type=None):
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
return output_type
class TestUFuncs(BaseUFuncTest, TestCase):
def basic_ufunc_test(self, ufunc, flags=no_pyobj_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None,
kinds='ifc', positive_only=False):
# Necessary to avoid some Numpy warnings being silenced, despite
# the simplefilter() call below.
self.reset_module_warnings(__name__)
pyfunc = _make_ufunc_usecase(ufunc)
inputs = list(self.inputs) + additional_inputs
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
is_tuple = isinstance(input_operand, tuple)
if is_tuple:
args = input_operand
else:
args = (input_operand,) * ufunc.nin
if input_type in skip_inputs:
continue
if positive_only and np.any(args[0] < 0):
continue
# Some ufuncs don't allow all kinds of arguments
if (args[0].dtype.kind not in kinds):
continue
output_type = self._determine_output_type(
input_type, int_output_type, float_output_type)
input_types = (input_type,) * ufunc.nin
output_types = (output_type,) * ufunc.nout
cr = self.cache.compile(pyfunc, input_types + output_types,
flags=flags)
cfunc = cr.entry_point
if isinstance(args[0], np.ndarray):
results = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
else:
results = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
invalid_flag = False
with warnings.catch_warnings(record=True) as warnlist:
warnings.simplefilter('always')
pyfunc(*args, *expected)
warnmsg = "invalid value encountered"
for thiswarn in warnlist:
if (issubclass(thiswarn.category, RuntimeWarning)
and str(thiswarn.message).startswith(warnmsg)):
invalid_flag = True
cfunc(*args, *results)
for expected_i, result_i in zip(expected, results):
msg = '\n'.join(["ufunc '{0}' failed",
"inputs ({1}):", "{2}",
"got({3})", "{4}",
"expected ({5}):", "{6}"
]).format(ufunc.__name__,
input_type, input_operand,
output_type, result_i,
expected_i.dtype, expected_i)
try:
np.testing.assert_array_almost_equal(
expected_i, result_i,
decimal=5,
err_msg=msg)
except AssertionError:
if invalid_flag:
# Allow output to mismatch for invalid input
print("Output mismatch for invalid input",
input_tuple, result_i, expected_i)
else:
raise
def basic_int_ufunc_test(self, name=None, flags=no_pyobj_flags):
self.basic_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
############################################################################
# Math operations
def test_add_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.add, flags=flags)
def test_subtract_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.subtract, flags=flags)
def test_multiply_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.multiply, flags=flags)
def test_divide_ufunc(self, flags=no_pyobj_flags):
# Bear in mind that in python3 divide IS true_divide
# so the out type for int types will be a double
int_out_type = types.float64
self.basic_ufunc_test(np.divide, flags=flags, int_output_type=int_out_type)
def test_logaddexp_ufunc(self):
self.basic_ufunc_test(np.logaddexp, kinds='f')
def test_logaddexp2_ufunc(self):
self.basic_ufunc_test(np.logaddexp2, kinds='f')
def test_true_divide_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.true_divide, flags=flags, int_output_type=types.float64)
def test_floor_divide_ufunc(self):
self.basic_ufunc_test(np.floor_divide)
def test_negative_ufunc(self, flags=no_pyobj_flags):
# NumPy ufunc has bug with uint32 as input and int64 as output,
# so skip uint32 input.
self.basic_ufunc_test(np.negative, int_output_type=types.int64,
skip_inputs=[types.Array(types.uint32, 1, 'C'), types.uint32],
flags=flags)
def test_positive_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.positive, flags=flags)
def test_power_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.power, flags=flags,
positive_only=True)
def test_float_power_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.float_power, flags=flags, kinds="fc")
def test_gcd_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.gcd, flags=flags, kinds="iu")
def test_lcm_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.lcm, flags=flags, kinds="iu")
def test_remainder_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.remainder, flags=flags)
def test_mod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.mod, flags=flags, kinds='ifcu',
additional_inputs = [
((np.uint64(np.iinfo(np.uint64).max), np.uint64(16)), types.uint64)
])
def test_fmod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmod, flags=flags)
def test_abs_ufunc(self, flags=no_pyobj_flags, ufunc=np.abs):
self.basic_ufunc_test(ufunc, flags=flags,
additional_inputs = [
(np.uint32(np.iinfo(np.uint32).max), types.uint32),
(np.uint64(np.iinfo(np.uint64).max), types.uint64),
(np.float32(np.finfo(np.float32).min), types.float32),
(np.float64(np.finfo(np.float64).min), types.float64)
])
def test_absolute_ufunc(self, flags=no_pyobj_flags):
self.test_abs_ufunc(flags=flags, ufunc=np.absolute)
def test_fabs_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fabs, flags=flags, kinds='f')
def test_rint_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rint, flags=flags, kinds='cf')
def test_sign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sign, flags=flags)
def test_conj_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conj, flags=flags)
def test_exp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp, flags=flags, kinds='cf')
def test_exp2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp2, flags=flags, kinds='cf')
def test_log_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log, flags=flags, kinds='cf')
def test_log2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log2, flags=flags, kinds='cf')
def test_log10_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log10, flags=flags, kinds='cf')
def test_expm1_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.expm1, flags=flags, kinds='cf')
def test_log1p_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log1p, flags=flags, kinds='cf')
def test_sqrt_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sqrt, flags=flags, kinds='cf')
def test_square_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.square, flags=flags)
def test_cbrt_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cbrt, flags=flags, kinds='f')
def test_reciprocal_ufunc(self, flags=no_pyobj_flags):
# reciprocal for integers doesn't make much sense and is problematic
# in the case of division by zero, as an inf will overflow float to
# int conversions, which is undefined behavior.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.reciprocal, skip_inputs=to_skip, flags=flags)
def test_conjugate_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conjugate, flags=flags)
############################################################################
# Trigonometric Functions
def test_sin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sin, flags=flags, kinds='cf')
def test_cos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cos, flags=flags, kinds='cf')
def test_tan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tan, flags=flags, kinds='cf')
def test_arcsin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsin, flags=flags, kinds='cf')
def test_arccos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccos, flags=flags, kinds='cf')
def test_arctan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan, flags=flags, kinds='cf')
def test_arctan2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan2, flags=flags, kinds='cf')
def test_hypot_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.hypot, kinds='f')
def test_sinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sinh, flags=flags, kinds='cf')
def test_cosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cosh, flags=flags, kinds='cf')
def test_tanh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tanh, flags=flags, kinds='cf')
def test_arcsinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsinh, flags=flags, kinds='cf')
def test_arccosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccosh, flags=flags, kinds='cf')
def test_arctanh_ufunc(self, flags=no_pyobj_flags):
# arctanh is only finite in the open interval (-1, 1).
# This means that for any of the integer types it will produce
# conversion from infinity/-infinity to integer. That's undefined
# behavior in C, so the results may vary from implementation to
# implementation. This means that the result from the compiler
# used to compile NumPy may differ from the result generated by
# llvm. Skipping the integer types in this test avoids failed
# tests because of this.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.arctanh, skip_inputs=to_skip, flags=flags,
kinds='cf')
def test_deg2rad_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.deg2rad, flags=flags, kinds='f')
def test_rad2deg_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rad2deg, flags=flags, kinds='f')
def test_degrees_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.degrees, flags=flags, kinds='f')
def test_radians_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.radians, flags=flags, kinds='f')
############################################################################
# Bit-twiddling Functions
def test_bitwise_and_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_and, flags=flags)
def test_bitwise_or_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_or, flags=flags)
def test_bitwise_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_xor, flags=flags)
def test_invert_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.invert, flags=flags)
def test_bitwise_not_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_not, flags=flags)
# Note: there is no entry for left_shift and right_shift as this harness
# is not valid for them. This is so because left_shift and right
# shift implementation in NumPy has undefined behavior (in C-parlance)
# when the second argument is a negative (or bigger than the number
# of bits) value.
# Also, right_shift for negative first arguments also relies on
# implementation-defined behavior, although numba guarantees "sane"
# behavior (arithmetic shifts on signed integers, logical shifts on
# unsigned integers).
############################################################################
# Comparison functions
def test_greater_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater, flags=flags)
def test_greater_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater_equal, flags=flags)
def test_less_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less, flags=flags)
def test_less_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less_equal, flags=flags)
def test_not_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.not_equal, flags=flags)
def test_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.equal, flags=flags)
def test_logical_and_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_and, flags=flags)
def test_logical_or_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_or, flags=flags)
def test_logical_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_xor, flags=flags)
def test_logical_not_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_not, flags=flags)
def test_maximum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.maximum, flags=flags)
def test_minimum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.minimum, flags=flags)
def test_fmax_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmax, flags=flags)
def test_fmin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmin, flags=flags)
############################################################################
# Floating functions
def bool_additional_inputs(self):
return [
(np.array([True, False], dtype=np.bool_),
types.Array(types.bool_, 1, 'C')),
]
def test_isfinite_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isfinite, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isinf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isinf, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isnan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isnan, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_signbit_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.signbit, flags=flags)
def test_copysign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.copysign, flags=flags, kinds='f')
def test_nextafter_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.nextafter, flags=flags, kinds='f')
@_unimplemented
def test_modf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.modf, flags=flags, kinds='f')
# Note: there is no entry for ldexp as this harness isn't valid for this
# ufunc. this is so because ldexp requires heterogeneous inputs.
# However, this ufunc is tested by the TestLoopTypes test classes.
@_unimplemented
def test_frexp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.frexp, flags=flags, kinds='f')
def test_floor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.floor, flags=flags, kinds='f')
def test_ceil_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.ceil, flags=flags, kinds='f')
def test_trunc_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.trunc, flags=flags, kinds='f')
def test_spacing_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.spacing, flags=flags, kinds='f')
############################################################################
# Other tests
def binary_ufunc_mixed_types_test(self, ufunc, flags=no_pyobj_flags):
ufunc_name = ufunc.__name__
ufunc = _make_ufunc_usecase(ufunc)
inputs1 = [
(1, types.uint64),
(-1, types.int64),
(0.5, types.float64),
(np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
inputs2 = inputs1
output_types = [types.Array(types.int64, 1, 'C'),
types.Array(types.float64, 1, 'C')]
pyfunc = ufunc
for input1, input2, output_type in itertools.product(inputs1, inputs2, output_types):
input1_operand = input1[0]
input1_type = input1[1]
input2_operand = input2[0]
input2_type = input2[1]
# Skip division by unsigned int because of NumPy bugs
if ufunc_name == 'divide' and (input2_type == types.Array(types.uint32, 1, 'C') or
input2_type == types.Array(types.uint64, 1, 'C')):
continue
# Skip some subtraction tests because of NumPy bugs
if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
input2_type == types.uint32 and types.Array(types.int64, 1, 'C'):
continue
if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
input2_type == types.uint64 and types.Array(types.int64, 1, 'C'):
continue
if ((isinstance(input1_type, types.Array) or
isinstance(input2_type, types.Array)) and
not isinstance(output_type, types.Array)):
continue
cr = self.cache.compile(pyfunc,
(input1_type, input2_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input1_operand, np.ndarray):
result = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
elif isinstance(input2_operand, np.ndarray):
result = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input1_operand, input2_operand, result)
pyfunc(input1_operand, input2_operand, expected)
scalar_type = getattr(output_type, 'dtype', output_type)
prec = ('single'
if scalar_type in (types.float32, types.complex64)
else 'double')
self.assertPreciseEqual(expected, result, prec=prec)
def test_broadcasting(self):
# Test unary ufunc
pyfunc = _make_ufunc_usecase(np.negative)
input_operands = [
np.arange(3, dtype='i8'),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3*3, dtype='i8').reshape(3,3)]
output_operands = [
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3)]
for x, result in zip(input_operands, output_operands):
input_type = types.Array(types.uint64, x.ndim, 'C')
output_type = types.Array(types.int64, result.ndim, 'C')
cr = self.cache.compile(pyfunc, (input_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.zeros(result.shape, dtype=result.dtype)
np.negative(x, expected)
cfunc(x, result)
self.assertPreciseEqual(result, expected)
# Test binary ufunc
pyfunc = _make_ufunc_usecase(np.add)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
output_type = types.Array(types.uint64, max(x.ndim, y.ndim), 'C')
cr = self.cache.compile(pyfunc, (input1_type, input2_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = np.zeros(expected.shape, dtype='u8')
cfunc(x, y, result)
self.assertPreciseEqual(result, expected)
def test_implicit_output_npm(self):
with self.assertRaises(TypeError):
def myadd(a0, a1):
return np.add(a0, a1)
arr_ty = types.Array(types.uint64, 1, 'C')
cr = compile_isolated(myadd, (arr_ty, arr_ty),
flags=no_pyobj_flags)
def test_broadcast_implicit_output_npm_nrt(self):
def pyfunc(a0, a1):
return np.add(a0, a1)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8')
import subprocess
lib_list = ['numpy','csv','seaborn','matplotlib']
import importlib  # needed so the module named by the string lib_name can be imported
for lib_name in lib_list:
    try:
        importlib.import_module(lib_name)  # 'import lib_name' would look for a module literally named "lib_name"
    except ImportError:
        print(lib_name, 'module not installed')
        if lib_name == 'csv':
            subprocess.run(['pip', 'install', 'python-csv'])
        else:
            subprocess.run(['pip', 'install', lib_name])
import numpy as np
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
################################################################################
###### Fetch the result data and plot out the PDF and mean+/-STD figures #######
################################################################################
# Function for reading data
def LumenRead(path,numfile):
resultlist = np.empty((0,39), float)
for i in range(numfile):
filename = os.path.join(path,('lumen_area_000'+str("{0:0=3d}".format(i))+'.csv'))
reader = csv.reader(open(filename, "r"), delimiter='\t')
x = list(reader)
result = np.array(x[0][:-1]).astype("float")
resultlist = np.append(resultlist, np.expand_dims(result,axis=0), axis=0)
# print(resultlist.shape)
return resultlist
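# Each call returns an array of shape (numfile, 39): one row of 39 slice areas
# per saved time step (files named lumen_area_000###.csv).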
# Set directory and number of instances in the UQ campaign
data_root = './UQtest/A/'
time_step = 361
# Read the all subdirectory of UQ instances in a list
data_list = [os.path.join(data_root, item) for item in sorted(os.listdir(data_root))]
# Create an empty array and fetch the data in a loop
lumen_list = np.empty((0,time_step,39), float)
for item in data_list:
print('Processing:',item)
Data = LumenRead(item,time_step)
lumen_list = np.append(lumen_list,np.expand_dims(Data,axis=0),axis=0)
print(lumen_list.shape)
np.save('LumenData',lumen_list)
# Calculate the lumen volume from (lumen_area_of_each_slice*depth_of_slice)
lumen_list = np.load('LumenData.npy')
lumen_vol = np.sum(lumen_list[:,:,:],axis=2) * 0.03125
fig = plt.figure()
# plt.plot(np.ones(128)*3.56055,np.linspace(0.0,2.5,128),label='0 days',c='k')
sns.kdeplot(lumen_vol[:,72],label='3 days')
sns.kdeplot(lumen_vol[:,144],label='6 days')
sns.kdeplot(lumen_vol[:,216],label='9 days')
sns.kdeplot(lumen_vol[:,288],label='12 days')
sns.kdeplot(lumen_vol[:,360],label='15 days')
plt.legend(fontsize=10,loc=2)
plt.xlabel('Lumen volume of blood vessel ($mm^3$)',fontsize=12)
plt.ylabel('Probability density function',fontsize=12)
plt.savefig('./'+'pdf.png')
plt.clf()
# plot mean+/-STD
days = np.zeros(361)
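# Hedged continuation sketch (not from the original script): the 361 time steps
# span 15 days (matching the kde labels above), so convert steps to days and
# plot the mean +/- one standard deviation across the UQ instances.
days = np.linspace(0.0, 15.0, 361)
mean_vol = np.mean(lumen_vol, axis=0)
std_vol = np.std(lumen_vol, axis=0)
plt.plot(days, mean_vol, c='k', label='mean')
plt.fill_between(days, mean_vol - std_vol, mean_vol + std_vol, alpha=0.3, label='+/- 1 std')
plt.xlabel('Time (days)', fontsize=12)
plt.ylabel('Lumen volume of blood vessel ($mm^3$)', fontsize=12)
plt.legend(fontsize=10, loc=2)
plt.savefig('./mean_std.png')
plt.clf()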
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
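# e.g. factors(12) -> {1, 2, 3, 4, 6, 12}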
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
NumCppols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + NumCppols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    data = np.random.rand(shape.rows, shape.cols)
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 18:53:22 2021
@author: <NAME>
"""
import argparse
import numpy as np
from zdm import zdm
#import pcosmic
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from scipy import interpolate
import matplotlib
from pkg_resources import resource_filename
import os
import sys
import scipy as sp
import time
from matplotlib.ticker import NullFormatter
from zdm import iteration as it
from zdm import survey
from zdm import cosmology as cos
from zdm import pcosmic
from zdm import beams
from zdm import misc_functions
import pickle
np.seterr(divide='ignore')
####setting up the initial grid and plotting some stuff####
setH0=67.74
cos.set_cosmology(H0=setH0)
# get the grid of p(DM|z)
zDMgrid, zvals,dmvals,H0=misc_functions.get_zdm_grid(H0=setH0,new=True,plot=False,method='analytic')
Wbins=10
Wscale=2
Nbeams=[20,20,20] #Full beam NOT Std
thresh=0
method=2
Wlogmean=1.70267
Wlogsigma=0.899148
sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys/')
lat50=survey.survey()
lat50.process_survey_file(sdir+'CRAFT_class_I_and_II.dat')
DMhalo=50
lat50.init_DMEG(DMhalo)
lat50.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(lat50,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=lat50.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=lat50.wplist
ics=survey.survey()
ics.process_survey_file(sdir+'CRAFT_ICS.dat')
DMhalo=50
ics.init_DMEG(DMhalo)
ics.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ics,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=ics.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=ics.wplist
pks=survey.survey()
pks.process_survey_file(sdir+'parkes_mb_class_I_and_II.dat')
DMhalo=50
pks.init_DMEG(DMhalo)
pks.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(pks,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=pks.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=pks.wplist
ICS892=survey.survey()
ICS892.process_survey_file(sdir+'CRAFT_ICS_892.dat')
ICS892.init_DMEG(DMhalo)
ICS892.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ICS892,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies892=ICS892.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
surveys=[lat50,ics,ICS892,pks]
#updated best-fit values
alpha_method=0
logmean=2.11
logsigma=0.53
alpha=1.55
gamma=-1.09
Emax=10**(41.7)
Emin=10**(30)
sfr_n=1.67
C=3.188
#alpha_method=1
#Emin=10**30
#Emax =10**41.40
#alpha =-0.66
#gamma = -1.01
#sfr_n= 0.73
#logmean=2.18
#logsigma=0.48
#C=2.36 ##it.GetFirstConstantEstimate(grids,surveys,pset)
pset=[np.log10(float(Emin)),np.log10(float(Emax)),alpha,gamma,sfr_n,logmean,logsigma,C,setH0]
it.print_pset(pset)
grids=misc_functions.initialise_grids(surveys,zDMgrid, zvals,dmvals,pset,wdist=True,source_evolution=0,alpha_method=0)
plots=False
zmax=[0.6,1,1,3]
DMmax=[1500,2000,2000,3000]
zmax2=[0.75,1,1,3]
DMmax2=[1000,2000,2000,4000]
if plots:
for i in range (len(surveys)):
grid=grids[i]
sv=surveys[i]
pcosmic.plot_mean(zvals,'mean_DM.pdf')
#misc_functions.plot_efficiencies(lat50)
misc_functions.plot_zdm_basic_paper(grid.grid,grid.zvals,grid.dmvals,zmax=3,DMmax=3000,
name='Plots/p_dm_z_grid_image.pdf',norm=1,log=True,
label='$\\log_{10}p(DM_{\\rm EG}|z)$',
conts=[0.16,0.5,0.88],title='Grid at H0 '+str(i),
H0=setH0,showplot=True)
misc_functions.plot_zdm_basic_paper(grid.smear_grid,grid.zvals,grid.dmvals,zmax=3,
DMmax=3000,norm=1,log=True,
ylabel='${\\rm DM_{\\rm EG}}$',
label='$\\log_{10} p({\\rm DM_{cosmic}+DM_{host}}|z)$',
conts=[0.023, 0.159,0.5,0.841,0.977],
title='Smear grid at H0 '+str(i),H0=setH0,
showplot=True)
misc_functions.plot_grid_2(grid.pdv,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
name='Plots/pdv.pdf',norm=2,log=True
,label='$p(DM_{\\rm EG},z)dV$ [Mpc$^3$]',
title="Pdv at H0" + str(i),showplot=True)
muDM=10**pset[5]
Macquart=muDM
misc_functions.plot_grid_2(grid.rates,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
norm=2,log=True,label='$\\log_{10} p({\\rm DM}_{\\rm EG},z)$',
project=False,FRBDM=sv.DMEGs,FRBZ=None,Aconts=[0.01,0.1,0.5],
Macquart=Macquart,title="H0 value "+str(i),H0= setH0,showplot=True)
misc_functions.make_dm_redshift(grid,
DMmax=DMmax2[i],zmax=zmax2[i],loc='upper right',Macquart=Macquart,
H0=setH0,showplot=True)
print ("initial grid setup done")
scanoverH0=False
# just testing....should NOT be used (update_grid routine should not be modified)
if scanoverH0:
for k in range (len(surveys)):
grid=grids[k]
sv=surveys[k]
###### shows how to do a 1D scan of parameter values #######
pset=[np.log10(float(grid.Emin)),np.log10(float(grid.Emax)),grid.alpha,grid.gamma,grid.sfr_n,grid.smear_mean,grid.smear_sigma,C,grid.H0]
#lEmaxs=np.linspace(40,44,21)
#lscan,lllist,expected=it.scan_likelihoods_1D(grid,pset,lat50,1,lEmaxs,norm=True)
#print (lscan, lllist, expected)
#misc_functions.plot_1d(lEmaxs,lscan,'$E_{\\rm max}$','Plots/test_lik_fn_emax.pdf')
#for H0
t0=time.process_time()
H0iter=np.linspace(50,100,4)
lscanH0,lllistH0,expectedH0=it.scan_likelihoods_1D(grid,pset,sv,8,H0iter,norm=True)
misc_functions.plot_1d(H0iter,lscanH0,'$H_{\\rm 0}$','Plots/test_lik_fn_emax.pdf')
t1=time.process_time()
print (lscanH0,"done")
print ("Took ",t1-t0,"seconds")
def scan_H0(H0_start,H0_stop,n_iterations,surveys,plots=False):
"""Routine for scanning over H0 values in 1D"""
t0=time.process_time()
H0values=np.linspace(H0_start,H0_stop,n_iterations)
H0likes=[]
for i in H0values:
setH0=i
cos.set_cosmology(H0=setH0)
zDMgrid, zvals,dmvals,H0=misc_functions.get_zdm_grid(H0=setH0,new=True,plot=False,
method='analytic')
mean=10**2.16
sigma=10**0.51
        logmean=np.log10(mean)
import copy
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from multiagent.multi_discrete import MultiDiscrete
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, world, scenario, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None,
discrete_action_space=False,
discrete_action_input=False):
self.world = world
self.scenario = scenario
self.agents = self.world.policy_agents
self.num_agents = len(self.agents)
# set required vectorized gym env property
self.n = len(world.policy_agents)
# scenario callbacks
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
self.done_callback = done_callback
# environment parameters
# self.discrete_action_space = True
self.discrete_action_space = discrete_action_space
# if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
# self.discrete_action_input = False
self.discrete_action_input = discrete_action_input
# if true, even the action is continuous, action will be performed discretely
self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
# if true, every agent has the same reward
self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
self.time = 0
# configure spaces
if self.discrete_action_space:
self.action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
self.action_space = spaces.Box(low=-1., high=+1., shape=(world.dim_p,), dtype=np.float32)
if self.scenario.shared_obs:
obs_dim = len(observation_callback(self.world))
else:
obs_dim = len(observation_callback(self.world)[0])
self.observation_space = spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32)
# rendering
self.viewer = None
self._reset_render()
def step(self, action_n):
action_n = copy.deepcopy(action_n)
self.agents = self.world.policy_agents
# set action for each agent
for i, agent in enumerate(self.agents):
self._set_action(action_n[i], agent, self.action_space)
# advance world state
self.world.step()
# record observation for each agent
obs = self._get_obs()
reward_n = self._get_reward()
done = self._get_done()
info = self._get_info()
# all agents get total reward in cooperative case
if self.shared_reward:
reward = np.mean(reward_n)
reward_n = [reward] * self.n
return obs, reward_n, done, info
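    # Typical interaction loop (illustrative sketch only; how actions are built depends on
    # the scenario and on whether discrete_action_input is set):
    #   obs = env.reset()
    #   for _ in range(num_steps):
    #       obs, rewards, done, info = env.step(actions)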
def reset(self):
# reset world
self.reset_callback(self.world)
# reset renderer
self._reset_render()
# record observations for each agent
self.agents = self.world.policy_agents
obs = self._get_obs()
return obs
# get info used for benchmarking
def _get_info(self):
if self.info_callback is None:
return {}
return self.info_callback(self.world)
# get observation for a particular agent
def _get_obs(self):
if self.observation_callback is None:
return None
return self.observation_callback(self.world)
# get dones for a particular agent
# unused right now -- agents are allowed to go beyond the viewing screen
def _get_done(self):
if self.done_callback is None:
return np.array([False]*self.num_agents)
return self.done_callback(self.world)
# get reward for a particular agent
def _get_reward(self):
if self.reward_callback is None:
return None
return self.reward_callback(self.world)
# set env action for a particular agent
def _set_action(self, action, agent, action_space, time=None):
        agent.action.u = np.zeros(self.world.dim_p)
import os
import matplotlib.pyplot as plt
import numpy as np
import serpentTools as st
def reactivity(keff, keffError):
reactivityArray = (keff - 1) / keff * 1e5
reactivityArray_error = keffError / keff * 1e5
return [reactivityArray, reactivityArray_error]
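# Illustrative usage of reactivity() (hypothetical keff values, not real Serpent output):
#   rho, drho = reactivity(np.array([1.02]), np.array([5e-4]))
#   # rho  ~ [1960.8] pcm, since (1.02 - 1) / 1.02 * 1e5 ~ 1960.8
#   # drho ~ [49.0] pcm,   since 5e-4 / 1.02 * 1e5 ~ 49.0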
def OLSfit(x, y, dy=None):
"""Find the best fitting parameters of a linear fit to the data through the
method of ordinary least squares estimation. (i.e. find m and b for
y = m*x + b)
Args:
x: Numpy array of independent variable data
y: Numpy array of dependent variable data. Must have same size as x.
dy: Numpy array of dependent variable standard deviations. Must be same
size as y.
Returns: A list with four floating point values. [m, dm, b, db]
"""
if dy is None:
# if no error bars, weight every point the same
dy = np.ones(x.size)
    denom = np.sum(1 / dy ** 2) * np.sum((x / dy) ** 2)
#Fourier Coefficient PCA visualizer using Dash
# Relevant references
# Dash tutorial: https://www.youtube.com/watch?v=hSPmj7mK6ng&ab_channel=CharmingData
# Did similar layout to: https://github.com/plotly/dash-svm/blob/master/app.py
#Plotting
import dash
import dash_core_components as dcc
import dash_html_components as html
import utils.dash_reusable_components as drc
from dash.dependencies import Input,Output,State
import plotly.express as px
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import math
import sys
from model_framekwork import model_loader, model_prediction
#Import the model
if(len(sys.argv)>1):
filename = sys.argv[1]
else:
filename = 'most_recent_model.csv'
model = model_loader(filename)
#Initialize the app
app = dash.Dash(__name__)
server = app.server
#Setup the Required Math
#--------------------------------------------------------------------------
#Load the numpy array from memory that contains the fourier coefficients
Ξ = np.load('UM - fourier coefficient matrix.npy')
print("Ξ shape is" + str(Ξ.shape))
G = np.load('UM - lambda Gram matrix.npy')
#Verify that the G matrix is at least positive semidefinite
#To be psd or pd, G=G^T
assert(np.linalg.norm(G-G.T)<1e-7)
#Diagonalize the matrix G as G = OVO
eig, O = np.linalg.eigh(G)
V = np.diagflat(eig)
#Additionally, verify that all the eigenvalues are positive (so G is positive definite)
for e in eig:
assert (e>=0)
assert( e>0) # pd
# Verify that it diagonalized correctly G = O (eig) O.T
assert(np.linalg.norm(G - O @ V @ O.T)< 1e-7 * np.linalg.norm(G)) # passes
# print(np.linalg.norm(G - O @ V @ O.T)) # good
# print(np.linalg.norm(G - O.T @ V @ O)) # false
# print(np.linalg.norm(G - sum([O[:,[i]]@ O[:,[i]].T * eig[i] for i in range(len(eig))]))) # good
# print(np.linalg.norm(G)) # false
#This is based on the equation in eq:Qdef
# Q G Q = I
Q = sum([O[:,[i]] @ O[:,[i]].T * 1/np.sqrt(eig[i]) for i in range(len(eig))])
Qinv = sum([O[:,[i]] @ O[:,[i]].T * np.sqrt(eig[i]) for i in range(len(eig))])
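# Optional sanity check (not in the original script): by construction Q @ G @ Q should be
# the identity matrix, which is what makes Lambda_hat(x) = Lambda(x) @ Q orthonormal.
assert(np.linalg.norm(Q @ G @ Q - np.eye(len(eig))) < 1e-6 * np.sqrt(len(eig)))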
#Change of basis conversions
def param_to_orthonormal(ξ):
return Qinv @ ξ
def param_from_orthonormal(ξ):
return Q @ ξ
def matrix_to_orthonormal(Ξ):
return Ξ @ Qinv
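# Optional round-trip check (not in the original script): the two conversions are inverses,
# so mapping a coefficient vector to the orthonormal basis and back is (numerically) a no-op.
assert np.allclose(param_from_orthonormal(param_to_orthonormal(Ξ[0])), Ξ[0])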
# IF we had to calculate G by hand:
# G = sum ( R_p.T @ R_p)
# prime G = I = Q G Q = sum (Q @ R_p.T @ R_p @ Q)
# Lambda_hat(x) = Lambda(x) @ Q
# theta = Lambda(x) * ξ = Lambda_hat(x) [Q^{-1} ξ]
ξ_avg = np.mean(Ξ, axis=0)
#Subtract the average row
Ξ0 = Ξ - ξ_avg
#Calculate the coefficients in the orthonormal space
Ξ0prime = matrix_to_orthonormal(Ξ0)
#The following two lines show incorrect conversions
#Ξ0prime = Qinv @ Ξ0
#This is incorrect since Ξ is matrix version of ξ that is transposed
#Ξ0prime = Ξ0 @ Q
#This is incorrect since prime is in the orthonormal space
#Get the covariance matrix for this
Σ = Ξ0prime.T @ Ξ0prime / (Ξ0prime.shape[0]-1)
#Calculate the eigendecomposition of the covariance matrix
ψinverted, Uinverted = np.linalg.eigh(Σ)
#Eigenvalues are returned from smallest to largest; reorder them from largest to smallest
ψs = np.flip(ψinverted)
Ψ = np.diagflat(ψs)
#If we reorder the eigenvalues we also need to reorder the eigenvectors to match
U = np.flip(Uinverted, axis=1)
#Run tests to make sure that this is working
assert(np.linalg.norm(Σ - U @ Ψ @ U.T)< 1e-7 * np.linalg.norm(Σ)) # passes
for i in range(len(ψs)-1):
assert(ψs[i] > ψs[i+1])
#Define the number of principal axes that we want
η = 6
ss = []
#Convert from the new basis back to the original basis vectors
for i in range (0,η):
ss.append(param_from_orthonormal(U[:,i]*np.sqrt(ψs[i])))
#ss.append(param_from_orthonormal(U[:,i]))
# print("At i = {}".format(i))
# print("Shape of the Si is")
# print(ss[i-1].shape)
# print("The shape of Ui is")
# print(U[:,-i].shape)
"""
"""
# print("ξ average")
# print(ξ_avg.shape)
# print("Test sum")
#test = ss[0] + ss[1] + ss[2] + ss[3]
# print(test)
# print(test.shape)
# print("shape of Q")
# print(Q.shape)
# print("Shape of Qinv")
# print(Qinv.shape)
#Make sure that our new basis meets the criteria that we had
#To be more specific, make sure that the rms error is close to one for
#different unit vectors
num_params=ξ_avg.shape[0]
#Recreate the phi and step length inputs
phi=np.linspace(0,1,150)#.reshape(1,150)
step_length_array=np.full((150,),1)
for i in range(num_params):
ξtest = np.zeros(num_params)
ξtest[i] = 1
ξtest_converted = param_from_orthonormal(ξtest)
test_deviation_function = model(ξtest_converted,phi,step_length_array)
#print(str(test_deviation_function.size))
test_rmse = np.sqrt(np.mean(np.square(test_deviation_function)))
#print(ξtest)
#print(test_rmse)
#assert(1-1e-3 < abs(test_rmse) < 1+1e-3)
#--------------------------------------------------------------------------
#Setup the visualization
#--------------------------------------------------------------------------
#Define an axis for every pca axis that we have up to η
pca_input_list=[Input('pca'+str(i),'value') for i in range(η)]
#Update the cumulative variance graph; this should only run once
def update_variance_graph():
return px.line(y=np.cumsum([0]+list((ψs[0:η])/sum(ψs))), x=range(η+1), title='Cumulative Sum of variance', labels={'x': 'pca axis', 'y': 'percentage of variance covered'})
#Configure the sliders for step length
pca_sliders=[]
marker_step = 6
marker_min = 0.8
marker_range = 1.2
slider_marks1 = {i: "{0:.2f}".format(i) for i in np.append([0],np.linspace(marker_min, marker_range, num=marker_step))}
#print(slider_marks1)
pca_sliders+=[drc.NamedSlider(
name='Step length (meters)',
id='step-length',
min=.8,
max=1.2,
step=.4/marker_step,
marks=slider_marks1,
value = 1
)]
#Configure the sliders for phase dot
marker_steps = 10
marker_min = 0.2
marker_max = 2
slider_marks2 = {i: "{0:.2f}".format(i) for i in np.append([0], np.linspace(marker_min, marker_max, num=marker_steps))}
"""Contains a set of misc. useful tools for the compressive learning toolbox"""
import numpy as np
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
############################
# DATASET GENERATION TOOLS #
############################
def generatedataset_GMM(d,K,n,output_required='dataset',balanced=True,normalize=None,**generation_params):
"""
Generate a synthetic dataset according to a Gaussian Mixture Model distribution.
Parameters
----------
d: int, the dataset dimension
K: int, the number of Gaussian modes
n: int, the number of elements in the dataset (cardinality)
output_required: string (default='dataset'), specifies the required outputs (see below). Available options:
- 'dataset': returns X, the dataset;
- 'GMM': returns (X,GMM), where GMM = (weigths,means,covariances) is a tuple describing the generating mixture;
- 'labels': returns (X,y), the dataset and the associated labels (e.g., for classification)
- 'all': returns (X,y,GMM)
balanced: - bool (default=True), if True the Gaussians have the same weights, or
        - real (must be > 0.), strength of weight imbalance (~0 is very imbalanced, > K is fairly balanced)
normalize: string (default=None), if not None describes how to normalize the dataset. Available options:
- 'l_2-unit-ball': the dataset is scaled in the l_2 unit ball (i.e., all l_2 norms are <= 1)
- 'l_inf-unit-ball': the dataset is projected in the l_inf unit ball (i.e., all entries are <= 1)
Returns
-------
out: array-like or tuple, a combination of the following items (see desciption of output_required):
- X: (n,d)-numpy array containing the samples; only output by default
- weigths: (K,)-numpy array containing the weigthing factors of the Gaussians
- means: (K,d)-numpy array containing the means of the Gaussians
- covariances: (K,d,d)-numpy array containing the covariance matrices of the Gaussians
- y: (n,)-numpy array containing the labels (from 0 to K, one per mode) associated with the items in X
Other Parameters
----------------
TODO UPDATE THIS
isotropic: bool (default=True), if True each Gaussian has covariance of type scalar*Identity
"""
## STEP 0: Parse input generation parameters
# Default generation parameters
_gen_params = {
'separation_scale': (10/np.sqrt(d)), # Separation of the Gaussians
'separation_min': 0, # Before norm
'covariance_variability_inter': 8., # between clusters
'covariance_variability_intra': 3., # inside one mode
'all_covariance_scaling': 0.1}
# Check the inputs, if it's a valid parameter overwrite it in the internal parameters dict "_gen_params"
for param_name in generation_params:
if param_name in _gen_params.keys():
_gen_params[param_name] = generation_params[param_name]
else:
raise ValueError('Unrecognized parameter: {}'.format(param_name))
if _gen_params['separation_min'] > 2 * _gen_params['separation_scale']:
print("WARNING: minimum separation too close to typical separation scale, finding separated clusters might be hard")
## STEP 1: generate the weights of the Gaussian modes
# Convert input to a "randomness strength"
if isinstance(balanced,bool):
weight_perturbation_strength = 0. if balanced else 3.
else:
weight_perturbation_strength = 1./balanced
# Generate random weigths, normalize
weights = np.ones(K) + weight_perturbation_strength*np.random.rand(K)
weights /= np.sum(weights)
# Avoid almost empty classes
minweight = min(0.005,(K-1)/(n-1)) # Some minimum weight to avoid empty classes
weights[np.where(weights < minweight)[0]] = minweight
## STEP 2: Draw the assignations of each of the vectors to assign
y = np.random.choice(K,n,p=weights)
## STEP 3: Fill the dataset
# Pre-allocate memory
X = np.empty((n,d))
means = np.empty((K,d))
covariances = np.empty((K,d,d))
# Loop over the modes and generate each Gaussian
for k in range(K):
# Generate mean for this mode
successful_mu_generation = False
while not successful_mu_generation:
mu_this_mode = _gen_params['separation_scale']*np.random.randn(d)
if k == 0 or _gen_params['separation_min'] == 0:
successful_mu_generation = True
else:
distance_to_closest_mode = min(np.linalg.norm(mu_this_mode - mu_other) for mu_other in means[:k])
successful_mu_generation = distance_to_closest_mode > _gen_params['separation_min']
# Generate covariance for this mode
scale_variance_this_mode = 1. + np.random.uniform(0,_gen_params['covariance_variability_inter'])
scale_variance_this_mode *= _gen_params['all_covariance_scaling'] # take into account global scaling
unscaled_variances_this_mode = np.ones(d) + np.random.uniform(0,_gen_params['covariance_variability_intra'],d)
Sigma_this_mode = scale_variance_this_mode*np.diag(unscaled_variances_this_mode)
# Save the mean and covariance
means[k] = mu_this_mode
covariances[k] = Sigma_this_mode
# Get the indices we have to fill
indices_for_this_mode = np.where(y == k)[0]
nb_samples_in_this_mode = indices_for_this_mode.size
# Fill the dataset with samples drawn from the current mode
X[indices_for_this_mode] = np.random.multivariate_normal(mu_this_mode, Sigma_this_mode, nb_samples_in_this_mode)
## STEP 4: If needed, normalize the dataset
if normalize is not None:
if normalize in ['l_2-unit-ball']:
            maxNorm = np.linalg.norm(X,axis=1).max() + 1e-6 # small margin so norms stay strictly inside the unit ball
elif normalize in ['l_inf-unit-ball']:
maxNorm = np.abs(X).max() + 1e-6
else:
            raise Exception('Unrecognized normalization method ({}). Aborting.'.format(normalize))
# Normalize by maxNorm
X /= maxNorm
means /= maxNorm
covariances /= maxNorm**2
## STEP 5: output
if output_required == 'dataset':
out = X
elif output_required == 'GMM':
out = (X,(weights,means,covariances))
elif output_required == 'labels':
out = (X,y)
elif output_required == 'all':
out = (X,y,(weights,means,covariances))
else:
raise ValueError('Unrecognized output_required ({})'.format(output_required))
return out
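# Illustrative usage of generatedataset_GMM (hypothetical parameter values, sketch only):
#   X, y, (weights, means, covariances) = generatedataset_GMM(
#       d=2, K=3, n=1000, output_required='all', normalize='l_2-unit-ball')
#   # X: (1000, 2) samples, y: (1000,) labels in {0, ..., K-1},
#   # weights: (3,), means: (3, 2), covariances: (3, 2, 2)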
def generateCirclesDataset(K,n,normalize):
"""
Generate a synthetic 2-D dataset comprising concentric circles/shells.
Parameters
----------
K: int, the number of circles modes
n: int, the number of elements in the dataset (cardinality)
normalize: string (default=None), if not None describes how to normalize the dataset. Available options:
- 'l_2-unit-ball': the dataset is scaled in the l_2 unit ball (i.e., all l_2 norms are <= 1)
- 'l_inf-unit-ball': the dataset is projected in the l_inf unit ball (i.e., all entries are <= 1)
Returns
-------
out: X: (n,d)-numpy array containing the samples.
"""
weigths = np.ones(K)/K # True, ideal weigths (balanced case)
classSizes = np.ones(K) # Actual samples per class
# (note: we enforce that weigths is the *actual* proportions in this dataset)
## Select number of samples of each mode
balanced = True # FOR NOW,TODO CHANGE LATER
if balanced:
classSizes[:-1] = int(n/K)
classSizes[-1] = n - (K-1)*int(n/K) # ensure we have exactly n samples in dataset even if n % K != 0
else:
minweight = min(0.01,(K-1)/(n-1)) # Some minimum weight to avoid empty classes
weigths = np.random.uniform(minweight,1,K)
weigths = weigths/np.sum(weigths) # Normalize
classSizes[:-1] = (weigths[:-1]*n).astype(int)
        classSizes[-1] = n - np.sum(classSizes[:-1])
import numpy as np
from pyvf.utils import CreateHexGrid,CreateClumpsGrid,CreateSqGrid,stem_map_from_table
class Treelist:
def __init__(self,tpa,vf_species,dbh,height=None,tree_id=None,structure=None,condition=None,sp_name=None,sp_id=None,cubic=None,scribner=None,biomass=None):
"""This class stores location independent tree records for a single stand and time period. Input 1D lists or arrays of equal size.
Arguments:
tpa: list of trees per acre values (must use acres)
vf_species: list of 2-3 letter vf species codes
dbh: list of dbh values (diameter at breast height in inches)
height: list of tree height values in feet
tree_id: list of custom ID values. Purely for organization.
structure: list of strings representing a "structure" in VF. For example "S1", "S2", "S3", etc
condition: list of strings representing tree condition. Valid options are "GREEN", "SNAG", "STUMP", "BURN1", "BURN2", "BURN3"
sp_name: list of strings representing recorded/displayed species name
sp_id: list of strings representing recorded/displayed species ID
cubic: list of values representing tree cubic volume (cubic feet)
scribner: list of values representing tree scribner volume
biomass: list of values representing tree biomass
"""
self.tpa=np.array(tpa).astype(float)
self.vf_species=np.array(vf_species)
self.dbh=np.array(dbh).astype(float)
n=self.tpa.shape
self.height=np.full(n,np.nan)
self.tree_id=np.full(n,np.nan)
self.structure=np.full(n,np.nan)
self.condition = np.full(n,np.nan)
self.sp_name = np.full(n,np.nan)
self.sp_id = np.full(n,np.nan)
self.cubic = np.full(n,np.nan)
self.scribner = np.full(n,np.nan)
self.biomass = np.full(n,np.nan)
if height is not None:
self.height=np.array(height).astype(float)
if tree_id is not None:
self.tree_id=np.array(tree_id).astype(str)
if structure is not None:
            self.structure=np.array(structure)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
from regions import CircleSkyRegion
from gammapy.catalog import SourceCatalog3FHL
from gammapy.data import GTI
from gammapy.datasets import Datasets, MapDataset, MapDatasetOnOff
from gammapy.datasets.map import MapEvaluator, RAD_AXIS_DEFAULT
from gammapy.irf import (
EDispKernelMap,
EDispMap,
EnergyDispersion2D,
EffectiveAreaTable2D,
EnergyDependentMultiGaussPSF,
PSFMap,
PSFKernel,
)
from gammapy.makers.utils import make_map_exposure_true_energy, make_psf_map
from gammapy.maps import (
Map,
MapAxis,
WcsGeom,
WcsNDMap,
RegionGeom,
RegionNDMap,
HpxGeom,
)
from gammapy.modeling import Fit
from gammapy.modeling.models import (
FoVBackgroundModel,
GaussianSpatialModel,
Models,
PointSpatialModel,
PowerLawSpectralModel,
SkyModel,
ConstantSpectralModel,
DiskSpatialModel,
)
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
from gammapy.utils.gauss import Gauss2DPDF
@pytest.fixture
def geom_hpx():
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=4, name="energy_true"
)
geom = HpxGeom.create(nside=32, axes=[axis], frame="galactic")
return {"geom": geom, "energy_axis_true": energy_axis_true}
@pytest.fixture
def geom_hpx_partial():
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=4, name="energy_true"
)
geom = HpxGeom.create(
nside=32, axes=[axis], frame="galactic", region="DISK(110.,75.,10.)"
)
return {"geom": geom, "energy_axis_true": energy_axis_true}
@pytest.fixture
def geom():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_etrue():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3, name="energy_true")
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_image():
energy = np.logspace(-1.0, 1.0, 2)
axis = MapAxis.from_edges(energy, name="energy", unit=u.TeV, interp="log")
return WcsGeom.create(
skydir=(0, 0), binsz=0.02, width=(2, 2), frame="galactic", axes=[axis]
)
def get_exposure(geom_etrue):
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
aeff = EffectiveAreaTable2D.read(filename, hdu="EFFECTIVE AREA")
exposure_map = make_map_exposure_true_energy(
pointing=SkyCoord(1, 0.5, unit="deg", frame="galactic"),
livetime=1 * u.hr,
aeff=aeff,
geom=geom_etrue,
)
return exposure_map
def get_psf():
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
geom = WcsGeom.create(
skydir=(0, 0),
frame="galactic",
binsz=2,
width=(2, 2),
axes=[RAD_AXIS_DEFAULT, psf.axes["energy_true"]],
)
return make_psf_map(
psf=psf,
pointing=SkyCoord(0, 0.5, unit="deg", frame="galactic"),
geom=geom,
exposure_map=Map.from_geom(geom.squash("rad"), unit="cm2 s"),
)
@requires_data()
def get_edisp(geom, geom_etrue):
filename = "$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz"
edisp2d = EnergyDispersion2D.read(filename, hdu="EDISP")
energy = geom.axes["energy"].edges
energy_true = geom_etrue.axes["energy_true"].edges
edisp_kernel = edisp2d.to_edisp_kernel(
offset="1.2 deg", energy=energy, energy_true=energy_true
)
edisp = EDispKernelMap.from_edisp_kernel(edisp_kernel)
return edisp
@pytest.fixture
def sky_model():
spatial_model = GaussianSpatialModel(
lon_0="0.2 deg", lat_0="0.1 deg", sigma="0.2 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
return SkyModel(
spatial_model=spatial_model, spectral_model=spectral_model, name="test-model"
)
def get_map_dataset(geom, geom_etrue, edisp="edispmap", name="test", **kwargs):
"""Returns a MapDatasets"""
# define background model
background = Map.from_geom(geom)
background.data += 0.2
psf = get_psf()
exposure = get_exposure(geom_etrue)
e_reco = geom.axes["energy"]
e_true = geom_etrue.axes["energy_true"]
if edisp == "edispmap":
edisp = EDispMap.from_diagonal_response(energy_axis_true=e_true)
data = exposure.get_spectrum(geom.center_skydir).data
edisp.exposure_map.data = np.repeat(data, 2, axis=-1)
elif edisp == "edispkernelmap":
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=e_reco, energy_axis_true=e_true
)
data = exposure.get_spectrum(geom.center_skydir).data
edisp.exposure_map.data = np.repeat(data, 2, axis=-1)
else:
edisp = None
# define fit mask
center = SkyCoord("0.2 deg", "0.1 deg", frame="galactic")
circle = CircleSkyRegion(center=center, radius=1 * u.deg)
mask_fit = geom.region_mask([circle])
models = FoVBackgroundModel(dataset_name=name)
return MapDataset(
models=models,
exposure=exposure,
background=background,
psf=psf,
edisp=edisp,
mask_fit=mask_fit,
name=name,
**kwargs,
)
@requires_data()
def test_map_dataset_str(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
assert "MapDataset" in str(dataset)
assert "(frozen)" in str(dataset)
assert "background" in str(dataset)
dataset.mask_safe = None
assert "MapDataset" in str(dataset)
def test_map_dataset_str_empty():
dataset = MapDataset()
assert "MapDataset" in str(dataset)
@requires_data()
def test_fake(sky_model, geom, geom_etrue):
"""Test the fake dataset"""
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert np.all(npred.data >= 0) # npred must be positive
dataset.counts = npred
real_dataset = dataset.copy()
dataset.fake(314)
assert real_dataset.counts.data.shape == dataset.counts.data.shape
assert_allclose(real_dataset.counts.data.sum(), 9525.299054, rtol=1e-5)
assert_allclose(dataset.counts.data.sum(), 9711)
@requires_data()
def test_different_exposure_unit(sky_model, geom):
energy_range_true = np.logspace(2, 4, 3)
axis = MapAxis.from_edges(
energy_range_true, name="energy_true", unit="GeV", interp="log"
)
geom_gev = geom.to_image().to_cube([axis])
dataset = get_map_dataset(geom, geom_gev, edisp="None")
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert_allclose(npred.data[0, 50, 50], 6.086019, rtol=1e-2)
@pytest.mark.parametrize(("edisp_mode"), ["edispmap", "edispkernelmap"])
@requires_data()
def test_to_spectrum_dataset(sky_model, geom, geom_etrue, edisp_mode):
dataset_ref = get_map_dataset(geom, geom_etrue, edisp=edisp_mode)
bkg_model = FoVBackgroundModel(dataset_name=dataset_ref.name)
dataset_ref.models = [sky_model, bkg_model]
dataset_ref.counts = dataset_ref.npred_background() * 0.0
dataset_ref.counts.data[1, 50, 50] = 1
dataset_ref.counts.data[1, 60, 50] = 1
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset_ref.gti = gti
on_region = CircleSkyRegion(center=geom.center_skydir, radius=0.05 * u.deg)
spectrum_dataset = dataset_ref.to_spectrum_dataset(on_region)
spectrum_dataset_corrected = dataset_ref.to_spectrum_dataset(
on_region, containment_correction=True
)
mask = np.ones_like(dataset_ref.counts, dtype="bool")
mask[1, 40:60, 40:60] = 0
dataset_ref.mask_safe = Map.from_geom(dataset_ref.counts.geom, data=mask)
spectrum_dataset_mask = dataset_ref.to_spectrum_dataset(on_region)
assert np.sum(spectrum_dataset.counts.data) == 1
assert spectrum_dataset.data_shape == (2, 1, 1)
assert spectrum_dataset.background.geom.axes[0].nbin == 2
assert spectrum_dataset.exposure.geom.axes[0].nbin == 3
assert spectrum_dataset.exposure.unit == "m2s"
energy_axis = geom.axes["energy"]
assert (
spectrum_dataset.edisp.get_edisp_kernel(energy_axis=energy_axis)
.axes["energy"]
.nbin
== 2
)
assert (
spectrum_dataset.edisp.get_edisp_kernel(energy_axis=energy_axis)
.axes["energy_true"]
.nbin
== 3
)
assert_allclose(spectrum_dataset.edisp.exposure_map.data[1], 3.070917e09, rtol=1e-5)
assert np.sum(spectrum_dataset_mask.counts.data) == 0
assert spectrum_dataset_mask.data_shape == (2, 1, 1)
assert spectrum_dataset_corrected.exposure.unit == "m2s"
assert_allclose(spectrum_dataset.exposure.data[1], 3.070884e09, rtol=1e-5)
assert_allclose(spectrum_dataset_corrected.exposure.data[1], 2.05201e09, rtol=1e-5)
@requires_data()
def test_info_dict(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["exposure_min"].value, 8.32e8, rtol=1e-3)
assert_allclose(info_dict["exposure_max"].value, 1.105e10, rtol=1e-3)
assert info_dict["exposure_max"].unit == "m2 s"
assert info_dict["name"] == "test"
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["sqrt_ts"], 74.024180, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["ontime"].value, 3600)
assert info_dict["name"] == "test"
def get_fermi_3fhl_gc_dataset():
counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
background = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz"
)
bkg_model = FoVBackgroundModel(dataset_name="fermi-3fhl-gc")
exposure = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz"
)
return MapDataset(
counts=counts,
background=background,
models=[bkg_model],
exposure=exposure,
name="fermi-3fhl-gc",
)
@requires_data()
def test_resample_energy_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
new_axis = MapAxis.from_edges([10, 35, 100] * u.GeV, interp="log", name="energy")
grouped = dataset.resample_energy_axis(energy_axis=new_axis)
assert grouped.counts.data.shape == (2, 200, 400)
assert grouped.counts.data[0].sum() == 28581
assert_allclose(
grouped.npred_background().data.sum(axis=(1, 2)),
[25074.366386, 2194.298612],
rtol=1e-5,
)
assert_allclose(grouped.exposure.data, dataset.exposure.data, rtol=1e-5)
axis = grouped.counts.geom.axes[0]
npred = dataset.npred()
npred_grouped = grouped.npred()
assert_allclose(npred.resample_axis(axis=axis).data.sum(), npred_grouped.data.sum())
@requires_data()
def test_to_image_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
dataset_im = dataset.to_image()
assert dataset_im.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(dataset_im.npred_background().data.sum(), 28548.625, rtol=1e-5)
assert_allclose(dataset_im.exposure.data, dataset.exposure.data, rtol=1e-5)
npred = dataset.npred()
npred_im = dataset_im.npred()
assert_allclose(npred.data.sum(), npred_im.data.sum())
def test_to_image_mask_safe():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.5, width=(1, 1), frame="icrs", axes=[axis]
)
dataset = MapDataset.create(geom)
# Check map_safe handling
data = np.array([[[False, True], [True, True]], [[False, False], [True, True]]])
dataset.mask_safe = WcsNDMap.from_geom(geom=geom, data=data)
dataset_im = dataset.to_image()
assert dataset_im.mask_safe.data.dtype == bool
desired = np.array([[False, True], [True, True]])
assert (dataset_im.mask_safe.data == desired).all()
# Check that missing entries in the dataset do not break
dataset_copy = dataset.copy()
dataset_copy.exposure = None
dataset_im = dataset_copy.to_image()
assert dataset_im.exposure is None
dataset_copy = dataset.copy()
dataset_copy.counts = None
dataset_im = dataset_copy.to_image()
assert dataset_im.counts is None
@requires_data()
def test_downsample():
dataset = get_fermi_3fhl_gc_dataset()
downsampled = dataset.downsample(2)
assert downsampled.counts.data.shape == (11, 100, 200)
assert downsampled.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(
downsampled.npred_background().data.sum(axis=(1, 2)),
dataset.npred_background().data.sum(axis=(1, 2)),
rtol=1e-5,
)
assert_allclose(downsampled.exposure.data[5, 50, 100], 3.318082e11, rtol=1e-5)
with pytest.raises(ValueError):
dataset.downsample(2, axis_name="energy")
@requires_data()
def test_map_dataset_fits_io(tmp_path, sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
hdulist = dataset.to_hdulist()
actual = [hdu.name for hdu in hdulist]
desired = [
"PRIMARY",
"COUNTS",
"COUNTS_BANDS",
"EXPOSURE",
"EXPOSURE_BANDS",
"BACKGROUND",
"BACKGROUND_BANDS",
"EDISP",
"EDISP_BANDS",
"EDISP_EXPOSURE",
"EDISP_EXPOSURE_BANDS",
"PSF",
"PSF_BANDS",
"PSF_EXPOSURE",
"PSF_EXPOSURE_BANDS",
"MASK_SAFE",
"MASK_SAFE_BANDS",
"MASK_FIT",
"MASK_FIT_BANDS",
"GTI",
]
assert actual == desired
dataset.write(tmp_path / "test.fits")
dataset_new = MapDataset.read(tmp_path / "test.fits")
assert dataset_new.mask.data.dtype == bool
assert_allclose(dataset.counts.data, dataset_new.counts.data)
assert_allclose(
dataset.npred_background().data, dataset_new.npred_background().data
)
assert_allclose(dataset.edisp.edisp_map.data, dataset_new.edisp.edisp_map.data)
assert_allclose(dataset.psf.psf_map.data, dataset_new.psf.psf_map.data)
assert_allclose(dataset.exposure.data, dataset_new.exposure.data)
assert_allclose(dataset.mask_fit.data, dataset_new.mask_fit.data)
assert_allclose(dataset.mask_safe.data, dataset_new.mask_safe.data)
assert dataset.counts.geom == dataset_new.counts.geom
assert dataset.exposure.geom == dataset_new.exposure.geom
assert_allclose(dataset.exposure.meta["livetime"], 1 * u.h)
assert dataset.npred_background().geom == dataset_new.npred_background().geom
assert dataset.edisp.edisp_map.geom == dataset_new.edisp.edisp_map.geom
assert_allclose(
dataset.gti.time_sum.to_value("s"), dataset_new.gti.time_sum.to_value("s")
)
# To test io of psf and edisp map
stacked = MapDataset.create(geom)
stacked.write(tmp_path / "test-2.fits", overwrite=True)
stacked1 = MapDataset.read(tmp_path / "test-2.fits")
assert stacked1.psf.psf_map is not None
assert stacked1.psf.exposure_map is not None
assert stacked1.edisp.edisp_map is not None
assert stacked1.edisp.exposure_map is not None
assert stacked.mask.data.dtype == bool
assert_allclose(stacked1.psf.psf_map, stacked.psf.psf_map)
assert_allclose(stacked1.edisp.edisp_map, stacked.edisp.edisp_map)
@requires_dependency("iminuit")
@requires_dependency("matplotlib")
@requires_data()
def test_map_fit(sky_model, geom, geom_etrue):
dataset_1 = get_map_dataset(geom, geom_etrue, name="test-1")
dataset_2 = get_map_dataset(geom, geom_etrue, name="test-2")
datasets = Datasets([dataset_1, dataset_2])
models = Models(datasets.models)
models.insert(0, sky_model)
models["test-1-bkg"].spectral_model.norm.value = 0.5
models["test-model"].spatial_model.sigma.frozen = True
datasets.models = models
dataset_2.counts = dataset_2.npred()
dataset_1.counts = dataset_1.npred()
models["test-1-bkg"].spectral_model.norm.value = 0.49
models["test-2-bkg"].spectral_model.norm.value = 0.99
fit = Fit()
result = fit.run(datasets=datasets)
result = result["optimize_result"]
assert result.success
assert "minuit" in repr(result)
npred = dataset_1.npred().data.sum()
assert_allclose(npred, 7525.790688, rtol=1e-3)
assert_allclose(result.total_stat, 21625.845714, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)
assert_allclose(pars["index"].value, 3, rtol=1e-2)
assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)
# background norm 1
assert_allclose(pars[8].value, 0.5, rtol=1e-2)
assert_allclose(pars[8].error, 0.015811, rtol=1e-2)
# background norm 2
assert_allclose(pars[11].value, 1, rtol=1e-2)
assert_allclose(pars[11].error, 0.02147, rtol=1e-2)
# test mask_safe evaluation
dataset_1.mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
dataset_2.mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
stat = datasets.stat_sum()
assert_allclose(stat, 14823.772744, rtol=1e-5)
region = sky_model.spatial_model.to_region()
initial_counts = dataset_1.counts.copy()
with mpl_plot_check():
dataset_1.plot_residuals(kwargs_spectral=dict(region=region))
# check dataset has not changed
assert initial_counts == dataset_1.counts
# test model evaluation outside image
dataset_1.models[0].spatial_model.lon_0.value = 150
dataset_1.npred()
assert not dataset_1.evaluators["test-model"].contributes
@requires_dependency("iminuit")
@requires_data()
def test_map_fit_one_energy_bin(sky_model, geom_image):
energy_axis = geom_image.axes["energy"]
geom_etrue = geom_image.to_image().to_cube([energy_axis.copy(name="energy_true")])
dataset = get_map_dataset(geom_image, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
sky_model.spectral_model.index.value = 3.0
sky_model.spectral_model.index.frozen = True
dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.value = 0.5
dataset.counts = dataset.npred()
# Move a bit away from the best-fit point, to make sure the optimiser runs
sky_model.parameters["sigma"].value = 0.21
dataset.models[f"{dataset.name}-bkg"].parameters["norm"].frozen = True
fit = Fit()
result = fit.run(datasets=[dataset])
result = result["optimize_result"]
assert result.success
npred = dataset.npred().data.sum()
assert_allclose(npred, 16538.124036, rtol=1e-3)
assert_allclose(result.total_stat, -34844.125047, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.001689, rtol=1e-2)
assert_allclose(pars["sigma"].value, 0.2, rtol=1e-2)
assert_allclose(pars["sigma"].error, 0.00092, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 8.127593e-14, rtol=1e-2)
def test_create():
# tests empty datasets created
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
e_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 3), name="energy", unit=u.TeV, interp="log"
)
e_true = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), name="energy_true", unit=u.TeV, interp="log"
)
geom = WcsGeom.create(binsz=0.02, width=(2, 2), axes=[e_reco])
empty_dataset = MapDataset.create(
geom=geom, energy_axis_true=e_true, rad_axis=rad_axis
)
assert empty_dataset.counts.data.shape == (2, 100, 100)
assert empty_dataset.exposure.data.shape == (3, 100, 100)
assert empty_dataset.psf.psf_map.data.shape == (3, 50, 10, 10)
assert empty_dataset.psf.exposure_map.data.shape == (3, 1, 10, 10)
assert isinstance(empty_dataset.edisp, EDispKernelMap)
assert empty_dataset.edisp.edisp_map.data.shape == (3, 2, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (3, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 300)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
def test_create_with_migra(tmp_path):
# tests empty datasets created
migra_axis = MapAxis(nodes=np.linspace(0.0, 3.0, 51), unit="", name="migra")
    rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import sys, os
from unittest.mock import patch
sys.path.append(os.path.abspath("..")) # current folder is ~/tests
from idaes.core.surrogate.pysmo.polynomial_regression import (
PolynomialRegression,
FeatureScaling,
)
import numpy as np
import pandas as pd
import pytest
class TestFeatureScaling:
test_data_1d = [[x] for x in range(10)]
test_data_2d = [[x, (x + 1) ** 2] for x in range(10)]
test_data_3d = [[x, x + 10, (x + 1) ** 2 + x + 10] for x in range(10)]
test_data_3d_constant = [[x, 10, (x + 1) ** 2 + 10] for x in range(10)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9]])
expected_output_2 = np.array([[0]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(
output_1, np.array(expected_output_1).reshape(10, 1)
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 100]])
expected_output_2 = np.array([[0, 1]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 19, 119]])
expected_output_2 = np.array([[0, 10, 11]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 10, 110]])
expected_output_2 = np.array([[0, 10, 11]])
scale = expected_output_3 - expected_output_2
scale[scale == 0.0] = 1.0
expected_output_1 = (input_array - expected_output_2) / scale
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [list])
def test_data_scaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
with pytest.raises(TypeError):
FeatureScaling.data_scaling(input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
output_1 = output_1.reshape(
output_1.shape[0],
)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array.reshape(10, 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1]])
max_array = np.array([[5]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_06(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1, 2, 3]])
max_array = np.array([[5, 6, 7]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
class TestPolynomialRegression:
y = np.array(
[
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 21)
for j in np.linspace(0, 10, 21)
]
)
full_data = {"x1": y[:, 0], "x2": y[:, 1], "y": y[:, 2]}
training_data = [
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 5)
for j in np.linspace(0, 10, 5)
]
test_data = [[i, (i + 1) ** 2] for i in range(10)]
test_data_large = [[i, (i + 1) ** 2] for i in range(200)]
test_data_1d = [[(i + 1) ** 2] for i in range(10)]
test_data_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(10)]
sample_points = [[i, (i + 1) ** 2] for i in range(8)]
sample_points_large = [[i, (i + 1) ** 2] for i in range(100)]
sample_points_1d = [[(i + 1) ** 2] for i in range(8)]
sample_points_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(8)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
assert PolyClass.max_polynomial_order == 5
assert (
PolyClass.number_of_crossvalidations == 3
) # Default number of cross-validations
assert PolyClass.no_adaptive_samples == 4 # Default number of adaptive samples
assert PolyClass.fraction_training == 0.75 # Default training split
assert (
PolyClass.max_fraction_training_samples == 0.5
) # Default fraction for the maximum number of training samples
assert PolyClass.max_iter == 10 # Default maximum number of iterations
assert PolyClass.solution_method == "pyomo" # Default solution_method
assert PolyClass.multinomials == 1 # Default multinomials
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
@pytest.mark.unit
def test__init__02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
assert PolyClass.max_polynomial_order == 3
        assert (
            PolyClass.number_of_crossvalidations == 5
        ) # User-specified number of cross-validations
        assert PolyClass.no_adaptive_samples == 6 # User-specified number of adaptive samples
        assert PolyClass.fraction_training == 0.5 # User-specified training split
        assert (
            PolyClass.max_fraction_training_samples == 0.4
        ) # User-specified fraction for the maximum number of training samples
        assert PolyClass.max_iter == 20 # User-specified maximum number of iterations
        assert (
            PolyClass.solution_method == "mle"
        ) # User-specified solution_method; case-insensitive, stored in lower case
        assert PolyClass.multinomials == 0 # User-specified multinomials
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [list])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__03(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [list])
def test__init__04(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__05(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_large)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__06(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_3d)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__07(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_3d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__08(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_1d)
regression_data_input = array_type2(self.sample_points_1d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__09(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=11,
)
assert (
PolyClass.number_of_crossvalidations == 11
        ) # User-specified number of cross-validations (values above 10 raise a warning)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__10(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1.2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__11(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_large)
regression_data_input = array_type2(self.sample_points_large)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=11
)
assert PolyClass.max_polynomial_order == 10
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__12(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__13(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=-1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__14(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__15(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=-1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__16(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
regression_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=100,
)
assert PolyClass.max_iter == 0
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__17(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=0,
max_iter=100,
)
assert PolyClass.max_iter == 0
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__18(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__19(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__20(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__21(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=15
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__22(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__23(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="idaes",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__24(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
multinomials=3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__25(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=-2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__26(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__27(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
no_adaptive_samples=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__28(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
max_iter=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__29(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
overwrite=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__30(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname="solution.pkl",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__31(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=1,
)
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__32(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
assert PolyClass1.filename == PolyClass2.filename
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__33(self, array_type1, array_type2):
file_name1 = "sol_check1.pickle"
file_name2 = "sol_check2.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name1,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name2,
overwrite=True,
)
assert PolyClass1.filename == file_name1
assert PolyClass2.filename == file_name2
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__34(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolygClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
assert PolyClass1.filename == PolygClass2.filename
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.01,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.99,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
additional_data_input = np.array(
[
[
i**2,
((i + 1) * 2) + ((j + 1) * 2),
j**4,
((i + 1) * 2) + ((j + 1) ** 2),
]
for i in range(5)
for j in range(5)
]
)
training_data, cross_val_data = PolyClass.training_test_data_creation(
additional_features=additional_data_input
)
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations * 2
assert len(cross_val_data) == PolyClass.number_of_crossvalidations * 2
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert (
training_data["training_extras_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
assert (
cross_val_data["test_extras_" + str(i)].shape[0] == expected_test_size
)
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
concat_02 = np.concatenate(
(
training_data["training_extras_" + str(i)],
cross_val_data["test_extras_" + str(i)],
),
axis=0,
)
additional_data_sorted = additional_data_input[
np.lexsort(
(
additional_data_input[:, 3],
additional_data_input[:, 2],
additional_data_input[:, 1],
additional_data_input[:, 0],
)
)
]
concat_02_sorted = concat_02[
np.lexsort(
(concat_02[:, 3], concat_02[:, 2], concat_02[:, 1], concat_02[:, 0])
)
]
np.testing.assert_equal(additional_data_sorted, concat_02_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 4 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=2
)
poly_degree = 2
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 6 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=10
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 22 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
expected_output[:, 21] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=10,
multinomials=0,
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
        expected_output_nc = 21  # Without the multinomial (cross) term, number of features = 2 * max_polynomial_order + 1 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
additional_term = np.sqrt(x_input_train_data)
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data, additional_term
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = (
6 # New number of features should be = 2 * max_polynomial_order + 4
)
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
expected_output[:, 4] = additional_term[:, 0]
expected_output[:, 5] = additional_term[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 6613.875 # Calculated externally as sum(y^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
expected_value = 90.625 # Calculated externally as sum(dy^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
expected_value = 0 # Value should return zero for exact solution
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc,))
expected_value = np.array(
[[-97], [-635], [-635], [-5246.875], [-5246.875], [-3925]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[12.5], [75], [75], [593.75], [593.75], [437.5]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[0], [0], [0], [0], [0], [0]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_bfgs_parameter_optimization_01(self, array_type):
original_data_input = array_type(self.test_data)
# Create x vector for ax2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
data_feed = PolynomialRegression(
original_data_input,
input_array,
maximum_polynomial_order=5,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_bfgs_parameter_optimization_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
        # Create x vector for the two-variable quadratic (x1 + 1)^2 + (x2 + 1)^2: x data supplied in x_vector
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=4,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_mle_estimate_01(self):
# Create x vector for ax2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_mle_estimate_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_pyomo_optimization_01(self):
x_vector = np.array([[i**2, i, 1] for i in range(10)])
y = np.array([[i**2] for i in range(1, 11)])
expected_value = np.array([[1.0], [2.0], [1.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_pyomo_optimization_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 2 * 6613.875 # Calculated externally as sum(y^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
        expected_value = 2 * 90.625  # Calculated externally as sum(dy^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
    def test_cross_validation_error_calculation_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
        x_vector = np.zeros((x_data_nr, x_data_nc))
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import astropy.stats
import astropy.units as u
CLIGHT = 299792458.0 # m/s
def path_to_eazy_data():
return os.path.join(os.path.dirname(__file__), 'data')
def set_warnings(numpy_level='ignore', astropy_level='ignore'):
"""
Set global numpy and astropy warnings
Parameters
----------
numpy_level : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}
Numpy error level (see `~numpy.seterr`).
astropy_level : {'error', 'ignore', 'always', 'default', 'module', 'once'}
Astropy error level (see `~warnings.simplefilter`).
"""
from astropy.utils.exceptions import AstropyWarning
np.seterr(all=numpy_level)
warnings.simplefilter(astropy_level, category=AstropyWarning)
def running_median(xi, yi, NBIN=10, use_median=True, use_nmad=True,
                   reverse=False, bins=None,
                   x_func=astropy.stats.biweight_location,
                   y_func=astropy.stats.biweight_location,
                   std_func=astropy.stats.biweight_midvariance,
                   integrate=False):
"""
Running median/biweight/nmad
"""
NPER = xi.size // NBIN
if bins is None:
so = np.argsort(xi)
if reverse:
so = so[::-1]
bx = np.linspace(0,len(xi),NBIN+1)
bins = np.interp(bx, np.arange(len(xi)), xi[so])
if reverse:
bins = bins[::-1]
NBIN = len(bins)-1
xm = np.arange(NBIN)*1.
xs = xm*0
ym = xm*0
ys = xm*0
N = np.arange(NBIN)
if use_median:
y_func = np.median
if use_nmad:
std_func = astropy.stats.mad_std
#idx = np.arange(NPER, dtype=int)
for i in range(NBIN):
in_bin = (xi > bins[i]) & (xi <= bins[i+1])
N[i] = in_bin.sum() #N[i] = xi[so][idx+NPER*i].size
if integrate:
xso = np.argsort(xi[in_bin])
ma = xi[in_bin].max()
mi = xi[in_bin].min()
xm[i] = (ma+mi)/2.
dx = (ma-mi)
ym[i] = np.trapz(yi[in_bin][xso], xi[in_bin][xso])/dx
else:
xm[i] = x_func(xi[in_bin])
ym[i] = y_func(yi[in_bin])
ys[i] = std_func(yi[in_bin])
# if use_median:
# xm[i] = np.median(xi[in_bin]) # [so][idx+NPER*i])
# ym[i] = np.median(yi[in_bin]) # [so][idx+NPER*i])
# else:
# xm[i] = astropy.stats.biweight_location(xi[in_bin]) # [so][idx+NPER*i])
# ym[i] = astropy.stats.biweight_location(yi[in_bin]) # [so][idx+NPER*i])
#
# if use_nmad:
# mad = astropy.stats.median_absolute_deviation
# ys[i] = 1.4826*mad(yi[in_bin]) # [so][idx+NPER*i])
# else:
# ys[i] = astropy.stats.biweight_midvariance(yi[in_bin]) # [so][idx+NPER*i])
return xm, ym, ys, N
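# Illustrative usage sketch (not part of the original module): recover the
# running median and NMAD scatter of a noisy linear relation in 10 bins.
# The data below are made-up values for demonstration only.
def _running_median_example():
    rng = np.random.default_rng(42)
    xi = rng.uniform(0, 10, 1000)
    yi = 2.0 * xi + rng.normal(0, 1.0, xi.size)
    xm, ym, ys, N = running_median(xi, yi, NBIN=10, use_median=True, use_nmad=True)
    # xm: bin centers, ym: running median, ys: NMAD scatter, N: counts per bin
    return xm, ym, ys, N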
def nmad(arr):
import astropy.stats
return 1.48*astropy.stats.median_absolute_deviation(arr)
def log_zgrid(zr=[0.7,3.4], dz=0.01):
"""Make a logarithmically spaced redshift grid
Parameters
----------
zr : [float, float]
Minimum and maximum of the desired grid
dz : float
Step size, dz/(1+z)
Returns
-------
zgrid : array-like
Redshift grid
"""
zgrid = np.exp(np.arange(np.log(1+zr[0]), np.log(1+zr[1]), dz))-1
return zgrid
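# Illustrative usage sketch: a redshift grid with constant logarithmic step
# dz/(1+z) = 0.01 between z = 0.7 and z = 3.4.
def _log_zgrid_example():
    zgrid = log_zgrid(zr=[0.7, 3.4], dz=0.01)
    # np.diff(zgrid) / (1 + zgrid[:-1]) is approximately constant (~0.01)
    return zgrid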
def trapz_dx(x):
"""
Return trapezoid rule coefficients, useful for numerical integration
using a dot product
Parameters
----------
x : array-like
Independent variable
Returns
-------
dx : array_like
Coefficients for trapezoidal rule integration.
"""
dx = np.zeros_like(x)
diff = np.diff(x)/2.
dx[:-1] += diff
dx[1:] += diff
return dx
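# Illustrative sketch: the coefficients from trapz_dx turn trapezoidal
# integration into a simple dot product, matching np.trapz on the same grid.
def _trapz_dx_example():
    x = np.cumsum(np.full(50, 0.2))  # an assumed (here regular) grid
    y = np.sin(x)
    dx = trapz_dx(x)
    # np.dot(y, dx) and np.trapz(y, x) agree to machine precision
    return np.dot(y, dx), np.trapz(y, x)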
def clipLog(im, lexp=1000, cmap=[-1.4914, 0.6273], scale=[-0.1,10]):
"""
Return normalized array like DS9 log
"""
import numpy as np
contrast, bias = cmap
clip = (np.clip(im, scale[0], scale[1])-scale[0])/(scale[1]-scale[0])
clip_log = np.clip((np.log10(lexp*clip+1)/np.log10(lexp)-bias)*contrast+0.5, 0, 1)
return clip_log
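# Illustrative usage sketch: DS9-like log stretch of a small image array with
# an injected bright source; the result is normalized to [0, 1] for display.
def _clip_log_example():
    im = np.random.normal(0, 0.05, (64, 64))
    im[30:34, 30:34] += 5.0  # artificial "source" (assumed values for the demo)
    scaled = clipLog(im, lexp=1000, cmap=[-1.4914, 0.6273], scale=[-0.1, 10])
    return scaled  # e.g. plt.imshow(scaled, cmap='gray', origin='lower')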
def get_mw_dust(ra, dec, **kwargs):
"""
Wrapper around functions to try to query for the MW E(B-V)
"""
try:
ebv = get_dustmaps_dust(ra, dec, web=True)
return ebv
except:
pass
try:
ebv = get_dustmaps_dust(ra, dec, web=False)
return ebv
except:
pass
try:
ebv = get_irsa_dust(ra, dec, **kwargs)
return ebv
except:
pass
return 0.00
def get_dustmaps_dust(ra, dec, web=True, **kwargs):
"Use https://github.com/gregreen/dustmaps"
from dustmaps.sfd import SFDQuery, SFDWebQuery
from astropy.coordinates import SkyCoord
coords = SkyCoord(ra, dec, unit='deg', frame='icrs')
if web:
sfd = SFDWebQuery()
else:
sfd = SFDQuery()
ebv = sfd(coords)
return ebv
def get_irsa_dust(ra=53.1227, dec=-27.805089, type='SandF'):
"""
Get Galactic dust reddening from NED/IRSA at a given position
http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html
Parameters
----------
ra, dec : float
RA/Dec in decimal degrees.
type : 'SFD' or 'SandF'
Dust model, with
SandF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
SFD = Schlegel et al. 1998 (ApJ 500, 525)
Returns
-------
ebv : float
Color excess E(B-V), in magnitudes
"""
import os
import tempfile
import urllib.request
from astropy.table import Table
from lxml import objectify
query = 'http://irsa.ipac.caltech.edu/cgi-bin/DUST/nph-dust?locstr={0:.4f}+{1:.4f}+equ+j2000'.format(ra, dec)
req = urllib.request.Request(query)
response = urllib.request.urlopen(req)
resp_text = response.read().decode('utf-8')
root = objectify.fromstring(resp_text)
stats = root.result.statistics
if type == 'SFD':
return float(str(stats.refPixelValueSFD).split()[0])
else:
return float(str(stats.refPixelValueSandF).split()[0])
def fill_between_steps(x, y, z, ax=None, *args, **kwargs):
"""
Make `fill_between` work like linestyle='steps-mid'.
"""
so = np.argsort(x)
mid = x[so][:-1] + np.diff(x[so])/2.
xfull = np.append(np.append(x, mid), mid+np.diff(x[so])/1.e6)
yfull = np.append(np.append(y, y[:-1]), y[1:])
zfull = np.append(np.append(z, z[:-1]), z[1:])
so = np.argsort(xfull)
if ax is None:
ax = plt.gca()
ax.fill_between(xfull[so], yfull[so], zfull[so], *args, **kwargs)
class GalacticExtinction(object):
def __init__(self, EBV=0, Rv=3.1, force=None, radec=None, ebv_type='SandF'):
"""
Wrapper to use either `~specutils.extinction` or the `~extinction`
modules, which have different calling formats. The results from
both of these modules should be equivalent.
Parameters
----------
EBV : float
Galactic reddening, e.g., from `https://irsa.ipac.caltech.edu/applications/DUST/`.
Rv : float
Selective extinction ratio, `Rv=Av/(E(B-V))`.
radec : None or (float, float)
If provided, try to determine EBV based on these coordinates
with `get_irsa_dust(type=[ebv_type])` or `dustmaps`.
force : None, 'extinction', 'specutils.extinction'
Force use one or the other modules. If `None`, then first try
to import `~specutils.extinction` and if that fails use
`~extinction`.
"""
import importlib
# Import handler
if force == 'specutils.extinction':
import specutils.extinction
self.module = 'specutils.extinction'
elif force == 'extinction':
from extinction import Fitzpatrick99
self.module = 'extinction'
elif force == 'dust_extinction':
from dust_extinction.parameter_averages import F99
self.module = 'dust_extinction'
else:
modules = [['dust_extinction.parameter_averages', 'F99'],
['extinction','Fitzpatrick99'],
['specutils.extinction','ExtinctionF99']]
self.module = None
for (mod, cla) in modules:
try:
_F99 = getattr(importlib.import_module(mod), cla)
self.module = mod
break
except:
continue
if self.module is None:
raise ImportError("Couldn't import extinction module from "
"dust_extinction, extinction or specutils")
# try:
# from specutils.extinction import ExtinctionF99
# self.module = 'specutils.extinction'
# except:
# from extinction import Fitzpatrick99
# self.module = 'extinction'
if radec is not None:
self.EBV = get_mw_dust(ra=radec[0], dec=radec[1], type=ebv_type)
else:
self.EBV = EBV
self.Rv = Rv
if self.module == 'dust_extinction.parameter_averages':
self.f99 = _F99(Rv=self.Rv)
elif self.module == 'specutils.extinction':
self.f99 = _F99(self.Av)
#self.Alambda = f99(self.wave*u.angstrom)
else:
self.f99 = _F99(self.Rv)
#self.Alambda = f99(self.wave*u.angstrom, Av)
@property
def Av(self):
return self.EBV*self.Rv
@property
def info(self):
msg = ('F99 extinction with `{0}`: Rv={1:.1f}, '
'E(B-V)={2:.3f} (Av={3:.2f})')
return msg.format(self.module, self.Rv, self.EBV, self.Av)
def __call__(self, wave):
"""
Compute Fitzpatrick99 extinction.
Parameters
----------
wave : float or `~numpy.ndarray`
Observed-frame wavelengths. If no `unit` attribute available,
assume units are `~astropy.units.Angstrom`.
Returns
-------
Alambda : like `wave`
F99 extinction (mags) as a function of wavelength. Output will
be set to zero below 909 Angstroms and above 6 microns as the
extinction modules themselves don't compute outside that range.
"""
import astropy.units as u
if not hasattr(wave, 'unit'):
unit = u.Angstrom
else:
unit = 1
inwave = np.squeeze(wave).flatten()
if self.module == 'dust_extinction.parameter_averages':
clip = (inwave*unit > 1/10.*u.micron)
clip &= (inwave*unit < 1/0.3*u.micron)
else:
clip = (inwave*unit > 909*u.angstrom) & (inwave*unit < 6*u.micron)
Alambda = np.zeros(inwave.shape)
if clip.sum() == 0:
return Alambda
else:
if self.module == 'dust_extinction.parameter_averages':
flam = self.f99.extinguish(inwave[clip]*unit, Av=self.Av)
Alambda[clip] = -2.5*np.log10(flam)
elif self.module == 'specutils.extinction':
Alambda[clip] = self.f99(inwave[clip]*unit)
else:
Alambda[clip] = self.f99(inwave[clip]*unit, self.Av)
return Alambda
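# Illustrative usage sketch (assumes one of the supported extinction modules
# is installed): Milky Way F99 extinction in magnitudes for E(B-V) = 0.05.
def _galactic_extinction_example():
    mw = GalacticExtinction(EBV=0.05, Rv=3.1)
    wave = np.linspace(3000., 9000., 7)  # observed wavelengths in Angstroms
    Alambda = mw(wave)  # extinction A(lambda) in magnitudes
    return mw.info, Alambda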
def abs_mag_to_luminosity(absmag, pivot=None, output_unit=u.L_sun):
"""
Convert absolute AB mag to luminosity units
Parameters
----------
absmag : array-like
Absolute AB magnitude.
pivot : float
Filter pivot wavelength associated with the magnitude. If no units,
then assume `~astropy.units.Angstrom`.
output_unit : `~astropy.units.core.Unit`
Desired output unit. Must specify a ``pivot`` wavelength for output
power units, e.g., `~astropy.unit.L_sun`.
"""
if pivot is None:
nu = 1.
else:
if hasattr(pivot, 'unit'):
wunit = 1
else:
wunit = u.Angstrom
nu = ((CLIGHT*u.m/u.second)/(pivot*wunit)).to(u.Hz)
fjy = 3631*u.jansky * 10**(-0.4*absmag)
d10 = (10*u.pc).to(u.cm)
f10 = fjy * 4 * np.pi * d10**2 * nu
return f10.to(output_unit)
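# Illustrative usage sketch: convert an absolute AB magnitude to solar
# luminosities (nu * L_nu). The pivot wavelength of 16000 Angstrom is an
# assumed, round illustrative value, not tied to any specific filter.
def _abs_mag_example():
    return abs_mag_to_luminosity(-22.0, pivot=16000., output_unit=u.L_sun)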
def zphot_zspec(zphot, zspec, zlimits=None, zmin=0, zmax=4, axes=None,
                figsize=[6,7], minor=0.5, skip=2, selection=None,
                catastrophic_limit=0.15, title=None, min_zphot=0.02,
                alpha=0.2, extra_xlabel='', extra_ylabel='',
                xlabel=r'$z_\mathrm{spec}$', ylabel=r'$z_\mathrm{phot}$',
                label_pos=(0.05, 0.95),
                label_kwargs=dict(ha='left', va='top', fontsize=10),
                label_prefix='', format_axes=True, color='k',
                point_label=None, **kwargs):
"""
Make zphot_zspec plot scaled by log(1+z) and show uncertainties
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
clip = (zphot > min_zphot) & (zspec > zmin) & (zspec <= zmax)
if selection is not None:
clip &= selection
dz = (zphot-zspec)/(1+zspec)
#izbest = np.argmin(self.fit_chi2, axis=1)
clip_cat = (np.abs(dz) < catastrophic_limit)
frac_cat = 1-(clip & clip_cat).sum() / clip.sum()
NOUT = (clip & ~clip_cat).sum()
gs = GridSpec(2,1, height_ratios=[6,1])
NEW_AXES = axes is None
if NEW_AXES:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(gs[0,0])
else:
ax = axes[0]
fig = None
if title is not None:
ax.set_title(title)
if zlimits is not None:
yerr = np.log10(1+np.abs(zlimits.T - zphot))
ax.errorbar(np.log10(1+zspec[clip & ~clip_cat]),
np.log10(1+zphot[clip & ~clip_cat]),
yerr=yerr[:,clip & ~clip_cat], marker='.', alpha=alpha,
color='r', linestyle='None')
ax.errorbar(np.log10(1+zspec[clip & clip_cat]),
np.log10(1+zphot[clip & clip_cat]),
yerr=yerr[:,clip & clip_cat], marker='.', alpha=alpha,
color=color, linestyle='None', label=point_label)
else:
ax.scatter(np.log10(1+zspec[clip & ~clip_cat]),
np.log10(1+zphot[clip & ~clip_cat]),
marker='.', alpha=alpha, color='r')
ax.scatter(np.log10(1+zspec[clip & clip_cat]),
np.log10(1+zphot[clip & clip_cat]),
marker='.', alpha=alpha, color=color, label=point_label)
if NEW_AXES | format_axes:
xt = np.arange(zmin, zmax+0.1, minor)
xl = np.log10(1+xt)
ax.plot(xl, xl, color='r', alpha=0.5)
ax.set_xlim(xl[0], xl[-1])
ax.set_ylim(xl[0],xl[-1])
xtl = list(xt)
if skip > 0:
for i in range(1, len(xt), skip):
xtl[i] = ''
ax.set_xticks(xl)
if axes is None:
ax.set_xticklabels([])
else:
if len(axes) == 1:
ax.set_xticks(xl)
ax.set_xticklabels(xtl);
ax.set_xlabel(xlabel + extra_xlabel)
ax.set_yticks(xl); ax.set_yticklabels(xtl);
ax.set_ylabel(ylabel + extra_ylabel)
sample_nmad = nmad(dz[clip])
sample_cat_nmad = nmad(dz[clip & clip_cat])
if label_pos is not None:
msg = r'{label_prefix} N={N} ({NOUT}, {err_frac:4.1f}%), $\sigma$={sample_nmad:.4f} ({sample_cat_nmad:.4f})'
msg = msg.format(label_prefix=label_prefix,
N=clip.sum(), err_frac=frac_cat*100,
sample_nmad=sample_nmad,
sample_cat_nmad=sample_cat_nmad, NOUT=NOUT)
ax.text(label_pos[0], label_pos[1], msg, transform=ax.transAxes)
if axes is None:
ax = fig.add_subplot(gs[1,0])
else:
if len(axes) == 2:
ax = axes[1]
else:
return True
if zlimits is not None:
yerr = np.abs(zlimits.T-zphot)#/(1+self.cat['z_spec'])
ax.errorbar(np.log10(1+zspec[clip & ~clip_cat]), dz[clip & ~clip_cat],
yerr=yerr[:,clip & ~clip_cat],
marker='.', alpha=alpha, color='r', linestyle='None')
ax.errorbar(np.log10(1+zspec[clip & clip_cat]), dz[clip & clip_cat],
yerr=yerr[:,clip & clip_cat],
marker='.', alpha=alpha, color='k', linestyle='None')
else:
ax.scatter(np.log10(1+zspec[clip & ~clip_cat]), dz[clip & ~clip_cat],
marker='.', alpha=alpha, color='r')
ax.scatter(np.log10(1+zspec[clip & clip_cat]), dz[clip & clip_cat],
marker='.', alpha=alpha, color='k')
if fig is not None:
ax.set_xticks(xl); ax.set_xticklabels(xtl);
ax.set_xlim(xl[0], xl[-1])
ax.set_ylim(-6*sample_nmad, 6*sample_nmad)
ax.set_yticks([-3*sample_nmad, 0, 3*sample_nmad])
ax.set_yticklabels([r'$-3\sigma$',r'$0$',r'$+3\sigma$'])
ax.set_xlabel(xlabel + extra_xlabel)
ax.set_ylabel(r'$\Delta z / 1+z$')
for a in fig.axes:
a.grid()
fig.tight_layout(pad=0.1)
return fig
else:
return True
def query_html(ra, dec):
"""
Return HTML string of queries at a position
"""
html = f"({ra:.6f}, {dec:.6f}) "
for func, name in zip([cds_query, eso_query, mast_query, alma_query, show_legacysurvey, hscmap_query], ['CDS','ESO','MAST','ALMA', 'LEG','HSC']):
url = func(ra, dec)
html += f' <a href="{url}">{name}</a>'
return html
def cds_query(ra, dec, radius=1.):
"""
    Open browser with CDS catalog query around central position
"""
#rd = self.get('pan fk5').strip()
rd = f'{ra} {dec}'
rdst = rd.replace('+', '%2B').replace('-', '%2D').replace(' ', '+')
url = (f'http://vizier.u-strasbg.fr/viz-bin/VizieR?'
f'-c={rdst}&-c.rs={radius:.1f}')
#os.system(f'open {url}')
return url
def eso_query(ra, dec, radius=1., dp_types=['CUBE','IMAGE'], extra=''):
"""
Open browser with ESO archive query around central position.
``radius`` in arcmin.
"""
#ra, dec = self.get('pan fk5').strip().split()
dp_type = ','.join(dp_types)
url = (f'https://archive.eso.org/scienceportal/home?'
f'pos={ra},{dec}&r={radius/60.}&dp_type={dp_type}{extra}')
#os.system(f'open {url}')
return url
def mast_query(ra, dec, instruments=['WFC3','ACS','WFPC2'], max=1000):
"""
Open browser with MAST archive query around central position
"""
#ra, dec = self.get('pan fk5').strip().split()
if len(instruments) > 0:
instr='&sci_instrume='+','.join(instruments)
else:
instr = ''
url = (f'https://archive.stsci.edu/hst/search.php?RA={ra}&DEC={dec}'
f'&sci_aec=S{instr}&max_records={max}&outputformat=HTML_Table'
'&action=Search')
#os.system(f'open {url}')
return url
def alma_query(ra, dec, mirror="almascience.eso.org", radius=1, extra=''):
"""
Open browser with ALMA archive query around central position
"""
#ra, dec = self.get('pan fk5').strip().split()
url = (f"https://{mirror}/asax/?result_view=observation"
f"&raDec={ra}%20{dec},{radius}{extra}")
#os.system(f'open "{url}"')
return url
def hscmap_query(ra, dec, open=True):
"""
Function to open HSC explorer in browser centered on target coordinates
"""
import os
rrad = ra/180*np.pi
drad = dec/180*np.pi
url = (f"https://hsc-release.mtk.nao.ac.jp/hscMap-pdr2/app/#/?_=%7B%22view%22%3A%7B%22a%22%3A{rrad},%22d%22%3A{drad}"
",%22fovy%22%3A0.00009647627785850188,%22roll%22%3A0%7D,%22sspParams%22%3A%7B%22type%22%3A%22"
"SDSS_TRUE_COLOR%22,%22filter%22%3A%5B%22HSC-Y%22,%22HSC-Z%22,%22HSC-I%22%5D,%22simpleRgb"
"%22%3A%7B%22beta%22%3A22026.465794806718,%22a%22%3A1,%22bias%22%3A0.05,%22b0%22%3A0%7D,%22"
"sdssTrueColor%22%3A%7B%22beta%22%3A40106.59228119989,%22a%22%3A2.594451857120983,%22bias%22%3A0.05,"
"%22b0%22%3A0%7D%7D,%22externalTiles%22%3A%5B%5D,%22activeReruns%22%3A%5B%22pdr2_wide%22,%22pdr2_dud"
"%22%5D%7D")
return url
def show_legacysurvey(ra, dec, layer='dr8', zoom=14):
"""
Open browser with legacysurvey.org panner around central position
"""
#ra, dec = self.get('pan fk5').strip().split()
url = (f'http://legacysurvey.org/viewer?ra={ra}&dec={dec}'
f'&layer={layer}&zoom={zoom}')
#os.system(f'open {url}')
return url
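# Hedged usage sketch (illustration only, not part of the original module): the
# helpers above only format and return URL strings; nothing is opened in a
# browser here. The default coordinates are arbitrary placeholders.
def _example_queries(ra=150.0, dec=2.2):
    links_html = query_html(ra, dec)
    legacy_url = show_legacysurvey(ra, dec, layer='dr8', zoom=14)
    return links_html, legacy_url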
def interp_conserve(x, xp, fp, left=0., right=0.):
"""
Interpolation analogous to `~numpy.interp` but conserving "flux".
Parameters
----------
x : `~numpy.ndarray`
Desired interpolation locations
xp, fp : `~numpy.ndarray`
The `x` and `y` coordinates of the function to be interpolated. The
        `x` array can be irregularly spaced but should be monotonically
        increasing.
left, right : float
Values to use for extrapolation below the minimum and maximum limits
of `x`.
Returns
-------
y : like `x`
Interpolated values.
Interpolation performed by trapezoidal integration between the midpoints
of the output `x` array with `~numpy.trapz`.
.. note:: For a faster `cython` implementation of this function, see
`~grizli.utils_c.interp_conserve_c`.
"""
midpoint = (x[1:]-x[:-1])/2.+x[:-1]
midpoint = np.append(midpoint, np.array([x[0],x[-1]]))
midpoint = midpoint[np.argsort(midpoint)]
int_midpoint = np.interp(midpoint, xp, fp, left=left, right=right)
int_midpoint[midpoint > xp.max()] = right
int_midpoint[midpoint < xp.min()] = left
fullx = np.append(xp, midpoint)
fully = np.append(fp, int_midpoint)
so = np.argsort(fullx)
fullx, fully = fullx[so], fully[so]
outy = x*0.
dx = midpoint[1:]-midpoint[:-1]
for i in range(len(x)):
bin = (fullx >= midpoint[i]) & (fullx <= midpoint[i+1])
outy[i] = np.trapz(fully[bin], fullx[bin])/dx[i]
return outy
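# Hedged usage sketch (illustration only, not part of the original API): rebin a
# simple Gaussian "spectrum" onto a coarser grid and compare the trapezoidal
# integrals, which should agree closely if flux is conserved.
def _example_interp_conserve():
    xp = np.linspace(0., 10., 501)
    fp = np.exp(-(xp - 5.)**2 / 0.5)
    x = np.linspace(0.5, 9.5, 46)
    y = interp_conserve(x, xp, fp)
    return np.trapz(fp, xp), np.trapz(y, x)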
class emceeChain():
def __init__(self, chain=None, file=None, param_names=[],
burn_fraction=0.5, sampler=None):
self.param_names = []
if chain is not None:
self.chain = chain
if file is not None:
if 'fits' in file.lower():
self.load_fits(file=file)
else:
self.load_chain(file=file)
self.process_chain(param_names = param_names,
burn_fraction=burn_fraction)
#
if sampler is not None:
from numpy import unravel_index
max_ix = unravel_index(sampler.lnprobability.argmax(), sampler.lnprobability.shape)
self.map = self.chain[max_ix[0], max_ix[1],:]
self.is_map = True
else:
self.map = self.median
self.is_map = False
def process_chain(self, param_names=[], burn_fraction=0.5):
"""
Define parameter names and get parameter statistics
"""
self.nwalkers, self.nstep, self.nparam = self.chain.shape
        if param_names == []:
            if self.param_names == []:
                # build default names in a fresh list rather than appending to
                # the (shared) mutable default argument
                param_names = ['a%d' %(i+1) for i in range(self.nparam)]
            self.param_names = param_names
else:
if len(param_names) != self.nparam:
print('param_names must have N=%d (or zero) entries' %(self.nparam))
return False
self.param_names = param_names
self.param_dict = {}
for i in range(self.nparam):
self.param_dict[self.param_names[i]] = i
self.nburn = int(np.round(burn_fraction*self.nstep))
self.stats = {}
self.median = np.zeros(self.nparam)
for param in self.param_names:
pid = self.param_dict[param]
self.stats[param] = self.get_stats(pid, burn=self.nburn)
self.median[pid] = self.stats[param]['q50']
def get_stats(self, pid, burn=0, raw=False):
"""
Get percentile statistics for a parameter in the chain
"""
if raw:
pchain = pid*1.
else:
pchain = self.chain[:,burn:,pid].flatten()
stats = {}
stats['q05'] = np.percentile(pchain, 5)
        stats['q16'] = np.percentile(pchain, 16)
        stats['q50'] = np.percentile(pchain, 50)
        stats['q84'] = np.percentile(pchain, 84)
        stats['q95'] = np.percentile(pchain, 95)
        # note: the percentiles beyond q16 and the return statement were
        # reconstructed to mirror q05/q16; 'q50' is required by process_chain
        return stats
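# Hedged usage sketch (illustration only): summarize a small synthetic chain of
# shape (nwalkers, nstep, nparam) and read back the statistics of one parameter.
def _example_emcee_chain():
    fake = np.random.normal(size=(8, 200, 2))
    ch = emceeChain(chain=fake, param_names=['a', 'b'], burn_fraction=0.5)
    return ch.stats['a']['q50'], ch.median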
import os
import pathlib
import sys
import astrodata
import gemini_instruments # noqa
import numpy as np
import pytest
from astrodata.testing import download_from_archive
from geminidr.gemini.lookups import DQ_definitions as DQ
from geminidr.gmos.primitives_gmos_spect import GMOSSpect
from gempy.utils import logutils
from scipy.ndimage import binary_dilation
TESFILE1 = "S20190808S0048_mosaic.fits" # R400 at 0.740 um
TESFILE2 = "S20190808S0048_varAdded.fits" # R400 at 0.740 um
# Tests Definitions -----------------------------------------------------------
@pytest.mark.gmosls
@pytest.mark.preprocessed_data
def test_cosmics_on_mosaiced_data(path_to_inputs, caplog):
ad = astrodata.open(os.path.join(path_to_inputs, TESFILE1))
ext = ad[0]
# add some additional fake cosmics
size = 50
np.random.seed(42)
cr_x = np.random.randint(low=5, high=ext.shape[0] - 5, size=size)
    cr_y = np.random.randint(low=5, high=ext.shape[1] - 5, size=size)
#!/usr/bin/env python
u"""
read_cryosat_L1b.py
Written by <NAME> (02/2020)
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, n_records, MODE):
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
#-- CryoSat-2 mode specific waveforms
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [512]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [512]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
#-- Phase Difference [512]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
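#-- Hedged usage sketch (illustration only; not part of the original module).
#-- n_records must already be known from the product headers (not derived here).
def _example_read_baseline_AB(full_filename, n_records, MODE='SIN'):
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        CS_l1b_mds = cryosat_baseline_AB(fid, n_records, MODE)
    #-- convert packed 0.1 micro-degree units to degrees
    lat = CS_l1b_mds['Location']['Lat']*1e-7
    lon = CS_l1b_mds['Location']['Lon']*1e-7
    return lat, lon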
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, n_records, MODE):
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
    #-- CRF= CryoSat Reference Frame.
    Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Star Tracker ID
Location['ST_ID'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
Location['Spares'] = np.zeros((n_records,n_blocks,2),dtype=np.int16)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
#-- CryoSat-2 mode specific waveform variables
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Location['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
                Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
import operator
from os import listdir
from random import shuffle
import numpy as np
from network_lib import netural_network
def single_2_by_2(L):
    Ln = np.array(L)
import unittest
from yauber_algo.errors import *
class SumTestCase(unittest.TestCase):
def test_sum(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import sum
#
# Function settings
#
algo = 'sum'
func = sum
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([nan, nan, 6, 7, 8, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3
),
suffix='regular_sum'
)
s.check_regular(
array([nan, nan, 6, 7, 8, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
-1,
),
suffix='regular_neg_period',
exception=YaUberAlgoArgumentError
)
s.check_regular(
            array([nan, nan, 6, 7, 8, 9, 6]),
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / lambda |v|^2
# omega = 2 pi / lambda
# |v| = 1 / n
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
# Check that projected points are inside region
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.z, 0.0)
np.testing.assert_array_less(rays.x, 0.5)
np.testing.assert_array_less(rays.y, 0.5)
np.testing.assert_array_less(-0.5, rays.x)
np.testing.assert_array_less(-0.5, rays.y)
assert len(rays) == 1000
@timer
def test_asPolar():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
nrad = rng.integers(1, 11)
naz = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays)%6 == 0
# If we set inner=0, then last ray should
# intersect the center of the pupil
inner = 0.0
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
assert len(rays)%6 == 1
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.x[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.y[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.z[-1], 0, atol=1e-14)
@timer
def test_asSpokes():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
rings = rng.integers(1, 11)
spokes = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
spokes=spokes, rings=rings,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings*i:rings*(i+1)],
np.linspace(inner, outer, rings, endpoint=True)
)
for i in range(rings):
checkAngle(ths[i::rings], np.linspace(0, 2*np.pi, spokes, endpoint=False))
# Check explicit rings and spokes
rings = rng.uniform(inner, outer, rings)
spokes = rng.uniform(0, 2*np.pi, spokes)
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
rings=rings, spokes=spokes,
dirCos=dirCos
)
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Check Gaussian Quadrature
rings = rng.integers(5, 11)
spokes = 2*rings+1
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer,
rings=rings,
spacing='GQ',
dirCos=dirCos
)
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
Li, w = np.polynomial.legendre.leggauss(rings)
rings = np.sqrt((1+Li)/2)*outer
flux = w*np.pi/(2*spokes)
spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
np.testing.assert_allclose(
rays.flux[len(rings)*i:len(rings)*(i+1)],
flux
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Sanity check GQ grids against literature
# Values from Forbes JOSA Vol. 5, No. 11 (1988) Table 1
rings = [1, 2, 3, 4, 5, 6]
rad = [
[0.70710678],
[0.45970084, 0.88807383],
[0.33571069, 0.70710678, 0.94196515],
[0.26349923, 0.57446451, 0.81852949, 0.96465961],
[0.21658734, 0.48038042, 0.70710678, 0.87706023, 0.97626324],
[0.18375321, 0.41157661, 0.61700114, 0.78696226, 0.91137517, 0.98297241]
]
w = [
[0.5],
[0.25, 0.25],
[0.13888889, 0.22222222, 0.13888889],
[0.08696371, 0.16303629, 0.16303629, 0.08696371],
[0.05923172, 0.11965717, 0.14222222, 0.11965717, 0.05923172],
[0.04283112, 0.09019039, 0.11697848, 0.11697848, 0.09019039, 0.04283112]
]
for rings_, rad_, w_ in zip(rings, rad, w):
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=1,
rings=rings_,
spacing='GQ',
dirCos=[0,0,-1]
)
spokes = rings_*2+1
radii = np.hypot(rays.x, rays.y)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings_*i:rings_*(i+1)],
rad_
)
np.testing.assert_allclose(
rays.flux[rings_*i:rings_*(i+1)]*spokes/(2*np.pi),
w_
)
@timer
def test_factory_optic():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
grid1 = batoid.RayVector.asGrid(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
nx=16
)
grid2 = batoid.RayVector.asGrid(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, lx=telescope.pupilSize,
nx=16
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asPolar(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
naz=100, nrad=20
)
grid2 = batoid.RayVector.asPolar(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
inner=telescope.pupilSize/2*telescope.pupilObscuration,
naz=100, nrad=20
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asSpokes(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
rings=10, spokes=21
)
grid2 = batoid.RayVector.asSpokes(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
rings=10, spokes=21
)
rays_allclose(grid1, grid2)
@timer
def test_getitem():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=10, naz=60
)
telescope.trace(rv)
# Single item indexing
for i in range(-len(rv), len(rv)):
rv1 = rv[i]
np.testing.assert_equal(rv1.r[0], rv.r[i])
np.testing.assert_equal(rv1.x[0], rv.x[i])
np.testing.assert_equal(rv1.y[0], rv.y[i])
np.testing.assert_equal(rv1.z[0], rv.z[i])
np.testing.assert_equal(rv1.v[0], rv.v[i])
np.testing.assert_equal(rv1.vx[0], rv.vx[i])
np.testing.assert_equal(rv1.vy[0], rv.vy[i])
np.testing.assert_equal(rv1.vz[0], rv.vz[i])
np.testing.assert_equal(rv1.t[0], rv.t[i])
np.testing.assert_equal(rv1.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv1.flux[0], rv.flux[i])
np.testing.assert_equal(rv1.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv1.failed[0], rv.failed[i])
assert rv1.r.flags.f_contiguous
assert rv1.v.flags.f_contiguous
# slice indexing
for i in range(-len(rv)//10, len(rv)//10):
slc = slice(i*10, (i+1)*10, 2)
rv2 = rv[slc]
np.testing.assert_equal(rv2.r, rv.r[slc])
np.testing.assert_equal(rv2.x, rv.x[slc])
np.testing.assert_equal(rv2.y, rv.y[slc])
np.testing.assert_equal(rv2.z, rv.z[slc])
np.testing.assert_equal(rv2.v, rv.v[slc])
np.testing.assert_equal(rv2.vx, rv.vx[slc])
np.testing.assert_equal(rv2.vy, rv.vy[slc])
np.testing.assert_equal(rv2.vz, rv.vz[slc])
np.testing.assert_equal(rv2.t, rv.t[slc])
np.testing.assert_equal(rv2.wavelength, rv.wavelength[slc])
np.testing.assert_equal(rv2.flux, rv.flux[slc])
np.testing.assert_equal(rv2.vignetted, rv.vignetted[slc])
np.testing.assert_equal(rv2.failed, rv.failed[slc])
assert rv2.r.flags.f_contiguous
assert rv2.v.flags.f_contiguous
# integer array indexing
idx = [0, -1, 1, -2, 2, -3, 50]
rv3 = rv[idx]
np.testing.assert_equal(rv3.r, rv.r[idx])
np.testing.assert_equal(rv3.x, rv.x[idx])
np.testing.assert_equal(rv3.y, rv.y[idx])
np.testing.assert_equal(rv3.z, rv.z[idx])
np.testing.assert_equal(rv3.v, rv.v[idx])
np.testing.assert_equal(rv3.vx, rv.vx[idx])
np.testing.assert_equal(rv3.vy, rv.vy[idx])
np.testing.assert_equal(rv3.vz, rv.vz[idx])
np.testing.assert_equal(rv3.t, rv.t[idx])
np.testing.assert_equal(rv3.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv3.flux, rv.flux[idx])
np.testing.assert_equal(rv3.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv3.failed, rv.failed[idx])
assert rv3.r.flags.f_contiguous
assert rv3.v.flags.f_contiguous
# boolean array indexing
idx = np.zeros(len(rv), dtype=bool)
idx[[0, -1, 5]] = True
rv4 = rv[idx]
np.testing.assert_equal(rv4.r, rv.r[idx])
np.testing.assert_equal(rv4.x, rv.x[idx])
np.testing.assert_equal(rv4.y, rv.y[idx])
np.testing.assert_equal(rv4.z, rv.z[idx])
np.testing.assert_equal(rv4.v, rv.v[idx])
np.testing.assert_equal(rv4.vx, rv.vx[idx])
np.testing.assert_equal(rv4.vy, rv.vy[idx])
np.testing.assert_equal(rv4.vz, rv.vz[idx])
np.testing.assert_equal(rv4.t, rv.t[idx])
np.testing.assert_equal(rv4.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv4.flux, rv.flux[idx])
np.testing.assert_equal(rv4.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv4.failed, rv.failed[idx])
assert rv4.r.flags.f_contiguous
assert rv4.v.flags.f_contiguous
# test iteration
for i, rv5 in enumerate(rv):
np.testing.assert_equal(rv5.r[0], rv.r[i])
np.testing.assert_equal(rv5.x[0], rv.x[i])
np.testing.assert_equal(rv5.y[0], rv.y[i])
np.testing.assert_equal(rv5.z[0], rv.z[i])
np.testing.assert_equal(rv5.v[0], rv.v[i])
np.testing.assert_equal(rv5.vx[0], rv.vx[i])
np.testing.assert_equal(rv5.vy[0], rv.vy[i])
np.testing.assert_equal(rv5.vz[0], rv.vz[i])
np.testing.assert_equal(rv5.t[0], rv.t[i])
np.testing.assert_equal(rv5.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv5.flux[0], rv.flux[i])
np.testing.assert_equal(rv5.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv5.failed[0], rv.failed[i])
assert rv5.r.flags.f_contiguous
assert rv5.v.flags.f_contiguous
for i, rv6 in enumerate(reversed(rv)):
np.testing.assert_equal(rv6.r[0], rv.r[-i-1])
np.testing.assert_equal(rv6.r[0], rv.r[-i-1])
np.testing.assert_equal(rv6.x[0], rv.x[-i-1])
np.testing.assert_equal(rv6.y[0], rv.y[-i-1])
np.testing.assert_equal(rv6.z[0], rv.z[-i-1])
np.testing.assert_equal(rv6.v[0], rv.v[-i-1])
np.testing.assert_equal(rv6.vx[0], rv.vx[-i-1])
np.testing.assert_equal(rv6.vy[0], rv.vy[-i-1])
np.testing.assert_equal(rv6.vz[0], rv.vz[-i-1])
np.testing.assert_equal(rv6.t[0], rv.t[-i-1])
np.testing.assert_equal(rv6.wavelength[0], rv.wavelength[-i-1])
np.testing.assert_equal(rv6.flux[0], rv.flux[-i-1])
np.testing.assert_equal(rv6.vignetted[0], rv.vignetted[-i-1])
np.testing.assert_equal(rv6.failed[0], rv.failed[-i-1])
assert rv6.r.flags.f_contiguous
assert rv6.v.flags.f_contiguous
with np.testing.assert_raises(IndexError):
rv[len(rv)]
with np.testing.assert_raises(IndexError):
rv[-len(rv)-1]
def test_fromStop():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=4, naz=10
)
rv_traced = telescope.trace(rv.copy())
rv_stop = telescope.stopSurface.interact(rv.copy())
for rv1, rv_traced1, rv_stop1 in zip(rv, rv_traced, rv_stop):
rv_test1 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2)
)
rv_test2 = batoid.RayVector.fromStop(
rv_stop1.x[0], rv_stop1.y[0],
optic=telescope, backDist=telescope.backDist, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y= | np.deg2rad(0.2) | numpy.deg2rad |
import copy
import itertools
import multiprocessing
import string
import traceback
import warnings
from multiprocessing import Pool
from operator import itemgetter
import jellyfish as jf
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy.stats import wasserstein_distance
from simod.configuration import Configuration, Metric
from . import alpha_oracle as ao
from .alpha_oracle import Rel
from ..support_utils import progress_bar_async
class SimilarityEvaluator:
"""Evaluates the similarity of two event-logs."""
def __init__(self, log_data: pd.DataFrame, simulation_data: pd.DataFrame, settings: Configuration, max_cases=500,
dtype='log'):
self.dtype = dtype
self.log_data = copy.deepcopy(log_data)
self.simulation_data = copy.deepcopy(simulation_data)
self.max_cases = max_cases
self.one_timestamp = settings.read_options.one_timestamp
self._preprocess_data(dtype)
def _preprocess_data(self, dtype):
preprocessor = self._get_preprocessor(dtype)
return preprocessor()
def _get_preprocessor(self, dtype):
if dtype == 'log':
return self._preprocess_log
elif dtype == 'serie':
return self._preprocess_serie
else:
raise ValueError(dtype)
def _preprocess_log(self):
self.ramp_io_perc = 0.2
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
data = pd.concat([self.log_data, self.simulation_data], axis=0, ignore_index=True)
if (('processing_time' not in data.columns) or ('waiting_time' not in data.columns)):
data = self.calculate_times(data)
data = self.scaling_data(data)
# save data
self.log_data = data[data.source == 'log']
self.simulation_data = data[data.source == 'simulation']
self.alias = self.create_task_alias(data, 'task')
self.alpha_concurrency = ao.AlphaOracle(self.log_data, self.alias, self.one_timestamp, True)
# reformat and sampling data
self.log_data = self.reformat_events(self.log_data.to_dict('records'), 'task')
self.simulation_data = self.reformat_events(self.simulation_data.to_dict('records'), 'task')
num_traces = int(len(self.simulation_data) * self.ramp_io_perc)
self.simulation_data = self.simulation_data[num_traces:-num_traces]
self.log_data = list(map(lambda i: self.log_data[i],
np.random.randint(0, len(self.log_data), len(self.simulation_data))))
def _preprocess_serie(self):
# load data
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
def measure_distance(self, metric: Metric, verbose=False):
"""
Measures the distance between two event logs
with TSD or DL and MAE distance
Returns
-------
distance : float
"""
self.verbose = verbose
# similarity measurement and matching
evaluator = self._get_evaluator(metric)
if metric in [Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
distance = evaluator(self.log_data, self.simulation_data, criteria=metric)
else:
distance = evaluator(self.log_data, self.simulation_data, metric)
self.similarity = {'metric': metric, 'sim_val': np.mean([x['sim_score'] for x in distance])}
def _get_evaluator(self, metric: Metric):
if self.dtype == 'log':
if metric in [Metric.TSD, Metric.DL, Metric.MAE, Metric.DL_MAE]:
return self._evaluate_seq_distance
elif metric is Metric.LOG_MAE:
return self.log_mae_metric
elif metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.log_emd_metric
else:
raise ValueError(metric)
elif self.dtype == 'serie':
if metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.serie_emd_metric
else:
raise ValueError(metric)
else:
raise ValueError(self.dtype)
# =============================================================================
# Timed string distance
# =============================================================================
def _evaluate_seq_distance(self, log_data, simulation_data, metric: Metric):
"""
Timed string distance calculation
Parameters
----------
log_data : Ground truth list
simulation_data : List
Returns
-------
similarity : tsd similarity
"""
similarity = list()
# define the type of processing: sequential or parallel
cases = len(set([x['caseid'] for x in log_data]))
if cases <= self.max_cases:
args = (metric, simulation_data, log_data,
self.alpha_concurrency.oracle,
({'min': 0, 'max': len(simulation_data)},
{'min': 0, 'max': len(log_data)}))
df_matrix = self._compare_traces(args)
else:
cpu_count = multiprocessing.cpu_count()
mx_len = len(log_data)
ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count / 2)))
ranges = list(itertools.product(*[ranges, ranges]))
reps = len(ranges)
pool = Pool(processes=cpu_count)
# Generate
args = [(metric, simulation_data[r[0]['min']:r[0]['max']],
log_data[r[1]['min']:r[1]['max']],
self.alpha_concurrency.oracle,
r) for r in ranges]
p = pool.map_async(self._compare_traces, args)
if self.verbose:
progress_bar_async(p, f'evaluating {metric}:', reps)
pool.close()
# Save results
df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)
df_matrix.sort_values(by=['i', 'j'], inplace=True)
df_matrix = df_matrix.reset_index().set_index(['i', 'j'])
if metric == Metric.DL_MAE:
dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()
mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()
# MAE normalized
max_mae = mae_matrix.max()
mae_matrix = np.divide(mae_matrix, max_mae)
# multiply both matrices by beta equal to 0.5
dl_matrix = | np.multiply(dl_matrix, 0.5) | numpy.multiply |
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(prog="region_optimize.py", description="Find the kernel parameters for Gaussian region zones.")
parser.add_argument("spectrum", help="JSON file containing the data, model, and residual.")
parser.add_argument("--sigma0", type=float, default=2, help="(AA) to use in fitting")
args = parser.parse_args()
import json
import numpy as np
from scipy.optimize import fmin
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
import Starfish
from Starfish.model import PhiParam
from Starfish.covariance import get_dense_C, make_k_func
from Starfish import constants as C
# Load the spectrum and then take the data products.
f = open(args.spectrum, "r")
read = json.load(f) # read is a dictionary
f.close()
wl = np.array(read["wl"])
# data_full = np.array(read["data"])
# model = np.array(read["model"])
resid = np.array(read["resid"])
sigma = np.array(read["sigma"])
spectrum_id = read["spectrum_id"]
order = read["order"]
fname = Starfish.specfmt.format(spectrum_id, order) + "regions.json"
f = open(fname, "r")
read = json.load(f) # read is a dictionary
f.close()
mus = np.array(read["mus"])
assert spectrum_id == read["spectrum_id"], "Spectrum/Order mismatch"
assert order == read["order"], "Spectrum/Order mismatch"
# Load the guesses for the global parameters from the .json
# If the file exists, optionally initiliaze to the chebyshev values
fname = Starfish.specfmt.format(spectrum_id, order) + "phi.json"
try:
phi = PhiParam.load(fname)
except FileNotFoundError:
print("No order parameter file found (e.g. sX_oXXphi.json), please run `star.py --initPhi` first.")
raise
# Purposely set phi.regions to None for this exercise, since we don't care about existing regions, and likely we want to overwrite them.
phi.regions = None
def optimize_region_residual(wl, residuals, sigma, mu):
'''
Determine the optimal parameters for the line kernels by fitting a Gaussian directly to the residuals.
'''
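# Sketch of the model being fit below (an assumption drawn from the surrounding code,
# not an authoritative statement of the Starfish API): the residuals R are modeled as
# R(wl) ~ amp * exp(-0.5 * rr**2 / sig**2), with rr the separation from mu in km/s,
# and the likelihood in fprob is evaluated against the dense covariance CC.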
# Using sigma0, truncate the wavelength vector and residulas to include
# only those portions that fall in the range [mu - sigma, mu + sigma]
ind = (wl > mu - args.sigma0) & (wl < mu + args.sigma0)
wl = wl[ind]
R = residuals[ind]
sigma = sigma[ind]
sigma_mat = phi.sigAmp * sigma**2 * np.eye(len(wl))
max_r = 6.0 * phi.l # [km/s]
k_func = make_k_func(phi)
# Use the full covariance matrix when doing the likelihood eval
CC = get_dense_C(wl, k_func=k_func, max_r=max_r) + sigma_mat
factor, flag = cho_factor(CC)
logdet = np.sum(2 * np.log((np.diag(factor))))
rr = C.c_kms/mu * np.abs(mu - wl) # Km/s
def fprob(p):
# The likelihood function
# Requires sign about amplitude, so we can't use log.
amp, sig = p
gauss = amp * | np.exp(-0.5 * rr**2/sig**2) | numpy.exp |
###################################################################
# EEPOWER.PY
#
# A library of functions, constants and more
# that are related to Power in Electrical Engineering.
#
# Written by <NAME>
#
# Special Thanks To:
# <NAME> - Idaho Power
#
# Included Constants:
# - Micro (mu) Multiple: u
# - Mili Multiple: m
# - Kilo Multiple: k
# - Mega Multiple: M
# - 'A' Operator for Symmetrical Components: a
# - Not a Number value (NaN): NAN
#
# Symmetrical Components Matricies:
# - ABC to 012 Conversion: Aabc
# - 012 to ABC Conversion: A012
#
# Included Functions
# - Phasor V/I Generator: phasor
# - Phasor Impedance Generator: phasorz
# - Complex Display Function: cprint
# - Parallel Impedance Adder: parallelz
# - V/I Line/Phase Converter: phaseline
# - Power Set Values: powerset
# - Power Triangle Function: powertriangle
# - Transformer SC OC Tests: transformertest
# - Phasor Plot Generator: phasorplot
# - Total Harmonic Distortion: thd
# - Total Demand Distortion: tdd
# - Reactance Calculator: reactance
# - Non-Linear PF Calc: nlinpf
# - Harmonic Limit Calculator: harmoniclimit
# - Power Factor Distortion: pfdist
# - Short-Circuit RL Current: iscrl
# - Voltage Divider: voltdiv
# - Current Divider: curdiv
# - Instantaneous Power Calc.: instpower
# - Delta-Wye Network Converter: dynetz
# - Single Line Power Flow: powerflow
# - Thermocouple Temperature: thermocouple
# - Cold Junction Voltage: coldjunction
# - RTD Temperature Calculator: rtdtemp
#
# Additional functions available in sub-modules:
# - passives.py (renamed from capacitor.py)
# - fault.py
# - electronics.py
# - perunit.py
# - systemsolution.py
###################################################################
name = "eepower"
ver = "2.2.1"
# Import Submodules
from .passives import *
from .perunit import *
from .systemsolution import *
# Import Submodules as External Functions
from . import fault
from . import electronics
# Import libraries as needed:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cmath as c
# Define constants
a = c.rect(1,np.radians(120)) # A Operator for Sym. Components
p = 1e-12 # Pico Multiple
n = 1e-9 # Nano Multiple
u = 1e-6 # Micro (mu) Multiple
m = 1e-3 # Mili Multiple
k = 1e+3 # Kilo Multiple
M = 1e+6 # Mega Multiple
NAN = float('nan')
VLLcVLN = c.rect(np.sqrt(3),np.radians(30)) # Conversion Operator
ILcIP = c.rect(np.sqrt(3),np.radians(-30)) # Conversion Operator
# Define symmetrical components matricies
Aabc = 1/3 * np.array([[ 1, 1, 1 ], # Convert ABC to 012
[ 1, a, a**2 ], # (i.e. phase to sequence)
[ 1, a**2, a ]])
A012 = np.array([[ 1, 1, 1 ], # Convert 012 to ABC
[ 1, a**2, a ], # (i.e. sequence to phase)
[ 1, a, a**2 ]])
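# Worked check (illustrative): a balanced positive-sequence set Vabc = [1, a**2, a]
# transforms as Aabc @ np.array([1, a**2, a]) ~= [0, 1, 0], i.e. only the
# positive-sequence component survives, as expected.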
# Define type constants
matrix = "<class 'numpy.matrixlib.defmatrix.matrix'>"
ndarr = "<class 'numpy.ndarray'>"
# Define Phasor Generator
def phasor( mag, ang ):
"""
phasor Function:
Generates the standard Pythonic complex representation
of a phasor voltage or current when given the magnitude
and angle of the specific voltage or current.
Parameters
----------
mag: float
The Magnitude of the Voltage/Current
ang: float
The Angle (in degrees) of the Voltage/Current
Returns
-------
phasor: complex
Standard Pythonic Complex Representation of
the specified voltage or current.
"""
return( c.rect( mag, np.radians( ang ) ) )
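# Example (illustrative values): phasor(67, 120) ~= -33.5 + 58.02j, i.e. magnitude 67 at an angle of 120 degrees.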
# Define Reactance Calculator
def reactance(z,f=60,sensetivity=1e-12):
"""
reactance Function:
Calculates the Capacitance or Inductance in Farads or Henrys
(respectively) provided the impedance of an element.
Will return capacitance (in Farads) if ohmic impedance is
negative, or inductance (in Henrys) if ohmic impedance is
positive. If imaginary: calculate with j factor (imaginary number).
Parameters
----------
z: complex
The Impedance Provided, may be complex (R+jI)
f: float, optional
The Frequency Base for Provided Impedance, default=60
sensetivity: float, optional
The sensetivity used to check if a resistance was
provided, default=1e-12
Returns
-------
out: float
Capacitance or Inductance of Impedance
"""
# Evaluate Omega
w = 2*np.pi*f
# Input is Complex
if isinstance(z, complex):
# Test for Resistance
if(abs(z.real) > sensetivity):
R = z.real
else:
R = 0
if (z.imag > 0):
out = z/(w*1j)
else:
out = 1/(w*1j*z)
out = abs(out)
# Combine with resistance if present
if(R!=0): out = (R, out)
else:
if (z > 0):
out = z/(w)
else:
out = 1/(w*z)
out = abs(out)
# Return Output
return(out)
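# Examples (illustrative, f=60 Hz): reactance(37.699j) ~= 0.1 (an inductance in Henrys),
# while reactance(-2.6526j) ~= 1.0e-3 (a capacitance in Farads).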
# Define display function
def cprint(val,unit="",label="",printval=True,ret=False,round=3):
"""
cprint Function
This function is designed to accept a complex value (val) and print
the value in the standard electrical engineering notation:
**magnitude ∠ angle °**
This function will print the angle in degrees, and can print
a unit and label in addition to the value itself.
Parameters
----------
val: complex
The Complex Value to be Printed, may be singular value,
tuple of values, or list/array.
unit: string, optional
The string to be printed corresponding to the unit mark.
default=""
label: string, optional
The pre-pended string used as a descriptive labeling string.
default=""
printval: bool, optional
Control argument enabling/disabling printing of the string.
default=True
ret: bool, optional
Control argument allowing the evaluated value to be returned.
default=False
round: int, optional
Control argument specifying how many decimals of the complex
value to be printed. May be negative to round to spaces
to the left of the decimal place (follows standard round()
functionality). default=3
Returns
-------
numarr: numpy.ndarray
The array of values corresponding to the magnitude and angle,
values are returned in the form: [[ mag, ang ],...,[ mag, ang ]]
where the angles are evaluated in degrees.
"""
printarr = np.array([]) # Empty array
numarr = np.array([]) # Empty array
# Find length of the input array
try:
len(val) # Test Case, For more than one Input
val = np.asarray(val) # Ensure that input is array
shp = val.shape
if(len(shp) > 1):
row, col = shp
else:
col = shp[0]
row = 1
val = val.reshape((col,row))
col = row
row = shp[0]
sz = val.size
mult = True
# Handle Label for Each Element
if label=="":
label = np.array([])
for _ in range(sz):
label = np.append(label,[""])
elif len(label)==1 or isinstance(label, str):
tmp = label
for _ in range(sz):
label = np.append(label,[tmp])
# Handle Unit for Each Element
if unit=="":
unit = np.array([])
for _ in range(sz):
unit = np.append(unit,[""])
elif len(unit)==1 or isinstance(unit, str):
tmp = unit
for _ in range(sz):
unit = np.append(unit,[tmp])
except:
row = 1
col = 1
sz = 1
mult = False
_label = label
_unit = unit
# For each value in the input (array)
for i in range(row):
if mult:
_val = val[i]
_label = label[i]
_unit = unit[i]
else:
_val = val
_label = label
_unit = unit
mag, ang_r = c.polar(_val) #Convert to polar form
ang = | np.degrees(ang_r) | numpy.degrees |
from collections import Counter
import numpy as np
from loguru import logger
from scipy.special import logsumexp
from sklearn import metrics
__all__ = ["evaluate_data_split"]
def evaluate_data_split(split, targets, estimator, prob_predictions):
res = dict()
predictions = estimator.classes_[prob_predictions.argmax(axis=1)]
for tr_val_te in split.unique():
inds = split == tr_val_te
yy, pp, ss = targets[inds], predictions[inds], prob_predictions[inds]
res[tr_val_te] = dict()
res[tr_val_te] = _classification_perf_metrics(
model=estimator, labels=np.asarray(yy).ravel(), predictions=np.asarray(pp), scores= | np.asarray(ss) | numpy.asarray |
"""Dispersion code."""
import functools
from os import PathLike
import typing
from typing import Any, cast, Dict, Iterable, List, Optional, Sequence, Union
import numpy as np
import scipy.spatial
from morfeus.calculators import D3Calculator, D3Grimme, D4Grimme
from morfeus.data import ANGSTROM_TO_BOHR, atomic_symbols, HARTREE_TO_KCAL, jmol_colors
from morfeus.geometry import Atom
from morfeus.io import CubeParser, D3Parser, D4Parser, read_geometry, VertexParser
from morfeus.sasa import SASA
from morfeus.typing import Array1D, Array2D, ArrayLike1D, ArrayLike2D
from morfeus.utils import convert_elements, get_radii, Import, requires_dependency
if typing.TYPE_CHECKING:
from matplotlib.colors import hex2color
import pymeshfix
import pyvista as pv
from pyvistaqt import BackgroundPlotter
import vtk
class Dispersion:
"""Calculates and stores the results for the 🍺P_int dispersion descriptor.
The descriptor is defined in 10.1002/anie.201905439. Morfeus can compute it based on
a surface either from vdW radii, surface vertices or the electron density.
Dispersion can be obtained with the D3 or D4 model.
Args:
elements: Elements as atomic symbols or numbers
coordinates: Coordinates (Å)
radii: VdW radii (Å)
radii_type: Choice of vdW radii: 'alvarez', 'bondi', 'crc', 'rahm' and 'truhlar'
point_surface: Use point surface from vdW radii
compute_coefficients: Whether to compute D3 coefficients with internal code
density: Area per point (Ų) on the vdW surface
excluded_atoms: Atoms to exclude (1-indexed). Used for substituent P_ints
included_atoms: Atoms to include. Used for functional group P_ints
Attributes:
area: Area of surface (Ų)
atom_areas: Atom indices as keys and atom areas as values (Ų)
atom_p_int: Atom indices as keys and P_int as values (kcal¹ᐟ² mol⁻¹ᐟ²)
atom_p_max: Atom indices as keys and P_max as values (kcal¹ᐟ² mol⁻¹ᐟ²)
atom_p_min: Atom indices as keys and P_min as values (kcal¹ᐟ² mol⁻¹ᐟ²)
p_int: P_int value for molecule (kcal¹ᐟ² mol⁻¹ᐟ²)
p_max: Highest P value (kcal¹ᐟ² mol⁻¹ᐟ²)
p_min: Lowest P value (kcal¹ᐟ² mol⁻¹ᐟ²)
p_values: All P values (kcal¹ᐟ² mol⁻¹ᐟ²)
volume: Volume of surface (ų)
Raises:
Exception: When both exluded_atoms and included_atom are given
"""
area: float
atom_areas: Dict[int, float]
atom_p_int: Dict[int, float]
atom_p_max: Dict[int, float]
atom_p_min: Dict[int, float]
p_int: float
p_max: float
p_min: float
p_values: Array1D
volume: float
_atoms: List[Atom]
_c_n_coefficients: Dict[int, Array1D]
_density: float
_excluded_atoms: List[int]
_point_areas: Array1D
_point_map: Array1D
_points: Array2D
_radii: Array1D
_surface: "pv.PolyData"
def __init__(
self,
elements: Union[Iterable[int], Iterable[str]],
coordinates: ArrayLike2D,
radii: Optional[ArrayLike1D] = None,
radii_type: str = "rahm",
point_surface: bool = True,
compute_coefficients: bool = True,
density: float = 0.1,
excluded_atoms: Optional[Sequence[int]] = None,
included_atoms: Optional[Sequence[int]] = None,
) -> None:
# Check that only excluded or included atoms are given
if excluded_atoms is not None and included_atoms is not None:
raise Exception("Give either excluded or included atoms but not both.")
# Converting elements to atomic numbers if the are symbols
elements = convert_elements(elements, output="numbers")
coordinates = np.array(coordinates)
# Set excluded atoms
all_atoms = set(range(1, len(elements) + 1))
if included_atoms is not None:
included_atoms_ = set(included_atoms)
excluded_atoms = list(all_atoms - included_atoms_)
elif excluded_atoms is None:
excluded_atoms = []
else:
excluded_atoms = list(excluded_atoms)
self._excluded_atoms = excluded_atoms
# Set up
self._surface = None
self._density = density
# Getting radii if they are not supplied
if radii is None:
radii = get_radii(elements, radii_type=radii_type)
radii = np.array(radii)
self._radii = radii
# Get vdW surface if requested
if point_surface:
self._surface_from_sasa(elements, coordinates)
else:
# Get list of atoms as Atom objects
atoms: List[Atom] = []
for i, (element, coord, radius) in enumerate(
zip(elements, coordinates, radii), start=1
):
atom = Atom(element, coord, radius, i)
atoms.append(atom)
self._atoms = atoms
# Calculate coefficients
if compute_coefficients:
self.compute_coefficients(model="id3")
# Calculate P_int values
if point_surface and compute_coefficients:
self.compute_p_int()
def _surface_from_sasa(
self,
elements: Union[Iterable[int], Iterable[str]],
coordinates: ArrayLike2D,
) -> None:
"""Get surface from SASA."""
sasa = SASA(
elements,
coordinates,
radii=self._radii,
density=self._density,
probe_radius=0,
)
self._atoms = sasa._atoms
self.area = sum(
[
atom.area
for atom in self._atoms
if atom.index not in self._excluded_atoms
]
)
self.atom_areas = sasa.atom_areas
self.volume = sum(
[
atom.volume
for atom in self._atoms
if atom.index not in self._excluded_atoms
]
)
# Get point areas and map from point to atom
point_areas: List[np.ndarray] = []
point_map = []
for atom in self._atoms:
n_points = len(atom.accessible_points)
if n_points > 0:
point_area = atom.area / n_points
else:
point_area = 0.0
atom.point_areas = np.repeat(point_area, n_points)
point_areas.extend(atom.point_areas)
point_map.extend([atom.index] * n_points)
self._point_areas = np.array(point_areas)
self._point_map = np.array(point_map)
@requires_dependency([Import(module="pyvista", alias="pv")], globals())
def surface_from_cube(
self,
file: Union[str, PathLike],
isodensity: float = 0.001,
method: str = "flying_edges",
) -> "Dispersion":
"""Adds an isodensity surface from a Gaussian cube file.
Args:
file: Gaussian cube file
isodensity: Isodensity value (electrons/bohr³)
method: Method for contouring: 'contour' or 'flying_edges'
Returns:
self: Self
"""
# Parse the cubefile
parser = CubeParser(file)
# Generate grid and fill with values
grid = pv.UniformGrid()
grid.dimensions = np.array(parser.X.shape)
grid.origin = (parser.min_x, parser.min_y, parser.min_z)
grid.spacing = (parser.step_x, parser.step_y, parser.step_z)
grid.point_arrays["values"] = parser.S.flatten(order="F")
self.grid = grid
# Contour and process the surface
surface = self._contour_surface(grid, method=method, isodensity=isodensity)
self._surface = surface
self._process_surface()
return self
@requires_dependency(
[Import("pymeshfix"), Import(module="pyvista", alias="pv")], globals()
)
def surface_from_multiwfn(
self, file: Union[str, PathLike], fix_mesh: bool = True
) -> "Dispersion":
"""Adds surface from Multiwfn vertex file with connectivity information.
Args:
file: Vertex.pdb file
fix_mesh: Whether to fix holes in the mesh with pymeshfix (recommended)
Returns:
self: Self
"""
# Read the vertices and faces from the Multiwfn output file
parser = VertexParser(file)
vertices = np.array(parser.vertices)
faces = np.array(parser.faces)
faces = np.insert(faces, 0, values=3, axis=1)
# Construct surface and fix it with pymeshfix
surface = pv.PolyData(vertices, faces, show_edges=True)
if fix_mesh:
meshfix = pymeshfix.MeshFix(surface)
meshfix.repair()
surface = meshfix.mesh
# Process surface
self._surface = surface
self._process_surface()
return self
def _process_surface(self) -> None:
"""Extracts face center points and assigns these to atoms based on proximity."""
# Get the area and volume
self.area = self._surface.area
self.volume = self._surface.volume
# Assign face centers to atoms according to Voronoi partitioning
coordinates = np.array([atom.coordinates for atom in self._atoms])
points = np.array(self._surface.cell_centers().points)
kd_tree = scipy.spatial.cKDTree(coordinates)
_, point_regions = kd_tree.query(points, k=1)
point_regions = point_regions + 1
# Compute faces areas
area_data = self._surface.compute_cell_sizes()
areas = np.array(area_data.cell_arrays["Area"])
# Assign face centers and areas to atoms
atom_areas = {}
for atom in self._atoms:
atom.accessible_points = points[point_regions == atom.index]
point_areas = areas[point_regions == atom.index]
atom.area = np.sum(point_areas)
atom.point_areas = point_areas
atom_areas[atom.index] = atom.area
# Set up attributes
self.atom_areas = atom_areas
self._point_areas = areas
self._point_map = point_regions
@requires_dependency(
[Import(module="pyvista", alias="pv"), Import("vtk")], globals()
)
@staticmethod
def _contour_surface(
grid: "pv.Grid", method: str = "flying_edges", isodensity: float = 0.001
) -> "pv.PolyData":
"""Counter surface from grid.
Args:
grid: Electron density as PyVista Grid object
isodensity: Isodensity value (electrons/bohr³)
method: Method for contouring: 'contour' or 'flying_edges'
Returns:
surface: Surface as Pyvista PolyData object
"""
# Select method for contouring
if method == "flying_edges":
contour_filter = vtk.vtkFlyingEdges3D()
elif method == "contour":
contour_filter = vtk.vtkContourFilter()
# Run the contour filter
isodensity = isodensity
contour_filter.SetInputData(grid)
contour_filter.SetValue(0, isodensity)
contour_filter.Update()
surface = contour_filter.GetOutput()
surface = pv.wrap(surface)
return surface
def compute_p_int(self, points: Optional[ArrayLike2D] = None) -> "Dispersion":
"""Compute P_int values for surface or points.
Args:
points: Points to compute P values for
Returns:
self: Self
"""
# Set up atoms and coefficients that are part of the calculation
atom_indices = np.array(
[
atom.index - 1
for atom in self._atoms
if atom.index not in self._excluded_atoms
]
)
coordinates = np.array([atom.coordinates for atom in self._atoms])
coordinates = coordinates[atom_indices]
c_n_coefficients = dict(self._c_n_coefficients)
for key, value in c_n_coefficients.items():
c_n_coefficients[key] = np.array(value)[atom_indices] * HARTREE_TO_KCAL
# Take surface points if none are given
if points is None:
points = np.vstack(
[
atom.accessible_points
for atom in self._atoms
if atom.index not in self._excluded_atoms
and atom.accessible_points.size > 0
]
)
atomic = True
else:
points = np.array(points)
atomic = False
# Calculate p_int for each point
dist = scipy.spatial.distance.cdist(points, coordinates) * ANGSTROM_TO_BOHR
p = np.sum(
[
np.sum(np.sqrt(coefficients / (dist**order)), axis=1)
for order, coefficients in c_n_coefficients.items()
],
axis=0,
)
p = cast(np.ndarray, p)
self.p_values = p
# Take out atomic p_ints if no points are given
if atomic:
atom_p_max = {}
atom_p_min = {}
atom_p_int = {}
i_start = 0
for atom in self._atoms:
if atom.index not in self._excluded_atoms:
n_points = len(atom.accessible_points)
if n_points > 0:
i_stop = i_start + n_points
atom_ps = p[i_start:i_stop]
atom.p_values = atom_ps
atom_p_max[atom.index] = np.max(atom_ps)
atom_p_min[atom.index] = np.min(atom_ps)
atom_p_int[atom.index] = np.sum(
atom_ps * atom.point_areas / atom.area
)
i_start = i_stop
else:
atom_p_max[atom.index] = 0
atom_p_min[atom.index] = 0
atom_p_int[atom.index] = 0
atom.p_values = np.array([])
self.atom_p_max = atom_p_max
self.atom_p_min = atom_p_min
self.atom_p_int = atom_p_int
point_areas = self._point_areas[np.isin(self._point_map, atom_indices + 1)]
self.p_int = np.sum(p * point_areas / self.area)
# Calculate p_min and p_max with slight modification to Robert's
# definitions
self.p_min = np.min(p)
self.p_max = np.max(p)
# Map p_values onto surface
if self._surface:
mapped_p = np.zeros(len(p))
for atom in self._atoms:
if atom.index not in self._excluded_atoms:
mapped_p[self._point_map == atom.index] = atom.p_values
self._surface.cell_arrays["values"] = mapped_p
self._surface = self._surface.cell_data_to_point_data()
# Store points for later use
self._points = points
return self
def compute_coefficients(
self, model: str = "id3", order: int = 8, charge: int = 0
) -> "Dispersion":
"""Compute dispersion coefficients.
Can either use internal D3 model or D4 or D3-like model available through
Grimme's dftd4 program.
Args:
model: Calculation model: 'id3'. 'gd3' or 'gd4'
order: Order of the Cᴬᴬ coefficients
charge: Molecular charge for D4 model
Returns:
self: Self
Raises:
ValueError: When model not supported
"""
# Set up atoms and coordinates
elements = [atom.element for atom in self._atoms]
coordinates = | np.array([atom.coordinates for atom in self._atoms]) | numpy.array |
import sys, os
import numpy as np
from numpy.linalg import norm
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import logging
import traceback
import timeit
import time
import math
from ast import literal_eval as make_tuple
import platform
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d
import glob
import pickle
import myFunctions as mf
import copy
from operator import itemgetter
from os.path import join
import inspect
from scipy.optimize import fsolve, fmin_tnc, least_squares, differential_evolution, minimize, fmin_l_bfgs_b, basinhopping
import myFunctions as mf
from scipy import stats
class FluidNetwork(object):
"""
Unified framework for doing the fluid simulation. At this stage, the graph used has already been reduced, i.e., each
edge represens a segment in `segmentList` and each node represents a bifurcation. Previously, each segment may be
consisted of one or more edges. To reduce the previous graph, use the function `reduceGraph`. Also, for the sake of
consistency, the original `segmentInfoDict` has been renamed to `edgeInfoDict`, `segmentList` to `edgeList`, but
`nodeInfoDict` remains the same. Besides, the nodes and edges are now indexed by integers starting from zero for
simplicity. Use the function `convertGraph` to do the conversion.
So the `fundemental staff` that you need to have are: `edgeList`, `edgeInfoDict`, `nodeInfoDict`, `G`. These are
necessary to all of the subsquent analysis. To perform a blood flow simulation, you need to do the following steps:
1. Get the graph and the `fundemental staff` by either creating one or loading an existing one.
2. Set c and k (used in H-W equation) for each edge by `setNetwork`.
3. Set the terminating pressures by `setTerminatingPressure`.
4. Generate H-W equations for each edge and flow conservation equations for each node by `setupFluidEquations`.
5. Solve the equations by optimization and use `computerNetworkDetail` as objective function.
The fluid simulation tries to solve the network by finding a set of pressures for each node and a set of flow rates
for each edges such that H-W equations and flow conservation equations are satisfied with the given set of
terminating pressures. For a binary tree structure without merges, a solution is guaranteed to exist no matter what
the terminating pressures look like. However, for the network with merges (e.g., the GBM network with CoW), it is
possible that a solution does not exist for the given set of terminating pressures. Therefore, for these cases, we
need to check the optimization result and check whether the error in each equations are within a acceptable range.
Note that not all the functions in this class should be used. Some are just for experimental purposes!
"""
def __init__(self):
self.directory = os.path.abspath(os.path.dirname(__file__))
self.edgeList = []
self.edgeIndexList = []
self.G = nx.Graph()
self.rootNode = 0
self.edgeInfoDict = {}
self.nodeInfoDict = {}
self.nodeIndex = 0
self.edgeIndex = 0
self.spacing = 0.00040 # meter/voxel
self.eqnInfoDictList = []
self.velocityPressure = []
self.velocityPressureGroundTruth = []
self.distributeFlowEqnDict = {}
self.nodeInfoDictBefore = {}
self.nodeInfoDictAfter = {}
self.edgeInfoDictBefore = {}
self.edgeInfoDictAfter = {}
def generateNetwork(self, maxDepth=10, allowMerge=False):
"""
Generate a binary tree with random edge and node properties.
Parameters
----------
maxDepth : int, optional
Maximum depth of the graph (depth start from zero).
allowMerge : bool, optional
If True, there will be 30% change that two edges at the same depth will merge together.
"""
G = nx.Graph()
nodeDepth, edgeDepth = 0, 0
G.add_node(0, depth=nodeDepth, depthLevel=nodeDepth, nodeIndex=self.nodeIndex, isEntryNode=True) # first node
self.nodeIndex += 1
while nodeDepth <= maxDepth - 1:
nodesAtCurrentDepth = [node for node in G.nodes() if G.node[node]['depth'] == nodeDepth]
if len(nodesAtCurrentDepth) > 2:
# Determine if merge would happen
if allowMerge:
mergeAtCurrentDepth = (np.random.rand() <= 0.3) # 30% probability TODO: this should be controlled by function arguments
else:
mergeAtCurrentDepth = False
# Merge nodes if allowed
if mergeAtCurrentDepth:
numOfMerges = 1 # TODO: this should be controlled by function arguments
nodesToMerge = np.random.choice(nodesAtCurrentDepth, 2, replace=False)
newNode = self.nodeIndex
newEdgeIndex1, newEdgeIndex2 = self.edgeIndex, self.edgeIndex + 1 # TODO: allow >2 edge merge?
G.add_edge(nodesToMerge[0], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.add_edge(nodesToMerge[1], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex + 1, segmentIndex=self.edgeIndex + 1)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 2
for currentNode in nodesAtCurrentDepth:
numOfChildEdges = len([node for node in G[currentNode].keys() if G.node[node]['depth'] > nodeDepth])
numOfNewEdges = 2 - numOfChildEdges # TODO: allow for more child edges?
for ii in range(numOfNewEdges):
newNode = self.nodeIndex
G.add_edge(currentNode, newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 1
nodeDepth += 1
edgeDepth += 1
# Gather data
edgeList = [0] * self.edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
nodeInfoDict, edgeInfoDict = {}, {}
for node in G.nodes():
nodeInfoDict[node] = G.node[node]
nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[node]['coord'] = []
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeInfoDict[edgeIndex] = G[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Save
self.G = G
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def loadNetwork(self, version=2, year=2013):
"""
Load old version of data files (that needs to be converted).
"""
directory = self.directory
if version == 1:
filename = 'basicFilesForStructureWithCoW(year={}).pkl'.format(year)
elif version == 2:
filename = 'basicFilesForStructureWithCoW2(year={}).pkl'.format(year)
elif version == 3:
filename = 'basicFilesForStructureWithCoW3(year={}).pkl'.format(year)
elif version == 4:
filename = 'basicFilesForStructureWithCoW4(year={}).pkl'.format(year)
with open(join(directory, filename), 'rb') as f:
resultDict = pickle.load(f)
with open(join(directory, 'partitionInfo.pkl'), 'rb') as f:
partitionInfo = pickle.load(f)
with open(join(directory, 'chosenVoxelsForPartition.pkl'), 'rb') as f:
chosenVoxels = pickle.load(f)
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
resultDict['resultADANDict'] = resultADANDict
resultDict['partitionInfo'] = partitionInfo
resultDict['chosenVoxels'] = chosenVoxels
self.loadedNetwork = resultDict
def reduceGraph(self, G, segmentList, segmentIndexList):
"""
Reduce the graph such that the node is either terminating or bifurcating point.
Parameters
----------
G : NetworkX graph
The graph representation of the network.
segmentList : list
A list of segments in which each segment is a simple branch.
segmentIndexList : list
A list of segment indices referring to the segments actually be used in `segmentList`.
Returns
-------
DG : NetworkX graph
The reduced graph (each edge refers to a segment).
"""
DG = nx.DiGraph()
for segmentIndex in segmentIndexList:
segment = segmentList[segmentIndex]
head, tail, secondNode = segment[0], segment[-1], segment[1]
headLevel, tailLevel = G.node[head]['depthLevel'], G.node[tail]['depthLevel']
if headLevel > tailLevel:
head, tail, secondNode = tail, head, segment[-2]
headLevel, tailLevel = tailLevel, headLevel
DG.add_path([head, tail])
for key, value in G[head][secondNode].items():
DG[head][tail][key] = value
for key, value in G.node[head].items():
DG.node[head][key] = value
for key, value in G.node[tail].items():
DG.node[tail][key] = value
return DG
def convertNetowrk(self):
"""
Convert the old version of data files into the new version used here.
"""
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
segmentIndexList = list(segmentInfoDictOld.keys())
heartLoc = (255, 251, 26) # change as needed
DG = self.reduceGraph(GOld, segmentList, segmentIndexList)
G = nx.Graph()
nodeInfoDict, edgeInfoDict = {}, {}
nodeIndex, edgeIndex = 0, 0
maxNodeDepth = np.max([DG.node[node]['depthLevel'] for node in DG.nodes()])
for currentDepth in range(maxNodeDepth + 1):
nodesAtCurrentDepth = [node for node in DG.nodes() if DG.node[node]['depthLevel'] == currentDepth]
for node in nodesAtCurrentDepth:
G.add_node(nodeIndex, depth=DG.node[node]['depthLevel'], nodeIndex=nodeIndex, coord=node)
DG.node[node]['nodeIndexHere'] = nodeIndex
if node == heartLoc:
G.node[nodeIndex]['isEntryNode'] = True
rootNode = nodeIndex
else:
G.node[nodeIndex]['isEntryNode'] = False
nodeIndex += 1
for edge in DG.edges():
depth = np.min([DG.node[edge[0]]['depthLevel'], DG.node[edge[1]]['depthLevel']])
DG[edge[0]][edge[1]]['depth'] = depth
maxEdgeDepth = np.max([DG[edge[0]][edge[1]]['depth'] for edge in DG.edges()])
for currentDepth in range(maxEdgeDepth + 1):
edgesAtCurrentDepth = [edge for edge in DG.edges() if DG[edge[0]][edge[1]]['depth'] == currentDepth]
for edge in edgesAtCurrentDepth:
G.add_edge(DG.node[edge[0]]['nodeIndexHere'], DG.node[edge[1]]['nodeIndexHere'], depth=currentDepth, edgeIndex=edgeIndex)
edgeIndex += 1
currentNodeIndex = nodeIndex
currentEdgeIndex = edgeIndex
edgeList = [[]] * edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
for node in DG.nodes():
nodeIndexHere = DG.node[node]['nodeIndexHere']
nodeInfoDict[nodeIndexHere] = DG.node[node]
nodeInfoDict[nodeIndexHere]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[nodeIndexHere]['coord'] = []
for edge in DG.edges():
edgeIndex = G[DG.node[edge[0]]['nodeIndexHere']][DG.node[edge[1]]['nodeIndexHere']]['edgeIndex']
segmentIndex = DG[edge[0]][edge[1]]['segmentIndex']
edgeInfoDict[edgeIndex] = DG[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['length'] = DG[edge[0]][edge[1]]['pathLength'] # backward compatibility
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Sync between G and nodeInfoDict
for node in G.nodes():
for key, value in G.node[node].items():
nodeInfoDict[node][key] = value
# Save
self.G = G
self.edgeIndex = currentEdgeIndex
self.nodeIndex = currentNodeIndex
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.rootNode = rootNode
def adjustNetwork(self):
"""
If the network changes, recheck the correspondence between branch name and edgeIndex!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# LICA(Pre)
edgeInfoDict[0]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[0]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# LICA(Post)
edgeInfoDict[3]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[3]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Pre)
edgeInfoDict[2]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[2]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Post)
edgeInfoDict[7]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[7]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# VA
# edgeInfoDict[1]['meanRadius'] = 2.0 / (spacing * 1000) # mm->voxel
edgeInfoDict[1]['length'] = 28 / (spacing * 1000) # mm->voxel
# RPCAComm
edgeInfoDict[4]['length'] = 16 / (spacing * 1000) # mm->voxel
# RMCA(first segment)
# edgeInfoDict[12]['length'] = 8 / (spacing * 1000) # mm->voxel
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def setNetwork(self, option=1, extraInfo=None):
"""
Set c and k (and possibly radius and length) for each branch
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# Use BraVa data to set the radius and ADAN result to set the c and k
if option == 1:
minSetLength, maxSetLength = 1, 70 # mm
# Params used to fit radius to edgeLevel using the BraVa data. radius (mm) = a * np.exp(-b * edgeLevel) + c
fitResultDict = {'LMCA': {'param': [0.5569, 0.4199, 0.469]}, 'RMCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LPCA': {'param': [0.6571, 0.3252, 0.2949]}, 'RPCA': {'param': [0.7103, 0.5587, 0.3815]}, 'ACA': {'param': [0.3604, 1.0538, 0.4714]}} # new names
# fitResultDict = {'LCA': {'param': [0.5569, 0.4199, 0.469]}, 'RCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LACA': {'param': [0.6571, 0.3252, 0.2949]}, 'RACA': {'param': [0.7103, 0.5587, 0.3815]}, 'PCA': {'param': [0.3604, 1.0538, 0.4714]}} # old names
a, b, c = fitResultDict['LMCA']['param']
for edgeIndex in edgeIndexList:
edgeLevel = edgeInfoDict[edgeIndex]['depth']
radius = (a * np.exp(-b * edgeLevel) + c) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['meanRadius'] = radius # voxel
length = (np.random.rand() * (maxSetLength - minSetLength) + minSetLength) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['pathLength'] = length # for backward compatibility
edgeInfoDict[edgeIndex]['length'] = length # voxel
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
c = c if c > 0 else 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Only set c and k using ADAN result
elif option == 2:
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
if extraInfo is not None:
excludedEdgeIndex = itemgetter('excludedEdgeIndex')(extraInfo)
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
# print('slopeCRadius={}, interceptCRadius={}'.format(slopeCRadius, interceptCRadius))
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
# if extraInfo is not None:
# edgeIndexListToUse = [edgeIndex for edgeIndex in edgeIndexList if edgeIndex not in excludedEdgeIndex]
# else:
# edgeIndexListToUse = edgeIndexList
edgeIndexListToUse = edgeIndexList
for edgeIndex in edgeIndexListToUse:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
# c = c if c > 0 else 0.1
if radius * 1000 >= 1.5 and radius * 1000 <= 2.5:
c = 1
else:
if c < 0:
c = 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def showFlowInfo(self):
"""
Print out flow rates for selected edges and pressure for selected nodes.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for edgeIndex in range(16):
flow = edgeInfoDict[edgeIndex]['simulationData']['flow']
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(edgeInfoDict[edgeIndex])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
flow = -1 #
radius *= (spacing * 100) # convert to cm
length *= (spacing * 100) # convert to cm
print('Edge {}: flow={:.3f} cm^3/s, radius={:.4f} cm, length={:.4f} cm, c={:.4f}, k={:.4f}'.format(edgeIndex, flow, radius, length, c, k))
print('\n')
for node in range(16):
flow, pressure = itemgetter('flow', 'pressure')(nodeInfoDict[node]['simulationData'])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
flow = -1
if pressure is not None:
pressure /= (13560*9.8/1000) # convert to mmHg
else:
pressure = -1
print('Node {}: flow={:.3f} cm^3/s, pressure={:.3f} mmHg'.format(node, flow, pressure))
def getFlowInfoFromDeltaPressure(self, edgeIndex, deltaPressure):
"""
Calculate the required flow/velocity in order to achieve the given pressure drop for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
deltaPressure : float
The desired pressure drop with a unit of Pascal.
Returns
-------
flow : float
The required flow rate to achieve the desired pressure drop, in m^3/s.
velocity : float
The velocity in that edge corresponding to the required flow rate.
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
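# The next line inverts the Hazen-Williams-style relation used throughout this class,
# deltaPressure = 10.67 * Q**k * length / (c**k * (2*radius)**4.8704), solving for the flow Q (SI units).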
flow = np.power(deltaPressure * c**k * (2*radius)**4.8704 / 10.67 / length, 1/k) # m^3/s
velocity = flow / (np.pi * radius**2) # m/s
return flow, velocity
def getDeltaPressureFromFlow(self, edgeIndex, flow):
"""
Calculate the required pressure drop in order to achieve the given flow for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
flow : float
The desired flow rate of the edge, in m^3/s.
Returns
-------
deltaPressure : float
The required pressure drop in the edge to achieve the desired flow rate with a unit of Pascal.
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
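# Hazen-Williams-style head loss evaluated directly in SI units with diameter D = 2*radius:
# deltaPressure = 10.67 * flow**k * length / (c**k * D**4.8704).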
deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704
return deltaPressure
def createGroundTruth(self, seed=None, option=1):
"""
Manually set the velocity and pressure for all edges/nodes in order to check whether the solver is correct.
Option 1: each child branch randomly takes ~1/N (with some random fluctuation) of the parent flow.
Option 2: flow is split proportional to the cross sectional area of the child branches.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
success = False
# Set argsIndex (index of pressure/velocity unknowns in the fluid simulation)
argsIndex = 0
for edgeIndex in edgeIndexList:
edgeInfoDict[edgeIndex]['argsIndex'] = argsIndex
argsIndex += 1
for node in G.nodes():
nodeInfoDict[node]['isBifurcatingNode'] = False
nodeList = [node for node in G.nodes() if node != 0 and G.degree(node) != 1]
for node in nodeList:
nodeInfoDict[node]['argsIndex'] = argsIndex
nodeInfoDict[node]['isBifurcatingNode'] = True
argsIndex += 1
minSetVelocity, maxSetVelocity = 0.01, 3 # m/s
inletPressure = 13560 * 9.8 * 0.12 # Pascal
inletVelocity = 1.5 # m/s
inletFlow = 754/60/10**6 # m^3/s
minSplitAmout, maxSplitAmout = 0.4, 0.6
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth):
## first deal with the nodes whose child edge merges
nodesAtNextDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth + 1]
for nodeAtNextDepth in nodesAtNextDepth:
parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] == currentDepth]
# parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[nodeAtNextDepth]['depth']]
if len(parentNodes) > 1:
# print('Node {} merge into {}'.format(parentNodes, nodeAtNextDepth))
flowAtParentNodes = [nodeInfoDict[node]['simulationData']['flow'] for node in parentNodes] # m^3/s
degreeAtParentNodes = [G.degree(node) for node in parentNodes]
pressureAtParentNodes = [nodeInfoDict[node]['simulationData']['pressure'] for node in parentNodes]
parentEdgeIndexList = [G[nodeAtNextDepth][node]['edgeIndex'] for node in parentNodes]
parentEdgeDeltaPressureList = [self.getDeltaPressureFromFlow(edgeIndex, flow) for edgeIndex, flow in zip(parentEdgeIndexList, flowAtParentNodes)]
nodeMinPressureList = [headPressure - deltaPressure for headPressure, deltaPressure in zip(pressureAtParentNodes, parentEdgeDeltaPressureList)]
if degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] > 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] > 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, True
if nodeMinPressureList[0] != nodeMinPressureList[1]:
success = False
print('Error! Two straight edges cannot achieve the same end pressure')
return success
print('Warning! Two straight edges merge into one node')
else:
if nodeMinPressureList[0] > nodeMinPressureList[1]:
loc1, loc2 = 0, 1
else:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = False, False
edgeIndex1, edgeIndex2 = parentEdgeIndexList[loc1], parentEdgeIndexList[loc2]
parentNode1, parentNode2 = parentNodes[loc1], parentNodes[loc2]
parentPressure1, parentPressure2 = pressureAtParentNodes[loc1], pressureAtParentNodes[loc2]
parentFlow1, parentFlow2 = flowAtParentNodes[loc1], flowAtParentNodes[loc2]
radius1, radius2 = edgeInfoDict[edgeIndex1]['meanRadius'] * spacing, edgeInfoDict[edgeIndex2]['meanRadius'] * spacing
length1, length2 = edgeInfoDict[edgeIndex1]['length'] * spacing, edgeInfoDict[edgeIndex2]['length'] * spacing
c1, c2 = edgeInfoDict[edgeIndex1]['c'], edgeInfoDict[edgeIndex2]['c']
k1, k2 = edgeInfoDict[edgeIndex1]['k'], edgeInfoDict[edgeIndex2]['k']
flowCounter = 0
# for the first edge
maxPossibleFlow = parentFlow1
minDeltaPressure = np.max([0, pressureAtParentNodes[loc1] - pressureAtParentNodes[loc2]])
minPossibleFlow, _ = self.getFlowInfoFromDeltaPressure(parentEdgeIndexList[loc1], minDeltaPressure)
if minPossibleFlow > maxPossibleFlow:
success = False
print('Error while merging node {} to node {}, minPossibleFlow ({}) is larger than maxPossibleFlow ({})'.format(parentNodes, nodeAtNextDepth, minPossibleFlow, maxPossibleFlow))
return success
if isEdge1StraightPipe:
flow1 = parentFlow1
if flow1 >= minPossibleFlow and flow1 <= maxPossibleFlow:
pass
else:
print('Edge {} wants to use all flow={} from node {}, but possible range is [{}, {}]'.format(edgeIndex1, flow1, parentNode1, minPossibleFlow, maxPossibleFlow))
else:
# flow1 = np.random.rand() * (maxPossibleFlow - minPossibleFlow) + minPossibleFlow
flow1 = (maxPossibleFlow + minPossibleFlow) / 2
## Manual manipulation !!! ##
if nodeAtNextDepth == 10:
if edgeIndex1 == 9:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
elif edgeIndex1 == 11:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
# radius8, radius9 = edgeInfoDict[8]['meanRadius'], edgeInfoDict[9]['meanRadius']
# flow9 = maxPossibleFlow * radius9**2 / (radius8**2 + radius9**2)
# print('Edge {} get flow={}'.format(edgeIndex1, flow1))
velocity1 = flow1 / (np.pi * radius1**2) # m/s
edgeInfoDict[edgeIndex1]['simulationData']['velocity'] = velocity1
edgeInfoDict[edgeIndex1]['simulationData']['flow'] = flow1
deltaPressure1 = 10.67 * flow1**k1 * length1 / c1**k1 / (2*radius1)**4.8704
tailPressure = parentPressure1 - deltaPressure1 # pressure at the merging node
nodeInfoDict[nodeAtNextDepth]['simulationData']['pressure'] = tailPressure
flowCounter += flow1
# the other edge
deltaPressure2 = parentPressure2 - tailPressure
flow2 = np.power(deltaPressure2 / 10.67 / length2 * c2**k2 * (2*radius2)**4.8704, 1/k2)
velocity2 = flow2 / (np.pi * radius2**2) # m/s
edgeInfoDict[edgeIndex2]['simulationData']['velocity'] = velocity2
edgeInfoDict[edgeIndex2]['simulationData']['flow'] = flow2
flowCounter += flow2
nodeInfoDict[nodeAtNextDepth]['simulationData']['flow'] = flowCounter
if flow2 > parentFlow2:
print('Node {}: the flow ({}) in other edge is larger than provided ({})'.format(nodeAtNextDepth, flow2, parentFlow2))
print('edgeIndex1={}, edgeIndex2={}, flow1={}, flow2={}'.format(edgeIndex1, edgeIndex2, flow1, flow2))
print(nodeInfoDict[1]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[3]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[2]['simulationData']['pressure']/13560/9.8*1000)
## Now deal with remaining nodes
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth]
for currentNode in nodesAtCurrentDepth:
if currentDepth == 0:
nodeInfoDict[currentNode]['simulationData']['pressure'] = inletPressure
nodeInfoDict[currentNode]['simulationData']['flow'] = inletFlow
flowIn = inletFlow
pressureIn = inletPressure
# print('inletPressure={} mmHg, inletFlow={} cm^3/s, currentDepth={}'.format(inletPressure/13560/9.8*1000, inletFlow*10**6, currentDepth))
else:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
if flowIn is None:
print('Node {} has flow=None, nodesAtCurrentDepth={}'.format(currentNode, nodesAtCurrentDepth))
pressureIn = nodeInfoDict[currentNode]['simulationData']['pressure']
edgeIndexAtNextDepth = [G[currentNode][neighborNode]['edgeIndex'] for neighborNode in G[currentNode].keys() if nodeInfoDict[neighborNode]['depth'] > currentDepth]
edgeIndexToProcess = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is None]
edgeIndexCompleted = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is not None]
edgeCounter = len(edgeIndexToProcess)
flowAvailable = nodeInfoDict[currentNode]['simulationData']['flow']
for edgeIndex in edgeIndexCompleted:
flowAvailable -= edgeInfoDict[edgeIndex]['simulationData']['flow']
if flowAvailable < 0 - np.finfo(float).eps:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
flowUsed = ['Edge {}: {}'.format(edgeIndex, edgeInfoDict[edgeIndex]['simulationData']['flow']) for edgeIndex in edgeIndexCompleted]
print('Error! Node {}: flowIn={}, flowUsed={}, flowAvailable={}'.format(currentNode, flowIn, flowUsed, flowAvailable))
flowAmount = []
# Random split the flow (within a range)
if option == 1:
while edgeCounter >= 1:
if edgeCounter > 1:
basePercentage = 100 / edgeCounter
fluctuationPercentage = basePercentage / 3.0
actualPercentage = basePercentage - fluctuationPercentage/2 + np.random.rand() * fluctuationPercentage
# actualPercentage = (np.random.rand() * 0.8 + 0.1) * 100
flow = flowAvailable * actualPercentage / 100
if flow < 0:
print('Node {}: flow < 0, actualPercentage={}, flowAvailable={}'.format(currentNode, actualPercentage, flowAvailable))
flowAmount.append(flow)
flowAvailable -= flow
if flowAvailable < 0:
print('Node {}: flowAvailable < 0, actualPercentage={}'.format(currentNode, actualPercentage))
else:
flowAmount.append(flowAvailable)
edgeCounter -= 1
elif option == 2:
radiusList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexToProcess]
radiusSqList = [radius**2 for radius in radiusList]
sumOfRadiusSq = np.sum(radiusSqList)
flowAmount = [flowAvailable * radiusSq / sumOfRadiusSq for radiusSq in radiusSqList]
## Manual manipulation !!! ###
if currentNode == 0 and G.degree(currentNode) == 3:
edgeIndexToProcess = [0, 2, 1] # LICA/RICA/VA
inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
flowAmount = [inletFlow*0.4, inletFlow*0.4, inletFlow*0.2]
# elif currentNode == 8:
# edgeIndexToProcess = [16, 17] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
# elif currentNode == 9:
# edgeIndexToProcess = [18, 19] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
for edgeIndex, flow in zip(edgeIndexToProcess, flowAmount):
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = flow / (np.pi * radius**2) # m/s
edgeHead, edgeTail = edge[0], edge[1]
if nodeInfoDict[edgeHead]['depth'] > nodeInfoDict[edgeTail]['depth']:
edgeHead, edgeTail = edgeTail, edgeHead
pressureHead = nodeInfoDict[edgeHead]['simulationData']['pressure']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
deltaPressure = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704 # Pascal
if np.isnan(deltaPressure):
print('velocity={}, flow={}'.format(velocity, flow))
pressureTail = pressureHead - deltaPressure # Pascal
nodeInfoDict[edgeTail]['simulationData']['pressure'] = pressureTail
nodeInfoDict[edgeTail]['simulationData']['flow'] = flow
# print('Node {} (head={}, edgeIndex={}), flow={}'.format(edgeTail, edgeHead, edgeIndex, flow))
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# print('Pressure at {} = {} mmHg, currentDepth={}'.format(edgeTail, pressureTail/13560/9.8*1000, currentDepth))
# if edgeIndex ==5 or edgeIndex == 6:
# print('Node {}, edgeIndex={}, flow={} cm^3/s, deltaPressure={} mmHg'.format(currentNode, edgeIndex, flow*10**6, deltaPressure/13560/9.8*1000))
velocityPressure = [0] * argsIndex
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
pressure = nodeInfoDict[node]['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
velocity = edgeInfoDict[edgeIndex]['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.velocityPressure = velocityPressure # Ground truth solution
self.velocityPressureGroundTruth = velocityPressure # Ground truth solution
success = True
return success
def getVelocityPressure(self):
"""
Extract velocity and pressure from edgeInfoDict and nodeInfoDict.
Returns
-------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
"""
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
velocityPressure = np.hstack((np.full((numOfEdges,), 0.0), np.full((numOfNodes,), 0.0))) # make sure dtype is float
for node, info in nodeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex, info in edgeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
return velocityPressure
def getVolumePerPartition(self):
"""
Calculate the total volume of each compartment.
Returns
-------
volumePerPartition : dict
A dictionary with compartment names as keys and volumes (in mm^3) as the corresponding values.
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
volumePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
totalVolume = 0
for edgeIndex in visitedEdges:
radius, length = itemgetter('meanRadius', 'length')(edgeInfoDict[edgeIndex])
radius = radius * spacing * 1000 # mm
length = length * spacing * 1000 # mm
edgeVolume = np.pi * radius**2 * length # mm^3
totalVolume += edgeVolume
volumePerPartition[partitionName] = totalVolume
return volumePerPartition
def showTerminatingPressureAndPathLength(self):
"""
Check terminating pressure vs path length relationship.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
terminatingNodes = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == maxDepth]
terminatingPressure = [nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000 for node in terminatingNodes] # mmHg
terminatingPathLength = []
for node in terminatingNodes:
path = nx.shortest_path(G, self.rootNode, node)
pathEdgeIndex = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in pathEdgeIndex]) # meter
terminatingPathLength.append(pathLength)
fig = plt.figure(1, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
plt.plot(terminatingPathLength, terminatingPressure, 'bo')
plt.xlabel('Path length (m)')
plt.ylabel('Terminating pressure (mmHg)')
plt.show()
def setupFluidEquations(self, boundaryCondition=None):
"""
Programmatically stores the info to generate the conservation equations used for fluid simulation (each dict represents an equation).
There are two kinds of equations: an H-W equation for each edge and a flow conservation equation for each node,
plus optional boundary conditions. For the H-W equation, the
information is stored in a dictionary as:
{'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
For the flow conservation equation, the information is stored as:
{'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
For the boundary conditions (inlet or outlet velocity), the information is stored as:
{'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
All of the units are SI units. The dictionaries that hold these equations are then stored in `eqnInfoDictList`.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing for neighborIndexIn in neighborsIndexIn]
radiusOutList = [edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [edgeInfoDict[neighborIndexIn]['argsIndex'] for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [edgeInfoDict[neighborIndexOut]['argsIndex'] for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = nodeInfoDict[headNode]['simulationData']['pressure']
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = nodeInfoDict[headNode]['argsIndex']
headNodeIndex = nodeInfoDict[headNode]['nodeIndex']
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = nodeInfoDict[tailNode]['simulationData']['pressure']
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
# print('Tail node {} has pressure={} mmHg'.format(tailNode, tailPressure/13560/9.8*1000))
else:
tailPressureIndex = nodeInfoDict[tailNode]['argsIndex']
tailNodeIndex = nodeInfoDict[tailNode]['nodeIndex']
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = info['edgeIndex']
velocityIn = info['velocityIn']
edge = edgeList[edgeIndex]
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupFluidEquationsMatLab(self, boundaryCondition=None):
"""
Programmatically stores the info to generate the conservation equations used for fluid simulation (each dict represents an equation).
Note that the Python-MatLab bridge only accepts generic python types, and thus all numpy types need to be converted.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [float(edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing) for neighborIndexIn in neighborsIndexIn]
radiusOutList = [float(edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing) for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [int(edgeInfoDict[neighborIndexIn]['argsIndex']) for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [int(edgeInfoDict[neighborIndexOut]['argsIndex']) for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': int(nodeInfoDict[node]['nodeIndex']), 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = float(edgeInfoDict[edgeIndex]['meanRadius'] * spacing)
length = float(edgeInfoDict[edgeIndex]['length'] * spacing)
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
c, k = float(edgeInfoDict[edgeIndex]['c']), float(edgeInfoDict[edgeIndex]['k'])
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': int(edgeIndex)}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = float(nodeInfoDict[headNode]['simulationData']['pressure'])
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = int(nodeInfoDict[headNode]['argsIndex'])
headNodeIndex = int(nodeInfoDict[headNode]['nodeIndex'])
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = float(nodeInfoDict[tailNode]['simulationData']['pressure'])
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
else:
tailPressureIndex = int(nodeInfoDict[tailNode]['argsIndex'])
tailNodeIndex = int(nodeInfoDict[tailNode]['nodeIndex'])
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = int(info['edgeIndex'])
velocityIn = float(info['velocityIn'])
edge = edgeList[edgeIndex]
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupEquationsForDistributeFlow(self):
"""
Set up the equations for distributeFlowTest(). This function is unfinished. TODO
The resulting file is distributeFlowEqnDict and it contains three fields:
-- 'connectInfoDictList' --
It is a list of dicts and each dict represents an edge and it contains:
-- 'connection' -- In the form of [headNode, edgeIndex, tailNode]
-- 'edgeInfo' -- Contains subfields 'c'/'k'/'radius'/'length'
-- 'mergeInfoDict' --
Each merging node is a key and the corresponding value is empty (for now)
-- 'desiredTerminatingPressures' --
Each terminating node is a key and the corresponding value is the desired terminating pressure for that node
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
edgeList = self.edgeList
spacing = self.spacing
distributeFlowEqnDict = {'connectInfoDictList': [], 'mergeInfoDict': {}, 'desiredTerminatingPressures': {}}
edgeDepthArray = np.array([edgeInfoDict[edgeIndex]['depth'] for edgeIndex in edgeIndexList])
edgeIndexListSorted = np.array(edgeIndexList)[edgeDepthArray.argsort()].tolist()
for edgeIndex in edgeIndexListSorted:
edge = edgeList[edgeIndex]
headNode, tailNode = edge
if nodeInfoDict[headNode]['depth'] > nodeInfoDict[tailNode]['depth']:
headNode, tailNode = tailNode, headNode
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
distributeFlowEqnDict['connectInfoDictList'].append({'connection': [headNode, edgeIndex, tailNode], 'edgeInfo': {'radius': radius, 'length': length, 'c': c, 'k': k}})
for currentNode in G.nodes():
parentNodes = [node for node in G[currentNode].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[currentNode]['depth']]
if len(parentNodes) > 1:
distributeFlowEqnDict['mergeInfoDict'][currentNode] = {}
for node in G.nodes():
if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0:
distributeFlowEqnDict['desiredTerminatingPressures'][node] = 13560*9.8*0.12 # Pascal
print(edgeIndexListSorted)
print(distributeFlowEqnDict['mergeInfoDict'])
# Save #
self.distributeFlowEqnDict = distributeFlowEqnDict
def validateFluidEquations(self, velocityPressure=None, boundaryCondition=None):
"""
Validate if all of the equations generated by `setupFluidEquations` are satisfied. This function will output errors for
each of the equations and the corresponding details. Note that the error for each equation is amplified in the same way as
in the function `computeNetworkDetail`.
Parameters
----------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = self.eqnInfoDictList
if velocityPressure is None:
velocityPressure = self.velocityPressure
counter = 0
pressureErrorList, flowErrorList = [], []
pressureErrorTrueList, flowErrorTrueList = [], []
for eqnInfoDict in eqnInfoDictList:
eqnType = eqnInfoDict['type']
if eqnType == 'pressure':
radius, length, velocityIndex, edgeIndex = itemgetter('radius', 'length', 'velocityIndex', 'edgeIndex')(eqnInfoDict)
velocity = np.abs(velocityPressure[velocityIndex])
c, k = eqnInfoDict['c'], eqnInfoDict['k']
if 'pressure' in eqnInfoDict['headPressureInfo']:
headPressure = eqnInfoDict['headPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['headPressureInfo']:
pressureIndex = eqnInfoDict['headPressureInfo']['pressureIndex']
headPressure = velocityPressure[pressureIndex]
headPressureInmmHg = headPressure / 13560 / 9.8 * 1000
if 'pressure' in eqnInfoDict['tailPressureInfo']:
tailPressure = eqnInfoDict['tailPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['tailPressureInfo']:
pressureIndex = eqnInfoDict['tailPressureInfo']['pressureIndex']
tailPressure = velocityPressure[pressureIndex]
tailPressureInmmHg = tailPressure / 13560 / 9.8 * 1000
deltaPressureByNode = np.abs(headPressure - tailPressure)
deltaPressureByHW = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704
error = np.abs(deltaPressureByNode - deltaPressureByHW)
deltaPressureByHWInmmHg = deltaPressureByHW / 13560 / 9.8 * 1000
errorInmmHg = error / 13560 / 9.8 * 1000
pressureErrorList.append(errorInmmHg * 500)
pressureErrorTrueList.append(errorInmmHg)
print('error={:.4f} mmHg, headP={:.2f} mmHg, tailP={:.2f} mmHg, headP>tailP={}, deltaPByHW={:.2f} mmHg, velocity={:.3f} cm/s, radius={:.4f} cm, length={:.4f} cm, edgeIndex={}'.format(errorInmmHg,
headPressureInmmHg, tailPressureInmmHg, headPressure>tailPressure, deltaPressureByHWInmmHg, velocity*100, radius*100, length*100, edgeIndex))
if headPressure <= tailPressure:
counter += 1
elif eqnType == 'flow':
velocityInIndexList, radiusInList = eqnInfoDict['velocityInIndexList'], eqnInfoDict['radiusInList']
velocityOutIndexList, radiusOutList = eqnInfoDict['velocityOutIndexList'], eqnInfoDict['radiusOutList']
neighborsInEdgeIndex, neighborsOutEdgeIndex = itemgetter('neighborsInEdgeIndex', 'neighborsOutEdgeIndex')(eqnInfoDict)
velocityInList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityInIndexList]
velocityOutList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityOutIndexList]
flowIn = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityInList, radiusInList)])
flowOut = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityOutList, radiusOutList)])
error = np.abs(flowIn - flowOut)
inVel = [np.round(100*vel, 4) for vel in velocityInList]
inR = [np.round(100*r, 4) for r in radiusInList]
inFlow = np.round(flowIn*10**6, 4)
outVel = [np.round(100*vel, 4) for vel in velocityOutList]
outR = [np.round(100*r, 4) for r in radiusOutList]
outFlow = np.round(flowOut*10**6, 4)
errorT = np.round(error*10**6, 4)
coord = eqnInfoDict['coord']
flowErrorList.append(error * 10**6 * 20000)
flowErrorTrueList.append(error * 10**6)
print('error={} cm^3/s, inVel={} cm/s, inR={} cm, inFlow={} cm^3/s, outVel={} cm/s, outR={} cm, outFlow={} cm^3/s, coord={}'.format(errorT, inVel, inR, inFlow, outVel, outR, outFlow, coord))
elif eqnType == 'boundary':
velocityIndex, velocityIn = eqnInfoDict['velocityIndex'], eqnInfoDict['velocityIn']
velocityActual = np.abs(velocityPressure[velocityIndex])
error = np.abs(velocityActual - velocityIn)
print('error={}, desired inlet velocity={} cm/s, actual velocity={} cm/s'.format(error, velocityIn*100, velocityActual*100))
totalErrorList = pressureErrorList + flowErrorList
totalError = norm(totalErrorList)
print('There are {} flow eqns where headPressure<=tailPressure'.format(counter))
print('Pressure error: mean+-std={}+-{} mmHg, min={} mmHg, max={} mmHg'.format(np.mean(pressureErrorTrueList), np.std(pressureErrorTrueList), np.amin(pressureErrorTrueList), np.max(pressureErrorTrueList)))
print('Flow error: mean+-std={}+-{} cm^3/s, min={} cm^3/s, max={} cm^3/s'.format(np.mean(flowErrorTrueList), np.std(flowErrorTrueList), np.amin(flowErrorTrueList), np.max(flowErrorTrueList)))
print('Combined error (magnified): {}'.format(totalError))
def BFS(self, startNodes, boundaryNodes):
"""
Start from given node(s), visit other nodes at larger depth in a BFS fashion.
Parameters
----------
startNodes : list
A list of nodes to start with.
boundaryNodes : list
A list of nodes used as the boundary.
Returns
-------
resultDict : dict
A dictionary containing the indices of visited edges and nodes.
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
visitedNodes, visitedEdges = [], []
for startNode in startNodes:
nodesAtCurrentDepth = [startNode]
while len(nodesAtCurrentDepth) != 0:
nodesAtNextDepth = []
for currentNode in nodesAtCurrentDepth:
visitedNodes.append(currentNode)
newNodes = [node for node in G[currentNode].keys() if nodeInfoDict[currentNode]['depth'] < nodeInfoDict[node]['depth'] and node not in boundaryNodes and node not in visitedNodes]
newEdges = [G[currentNode][newNode]['edgeIndex'] for newNode in newNodes]
nodesAtNextDepth += newNodes
visitedEdges += newEdges
nodesAtCurrentDepth = nodesAtNextDepth
resultDict = {'visitedNodes': visitedNodes, 'visitedEdges': visitedEdges}
return resultDict
def calculateVariableBounds(self):
"""
Calculate the pressure bound for each node and velocity bound for each branch (because pressure at child nodes
cannot be higher than that of the parent node).
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth-1, 0, -1):
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth and G.degree(node) != 1]
for nodeAtCurrentDepth in nodesAtCurrentDepth:
childNodes = [node for node in G[nodeAtCurrentDepth].keys() if nodeInfoDict[node]['depth'] > currentDepth]
minPressureAtChildNodes = [nodeInfoDict[node]['simulationData']['minPressure'] if 'argsIndex' in nodeInfoDict[node] else nodeInfoDict[node]['simulationData']['pressure'] for node in childNodes]
nodeInfoDict[nodeAtCurrentDepth]['simulationData']['minPressure'] = np.amax(minPressureAtChildNodes)
# print('minPressure for node {} is set'.format(nodeAtCurrentDepth))
# Save #
self.nodeInfoDict = nodeInfoDict
def perturbNetwork(self, option=1, extraInfo=None):
"""
Perturb the network in various ways
Option=1: randomly choose {numOfEdgesToPerturb} branches and decrease the radius by {reducePercentage}
Option=2: use the radius from year={perturbedYear}
Option=3: radius of the edges in {partitionToPerturb} are decreased by {reducePercentage}
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if option == 1:
if extraInfo is None:
numOfEdgesToPerturb = 5
reducePercentage = 30
else:
numOfEdgesToPerturb, reducePercentage = itemgetter('numOfEdgesToPerturb', 'reducePercentage')(extraInfo)
edgeIndexToPerturb = np.random.choice(edgeIndexList, numOfEdgesToPerturb)
for edgeIndex in edgeIndexToPerturb:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
elif option == 2:
perturbedYear, excludedEdgeIndex = itemgetter('perturbedYear', 'excludedEdgeIndex')(extraInfo)
self.loadNetwork(version=4, year=perturbedYear)
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
for edgeIndex in edgeIndexList:
if edgeIndex not in excludedEdgeIndex:
segmentIndex = edgeInfoDict[edgeIndex]['segmentIndex'] # segmentIndex is the index of the edges in the old files
perturbedRadius = segmentInfoDictOld[segmentIndex]['meanRadius']
edgeInfoDict[edgeIndex]['meanRadius'] = perturbedRadius
elif option == 3:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []},
'ACA': {'startNodes': [10], 'boundaryNodes': []}}
partitionToPerturb, reducePercentage = itemgetter('partitionToPerturb', 'reducePercentage')(extraInfo)
for partitionName, info in partitionInfo.items():
if partitionName in partitionToPerturb:
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
for edgeIndex in visitedEdges:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def perturbTerminatingPressure(self, option=1, extraInfo=None):
"""
Perturb the terminating pressure in various ways
Option=1: pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
Option=2: No change
Option=3: All left compartments -30%, no change to all other compartments
Option=4: pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': -0.44}
Option=5: pressureDropChangePerPartition obtained from extraInfo
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
if option == 1:
pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
elif option == 2:
pressureDecreasePerPartition = {'LMCA': 0, 'RMCA': 0, 'ACA': 0, 'LPCA': 0, 'RPCA': 0}
elif option == 3:
pressureDecreasePerPartition = {'LMCA': -0.3, 'RMCA': 0, 'ACA': 0, 'LPCA': -0.3, 'RPCA': 0}
elif option == 4:
pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': 0.44}
elif option == 5:
pressureDropChangePerPartition = extraInfo['pressureDropChangePerPartition']
rootPressure = 13560*9.8*0.12 # Pa
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
if option in [1,2,3]:
decreaseAmount = pressureDecreasePerPartition[partitionName]
nodeInfoDict[terminatingNode]['simulationData']['pressure'] *= (1-decreaseAmount)
elif option in [4, 5]:
changeAmount = pressureDropChangePerPartition[partitionName]
oldPressure = nodeInfoDict[terminatingNode]['simulationData']['pressure']
newPressure = rootPressure - (rootPressure - oldPressure) * (1+changeAmount)
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = newPressure
# terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
# terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
# print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
def printTerminatingPressurePerPartition(self, partitionInfo=None):
"""
Print out terminating pressures in each compartment.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if partitionInfo is None:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
def setTerminatingPressure(self, option=1, extraInfo=None):
"""
Set the terminating pressure based on the terminating pressure vs path length relationship found in ADAN.
Note: make sure to use the right slope!!!
Option=1: all partitions use the slope from ADAN dataset
Option=2: use custom slope for each partition
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}}
# Use the slope and intercept from the ADAN dataset
if option == 1:
slopePressurePathLength, interceptPressurePathLength = itemgetter('slopePressurePathLength', 'interceptPressurePathLength')(resultADANDict)
print('slope={}, intercept={}'.format(slopePressurePathLength, interceptPressurePathLength))
fitResultPerPartition = {'LMCA': [slopePressurePathLength, interceptPressurePathLength], 'RMCA': [slopePressurePathLength, interceptPressurePathLength],
'LPCA': [slopePressurePathLength, interceptPressurePathLength], 'RPCA': [slopePressurePathLength, interceptPressurePathLength],
'ACA': [slopePressurePathLength, interceptPressurePathLength]}
# Use the slope and intercept fitted from a ground truth solution
elif option == 2:
fitResultPerPartition = extraInfo['fitResultPerPartition']
elif option == 3:
pass
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes, pressureIn = itemgetter('startNodes', 'boundaryNodes', 'pressureIn')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
slopePressurePathLength, interceptPressurePathLength = fitResultPerPartition[partitionName]
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
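                # Linear terminating-pressure vs path-length model; note that the fitted slope is scaled by 0.8 here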
pressure = pressureIn + pathLength * slopePressurePathLength * 0.8
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = pressure
terminatingPressuresInThisPartition.append(np.round(pressure/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
def fitTerminatingPressureToPathLength(self, showFittingResult=False, figIndex=1, isLastFigure=False):
"""
Extract the terminating pressures from the existing fluid solution and fit them to path length per compartment.
Check the manual correction for LMCA!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'color': 'r'}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'color': 'g'},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'color': 'b'}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'color': 'y'},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'color': 'c'}}
fitResultPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
terminatingPressurePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
pathLengthPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = [nodeInfoDict[node]['simulationData']['pressure'] for node in terminatingNodesInThisPartition] # Pascal
pathLengthInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
pathLengthInThisPartition.append(pathLength)
# Check this manual correction!
# if partitionName == 'LMCA':
# terminatingPressuresInThisPartition = [val for val in terminatingPressuresInThisPartition if val <= 13560*9.8*0.1]
# pathLengthInThisPartition = [val1 for val1, val2 in zip(pathLengthInThisPartition, terminatingPressuresInThisPartition) if val2 <= 13560*9.8*0.1]
terminatingPressurePerPartition[partitionName] = terminatingPressuresInThisPartition
pathLengthPerPartition[partitionName] = pathLengthInThisPartition
# slopeTerminatingPressureVSPathLength, interceptTerminatingPressureVSPathLength = np.polyfit(pathLengthInThisPartition, terminatingPressuresInThisPartition, 1)
slopePressurePathLength, interceptPressurePathLength, rSqPressurePathLength, pPressurePathLength, stdErrorPressurePathLength = stats.linregress(pathLengthInThisPartition, terminatingPressuresInThisPartition)
print('{}: slopePressurePathLength={} Pa/m, interceptPressurePathLength={} Pa, rSquared={}, pValue={}'.format(partitionName, slopePressurePathLength, interceptPressurePathLength, rSqPressurePathLength, pPressurePathLength))
fitResultPerPartition[partitionName] = [slopePressurePathLength, interceptPressurePathLength]
if showFittingResult:
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
ax = fig.add_subplot(1,5,1)
for partitionName, info in partitionInfo.items():
terminatingPressuresInThisPartition = terminatingPressurePerPartition[partitionName]
pathLengthInThisPartition = pathLengthPerPartition[partitionName]
xValues = [val * 1000 for val in pathLengthInThisPartition] # mm
yValues = [val / 13560 / 9.8 * 1000 for val in terminatingPressuresInThisPartition] # mmHg
color = info['color']
ax.scatter(xValues, yValues, c=color, label=partitionName)
ax.set_xlabel('Path length (mm)')
ax.set_ylabel('Terminating pressure (mmHg)')
ax.legend(prop={'size': 6})
if isLastFigure:
plt.show()
return fitResultPerPartition
def updateNetworkWithSimulationResult(self, velocityPressure):
"""
Update the flow rate and pressure in `edgeInfoDict` and `nodeInfoDict` with the given `velocityPressure`.
"""
G = self.G
edgeIndexList = self.edgeIndexList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
nodeInfoDict[node]['simulationData']['pressure'] = velocityPressure[argsIndex]
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = velocityPressure[argsIndex] # m/s
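                # Volumetric flow rate = mean velocity * cross-sectional area (m^3/s)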
flow = velocity * np.pi * radius**2
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def loadFluidResult(self, loadFileName, return_ResultDict=False):
"""
Load the saved fluid simulation result.
For use with GBMTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResult')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
if return_ResultDict is False:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
else:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed, resultDict
def loadFluidResult2(self, loadFileName):
"""
Load the saved fluid simulation result.
For use with computeNetworkTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResultRandomNetwork')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
def GBMTest(self, saveResult=False):
"""
        Create a GBM network with radii following the BraVa distribution, generate a ground-truth solution, perturb the network
        in a particular way while keeping the terminating pressures unchanged, and then try to solve the network.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
success = self.createGroundTruth()
self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
edgeNameDict = {0: 'LICA', 3: 'LICA', 2: 'RICA', 7: 'RICA', 1: 'VA', 4: 'RPCA\nComm', 8: 'LMCA', 9: 'LM', 11: 'RM', 10: 'RMCA', 5: 'LPCA', 6: 'RPCA', 20: 'ACA'}
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeDepth
nodeLabelDict = {} # None
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeDepth
nodeValueList = [0 for node in G.nodes()] # None
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeDepth
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()} # edge radius
edgeLabelDict = {edge: edgeNameDict[G[edge[0]][edge[1]]['edgeIndex']] if G[edge[0]][edge[1]]['edgeIndex'] in edgeNameDict else '' for edge in G.edges()} # edge name
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeDepth
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()] # edgeIndex
edgeValueList = [0 for edge in G.edges()] # None
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': [],
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': [],
'figTitle': 'Major branch name'}
self.plotNetwork(infoDict, figIndex=2, isLastFigure=True)
return
# print(G.edges(data=True))
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# Load previous optimization result #
loadFileName = 'fluidSimulationResult3(referenceYear=BraVa, perturbedYear=2013).pkl'
nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed = self.loadFluidResult(loadFileName)
velocityPressureInit = velocityPressurePerturbed
self.nodeInfoDict = nodeInfoDictPerturbed
self.edgeInfoDict = edgeInfoDictPerturbed
computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
        pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this value is consistent with the one used in the reference case!
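        # Initial guess: 0.4 m/s for every unknown edge velocity; node pressures decrease linearly from 80% to 50% of the inlet pressure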
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = self.nodeInfoDict[node]['argsIndex']
minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
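        # Fluid resistance model flag passed to computeNetworkDetail ('HW' presumably stands for Hazen-Williams)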
fluidMethod = 'HW'
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
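        # basinhopping: niter = max number of hopping iterations, T = Metropolis acceptance temperature, stepsize = random displacement size,
        # interval = how often the stepsize is adapted, niter_success = stop early if the best minimum is unchanged for this many iterations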
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=100, stepsize=50, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}'.format(extraInfo['perturbedYear'])}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest2(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
        Perturb the terminating pressures in a specific way and check whether the new system can be solved.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]} # perturbTerminatingPressureOption=2
# perturbTerminatingPressureOption = 1
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
        pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this value is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# GBM_BraVa_Reference flow_perturbTerminatingPressureOption=4_GBMTest2
# GBM_2013_Solved flow_perturbTerminatingPressureOption=4_GBMTest2
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}, TPOption={}'.format(extraInfo['perturbedYear'], perturbTerminatingPressureOption)} # TP->terminating pressure
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest2(referenceYear={}, perturbedYear={}, perturbTerminatingPressureOption={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'], perturbTerminatingPressureOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest3(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
Test the solver
flowResult_referenceYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_referenceYear(BraVa)_groundTruthOption=2_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=2_GBMTest3
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}, 'solvedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
# perturbTerminatingPressureOption = 1
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
        pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this value is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# velocityPressureInit = self.getVelocityPressure() # Get velocityPressure from ground truth solution
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# computeNetworkDetail(velocityPressureInit, eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo)
# self.validateFluidEquations(velocityPressure=velocityPressureInit)
# print(list(zip(self.velocityPressureGroundTruth, velocityPressureInit)))
# return
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM Solved'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest3(referenceYear={}, solvedYear={}, groundTruthOption=2).pkl'.format(resultDict['referenceYear']['year'], resultDict['solvedYear']['year'])
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest4(self, perturbNetworkOption=1, saveResult=False):
"""
        Perturb the radii in a specific way, set the terminating pressures (TP) using the path-length relationship, and solve the network.
        Option=1: all LMCA edge radii decrease by 10%
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
extraInfo = {'partitionToPerturb': ['LMCA'], 'reducePercentage': 10}
self.perturbNetwork(option=perturbNetworkOption, extraInfo=extraInfo)
self.setNetwork(option=2)
self.createGroundTruth(option=2)
self.setTerminatingPressure(option=1, extraInfo=None)
computeNetworkDetailExtraInfo = None
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
        pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this value is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM BraVa, perturbNetworkOption={}'.format(perturbNetworkOption)}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest4(solvedYear=BraVa, perturbNetworkOption={}).pkl'.format(perturbNetworkOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest5(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
        Using the GBM network and the radius info from BraVa and 2013, interpolate the radii (in different ways) for
        the time points in between, and change the terminating pressures based on the volume change of each compartment.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
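        # e.g. with radiusHead=1, radiusTail=2, numOfTimeSteps=4: option 1 gives [1, 1.33, 1.67, 2] and option 2 gives [1, 1.58, 1.87, 2]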
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(4, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
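            # A relative volume increase in a compartment gives a negative pressureDropChange, i.e. a smaller pressure drop from the root
            # and hence higher terminating pressures in that compartment (see perturbTerminatingPressure option 5); a volume decrease does the opposite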
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
            pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this value is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
def GBMTest5b(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time point in between, TODO !!!
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResultTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
print(pressureDropChangePerPartition)
def GBMTest6(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
        Exactly the same as GBMTest5, but with slightly tweaked solver settings, to see whether the results can be improved.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check if this number is consistent with what was used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=2000, stepsize=1000, interval=5, niter_success=16, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def computeNetworkTest(self, saveResult=False):
"""
Check whether the solver can correctly solve a system by first creating a ground-truth model and then comparing the simulation result with it
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
directory = self.directory
resultDict = {'reference': {}, 'perturbed': {}}
self.generateNetwork(maxDepth=5, allowMerge=False)
self.setNetwork(option=1)
success = False
self.createGroundTruth()
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# self.showTerminatingPressureAndPathLength()
resultDict['reference'] = {'G': G, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check if this number is consistent with that in generateNetwork()!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
minPressure = nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
## intentionally perturb the inlet/terminating pressure away from ground truth to see how the solver reacts
# self.nodeInfoDict[0]['simulationData']['pressure'] = 13560*9.8*0.12*(1-np.random.rand()*0.1) # perturb inlet pressure
## perturb terminating pressure
perturbPercent = 0.1
for node in G.nodes():
if G.degree(node) == 1:
self.nodeInfoDict[node]['simulationData']['pressure'] *= (np.random.rand() * perturbPercent + 1 - perturbPercent / 2)
## Perturb radius
# extraInfo = {'numOfEdgesToPerturb': 10, 'reducePercentage': 30}
# self.perturbNetwork(option=1, extraInfo=extraInfo)
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# minimize (L-BFGS-B), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'maxiter': 25000, 'maxfun': 25000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, method='L-BFGS-B', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# minimize (BFGS), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'norm': 2, 'maxiter': 30000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), method='BFGS', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 0
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'options': {'norm': np.inf, 'maxiter': 30000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 30000, 'maxfun': 30000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=300, T=5, stepsize=5, interval=5, niter_success=20, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# Matlab fsolve #
# self.setupFluidEquationsMatLab()
# eqnInfoDictList = self.eqnInfoDictList
# import matlab.engine, io
# # eng = matlab.engine.start_matlab()
# eng = matlab.engine.connect_matlab()
# eng.addpath('/Users/zhuj10/Dropbox/NIH/Data/Ron Data/1358-Subject18016/fluidSimulationWithCoW')
# print(matlab.engine.find_matlab())
# out = io.StringIO()
# err = io.StringIO()
# solver = 'fsolve'
# solver = 'lsqnonlin'
# # solver = 'Validate'
# # velocityPressureGroundTruth = self.velocityPressureGroundTruth
# # velocityPressureInit = [float(p) for p in velocityPressureTrue]
# velocityPressureInit = [float(p) for p in velocityPressureInit]
# optResult = eng.performFluidSimulation4ForMatLab(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # optResult = eng.testMatLab1(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # print(optResult)
# print(out.getvalue())
# print(err.getvalue())
# cost = optResult['error']
# message = optResult['message']
# velocityPressure = optResult['optParam'][0]
##
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
# --------------------------------------------------------------------------------
# Copyright (c) 2017-2020, <NAME>, All rights reserved.
#
# Defines the Euclidean and pseudo-Euclidean geometries.
# --------------------------------------------------------------------------------
import numpy as np
import cdg.utils
import cdg.geometry
def generate_uniformplane_data(n, d, perturb=0):
# XX = np.random.rand(n, d-1)
XX = np.random.rand(n, d - 1)
X1 = np.dot(XX, np.ones((d - 1, 1)))
X = np.zeros((n, d))
X[:, :1] += X1
X[:, 1:] += XX
if perturb != 0:
X += np.random.normal(scale=perturb, size=X.shape)
return X
def generate_banana_data(n, d, perturb=0):
# XX = np.random.rand(n, d-1)
alpha = np.random.rand(n, 1) * np.pi * .7
X2 = np.concatenate((np.sin(alpha), np.cos(alpha)), axis=1)
X = np.zeros((n, d))
X[:, :2] += X2
if perturb != 0:
X += np.random.normal(scale=perturb, size=X.shape)
return X
def generate_uniformsemisphere_data(n, d, radius, perturb=0):
# XX = np.random.rand(n, d-1)
XX = np.random.normal(size=(n, d)) * .03
# X1 = np.random.rand(n,1)*.02 + 0.1
X1 = np.ones((n, 1)) * .1
X = np.zeros((n, d + 1))
X[:, :1] = X1
X[:, 1:] = XX
X = cdg.geometry.manifold.SphericalManifold.clip(X_mat=X, radius=radius)
if perturb != 0:
raise NotImplementedError()
return X
def get_sphere_coord(radius):
if radius is None:
radius = 1
u = np.linspace(0, 2 * np.pi, 20)
v = np.linspace(0, np.pi, 20)
x = radius * np.outer(np.cos(u), np.sin(v))
y = radius * np.outer(np.sin(u), np.sin(v))
z = radius * np.outer(np.ones(np.size(u)), np.cos(v))
return x, y, z
def get_hyperboloid_coord(radius):
if radius is None:
radius = 1
u = np.linspace(0, 2 * np.pi, 20)
rho = np.linspace(0, np.pi, 20)
x = np.outer(np.cos(u), rho)
y = np.outer(np.sin(u), rho)
z = np.sqrt(x ** 2 + y ** 2 + radius ** 2)
return x, y, z
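# --- Illustrative usage sketch (editor addition, not part of the original module). ---
# Shows how the coordinate grids above can be rendered; matplotlib is an assumption,
# it is not imported by this module.
def _example_plot_model_surfaces():
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the '3d' projection
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(*get_sphere_coord(radius=1.0), alpha=0.3)       # sphere model
    ax.plot_surface(*get_hyperboloid_coord(radius=1.0), alpha=0.3)  # hyperboloid model
    plt.show()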
class Geometry(object):
@classmethod
def _I(cls, n):
raise cdg.utils.AbstractMethodError()
@classmethod
def scalar_product(cls, X1_mat, X2_mat):
raise cdg.utils.AbstractMethodError()
@classmethod
def norm_squared(cls, X_mat):
raise cdg.utils.AbstractMethodError()
@classmethod
def norm(cls, X_mat):
raise cdg.utils.AbstractMethodError()
@classmethod
def reduced_solution(cls, eig_vec, eig_val, dim):
raise cdg.utils.AbstractMethodError()
class Eu(Geometry):
@classmethod
def _I(cls, n):
return np.eye(n)
@classmethod
def scalar_product(cls, X1_mat, X2_mat):
return X1_mat.dot(X2_mat.T)
@classmethod
def norm(cls, X_mat):
return np.linalg.norm(X_mat, axis=1)[..., None]
@classmethod
def norm_squared(cls, X_mat):
return cls.norm(X_mat)**2
@classmethod
def reduced_solution(cls, eig_vec,eig_val,dim):
lambda_abs = np.abs(eig_val[:dim])
lambda_mat = np.diag(np.sqrt(lambda_abs))
return np.dot(eig_vec[:, :dim], lambda_mat), sum(lambda_abs[dim:])
@classmethod
def distance_squared(cls, X1_mat, X2_mat):
D2 = -2. * cls.scalar_product(X1_mat=X1_mat, X2_mat=X2_mat)
D2 += cls.norm_squared(X_mat=X1_mat)
D2 += cls.norm_squared(X_mat=X2_mat).T
assert D2.min() > -1e-10
return np.clip(D2, a_min=0., a_max=None)
@classmethod
def distance(cls, X1_mat, X2_mat):
return np.sqrt(cls.distance_squared(X1_mat=X1_mat, X2_mat=X2_mat))
class PEu1(Geometry):
@classmethod
def _I(cls, n):
a = np.eye(n)
a[-1,-1]=-1
return a
@classmethod
def scalar_product(cls, X1_mat, X2_mat):
return X1_mat.dot(cls._I(n=X2_mat.shape[1]).dot(X2_mat.T) )
@classmethod
def norm_squared(cls, X_mat):
return np.sum(X_mat.dot(cls._I(n=X_mat.shape[1])) * X_mat, axis=1)[..., None]
@classmethod
def norm(cls, X_mat):
norm2 = cls.norm_squared(X_mat)
return np.sqrt(np.abs(norm2)) *np.sign(norm2)
@classmethod
def reduced_solution(cls, eig_vec, eig_val, dim):
X = np.zeros((len(eig_val),dim))
lambda_abs = np.abs(eig_val)
lambda_mat = np.diag(np.sqrt(lambda_abs))
X[:,:dim-1] = cls.scalar_product(eig_vec[:,:dim-1], lambda_mat[:dim-1,:dim-1])
X[:, -1:] = cls.scalar_product(eig_vec[:, -1:], lambda_mat[-1:, -1:])
return X, sum(lambda_abs[dim-1:-1])
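# --- Illustrative sketch (editor addition, not part of the original module). ---
# Contrast between the two geometries: Eu has the usual non-negative squared norm,
# while PEu1 carries signature (+, ..., +, -), so its squared norm can be negative.
def _example_geometries():
    x = np.array([[3.0, 4.0]])
    y = np.array([[0.0, 0.0]])
    print(Eu.norm_squared(x))    # [[25.]]
    print(PEu1.norm_squared(x))  # [[-7.]] = 3**2 - 4**2
    print(Eu.distance(x, y))     # [[5.]]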
class CCRiemannianManifold(cdg.utils.Loggable, cdg.utils.Pickable):
"""
Defines the structure of a constant-curvature Riemannian manifold.
This class is intended to be extended as spherical and hyperbolic manifolds.
Notice that if the manifold dimension is d, then points are represented by
d+1 coordinates in an ambient space vector space.
The general notation is:
- x, X: points on the manifold
- nu, Nu: points on the tangent space in local coordinates
- v, V: points on the tangent space in global coordinates, that is with
respect to the representation of the manifold
"""
_name = 'ConstantCurvatureRiemannianManifold'
curvature = None
manifold_dimension = None
_geo = None # geometry of the ambient space.
_sinm = None # sine function to be instantiated
_cosm = None # cosine function to be instantiated
def __init__(self, **kwargs):
self.log.debug('{} created'.format(self))
self.set_parameters(**kwargs)
def __str__(self, extra=''):
return self._name + "(d{}|r{}{})".format(self.manifold_dimension, self.radius, extra)
def set_parameters(self, **kwargs):
self.manifold_dimension = kwargs.pop('man_dim', self.manifold_dimension)
self.curvature = kwargs.pop('curvature', None)
self.set_radius(kwargs.pop('radius', None))
@property
def radius(self):
if self.curvature == 0 or self.curvature is None:
return None
else:
return 1. / np.sqrt(np.abs(self.curvature))
@classmethod
def exp_map(cls, x0_mat, Nu_mat):
"""
Exponential map from tangent space to the manifold
:param x0_mat: point of tangency
:param Nu_mat: (n, man_dim) points on the tangent space to be mapped
:return: X_mat: points on the manifold
"""
# tangent vectors in global coordinates
B = cls._local_basis(x0_mat=x0_mat)
V_mat = np.dot(Nu_mat, B.transpose())
# tangent vectors in global coordinates
X_mat = cls._exp_map_global_coord(x0_mat=x0_mat, v_mat=V_mat)
return X_mat
@classmethod
def log_map(cls, x0_mat, X_mat):
"""
Logarithm map from manifold to tangent space.
:param x0_mat: point of tangency
:param X_mat: (n, emb_dim) points on the manifold to be mapped
:return: Nu_mat: points on the tangent space
"""
# tangent vectors in global coordinates
V_mat = cls._log_map_global_coord(x0_mat=x0_mat, x_mat=X_mat)
# tangent vectors in local coordinates
B = cls._local_basis(x0_mat=x0_mat)
Nu_mat = cls._geo.scalar_product(V_mat, B.transpose())
return Nu_mat
@classmethod
def sample_mean(cls, X, **kwargs):
X_mat = cdg.utils.arrange_as_matrix(X=X)
# find argmin_{x\in X} \sum_i \rho(x,X_i)^2
Dn = cls.distance(X1=X_mat, **kwargs)
mean_id, _ = cdg.geometry.prototype.mean(dissimilarity_matrix=Dn, power=2)
x_new = X_mat[mean_id:mean_id + 1, :].copy()
# Optimise
xk = x_new.copy() + 10. # hack to be neq x_new
iter_max = 10
ct = 0
while ct < iter_max and not np.allclose(xk, x_new):
ct += 1
xk = x_new.copy()
Nu = cls.log_map(x0_mat=xk, X_mat=X_mat)
mean_log = cdg.utils.arrange_as_matrix(np.mean(Nu, axis=0))
assert not np.isnan(mean_log[0]).any()
x_new = cls.exp_map(x0_mat=xk, Nu_mat=mean_log)
# check overflow
if np.linalg.norm(x_new) > 1e4:
raise ValueError('Risk of overflow')
return x_new
@classmethod
def clip(cls, X_mat, radius):
raise cdg.utils.AbstractMethodError()
@classmethod
def distance(cls, X1, X2=None, **kwargs):
"""
Geodesic distance between points.
:param X1: (n1, d)
:param X2: (n2, d)
:param kwargs:
:return: distance matrix (n1, n2)
"""
# input parsing
X1_mat = cdg.utils.arrange_as_matrix(X=X1)
if X2 is None:
only_upper_triangular = True
X2_mat = X1_mat.copy()
else:
only_upper_triangular = False
X2_mat = cdg.utils.arrange_as_matrix(X=X2)
assert X1_mat.shape[1] == X2_mat.shape[1]
# actual computation
D = cls._distance(X1_mat=X1_mat, X2_mat=X2_mat,
only_upper_triangular=only_upper_triangular,
**kwargs)
assert np.argwhere(np.isnan(D)).shape[0] == 0
return D
@classmethod
def _local_basis(cls, x0_mat, curvature=None):
"""
Basis in the global frame of the tangent space on point x0
:param x0_mat: (1, d+1)
:param curvature:
:return: (d+1, d)
"""
dim = x0_mat.shape[1] - 1
curvature = cls._curvature_from_datum(x0_mat) if curvature is None else curvature
B_tmp = cls._geo._I(dim + 1) - np.dot(x0_mat.transpose(), x0_mat) * curvature
indices = [i for i in range(dim + 1)]
# check if its trivial
found = False
for i in range(dim + 1):
noti = indices[:i] + indices[i + 1:]
if np.isclose(x0_mat[0, noti], np.zeros((1, dim)), rtol=1e-4, atol=1e-4).all():
Bp = B_tmp[:, noti]
found = True
break
# select a non orthogonal column to drop
if not found:
for i in range(dim + 1):
noti = indices[:i] + indices[i + 1:]
if not np.isclose(np.dot(B_tmp[:, noti].transpose(), B_tmp[:, i]), \
np.zeros((1, dim)), \
rtol=1e-4, atol=1e-4).all():
Bp = B_tmp[:, noti]
found = True
break
if np.linalg.matrix_rank(Bp) != dim or not found:
raise cdg.util.errors.CDGImpossible()
# gram schmidt
B = np.zeros(Bp.shape)
for i in range(dim):
B[:, i] = Bp[:, i]
for j in range(i):
B[:, i] -= B[:, j] * cls._geo.scalar_product(B[None, :, i], B[None, :, j])[0, 0]
B[:, i] /= cls._geo.norm(B[None, :, i])[0]
return B
@classmethod
def _exp_map_global_coord(cls, x0_mat, v_mat, theta=None):
if theta is None:
theta = cls._theta_v(x0_mat=x0_mat, v_mat=v_mat)
if theta.ndim == 0:
theta = theta[..., None]
mask = theta[..., 0] == 0 # Check which elements would be divided by 0
theta[mask] = 1. # Replace theta=0 with theta=1 so the division will have no effects
output = cls._cosm(theta) * x0_mat + cls._sinm(theta) / theta * (v_mat) # Compute values
output[mask] = x0_mat # Replace values that would have been NaNs with x0
return output
@classmethod
def _log_map_global_coord(cls, x0_mat, x_mat, theta=None):
if theta is None:
theta = cls._theta_x(x0_mat=x0_mat, x_mat=x_mat)
if theta.ndim == 0:
theta = theta[..., None]
mask = theta[..., 0] == 0 # Check which elements would result in division by 0
theta[mask] = 1. # Replace theta=0 with theta=1 so the division will have no effects
output = theta / cls._sinm(theta) * (x_mat - cls._cosm(theta) * x0_mat)
output[mask] = np.zeros(x0_mat.shape) # Replace values that would have been NaNs with zeros
return output
@classmethod
def _theta_v(cls, x0_mat, v_mat):
# x0_mat = arrange_as_matrix(X=x0)
radius = cls._radius_from_datum(x_mat=x0_mat)
th = cls._geo.norm(v_mat) / radius
assert np.all(th >= 0) # From a theoretical point of view this can't happen because, despite the
# pseudo-Euclidean geometry, the tangent space is Euclidean.
return th
@classmethod
def _theta_x(cls, x0_mat, x_mat):
radius = cls._radius_from_datum(x_mat=x0_mat)
# raise NotImplementedError('need to check here whether a paired distance should be used')
return cls.distance(X1=x_mat, X2=x0_mat, radius=radius) / radius
@classmethod
def _radius_from_datum(cls, x_mat):
r = cls._geo.norm(X_mat=x_mat[:1])[0, 0]
return r if r > 0 else -r
@classmethod
def _curvature_from_datum(cls, x_mat):
r2 = cls._geo.norm_squared(X_mat=x_mat)[0, 0]
return 1 / r2
def reduced_solution(self, eig_vec, eig_val, emb_dim):
return self._geo.reduced_solution(eig_vec=eig_vec, eig_val=eig_val, dim=emb_dim)
@classmethod
def _distance(cls, X1_mat, X2_mat, **kwargs):
"""
Core part of the distance computation.
:param X1_mat: input points (n1, emb_dim)
:param X2_mat: input points (n2, emb_dim)
"""
radius = kwargs.pop('radius', None)
if radius is None:
radius = cls._radius_from_datum(x_mat=X1_mat[:1])
# nn = max([X1_mat.shape[0], 10])
# radius = 0
# for x in X1_mat[:nn]:
# radius += cls._radius_from_datum(x_mat=x[None, ...]) / nn
gramiam = cls._geo.scalar_product(X1_mat=X1_mat, X2_mat=X2_mat)
D = cls._scalarprod2distance(gramiam, radius)
return D
@classmethod
def _scalarprod2distance(cls, scalar_prod_mat, radius):
raise cdg.utils.AbstractMethodError()
def distance2scalarprod(self, dist_mat):
raise cdg.utils.AbstractMethodError()
class SphericalManifold(CCRiemannianManifold):
"""
Positive curvatures.
"""
_name = 'S'
# Euclidean geometry of the hosting vector space
_geo = Eu()
# ordinary sine and cosine
_sinm = np.sin
_cosm = np.cos
def set_radius(self, value):
if value is not None:
self.curvature = 1. / value ** 2
@classmethod
def clip(cls, X_mat, radius):
norms = cls._geo.norm(X_mat=X_mat)
X_mat = X_mat / norms
return X_mat * radius
@classmethod
def _scalarprod2distance(cls, scalar_prod_mat, radius):
corr = scalar_prod_mat * 1. / radius ** 2
condition = np.logical_and(np.abs(corr) > 1., np.abs(corr) < (1. + 1e-4))
corr_clip = np.where(condition, 1., corr)
with np.errstate(invalid='raise'):
try:
dtmp = np.arccos(corr_clip)
except FloatingPointError:
raise FloatingPointError('np.arccos:'
'Out of bounds points: {}/1.0 '.format(condition.mean()) +
'Min minus_corr_clip: {}'.format(np.min(corr_clip)))
import os
import numpy as np
import math
from math import pi
import scipy.ndimage.morphology
from scipy import ndimage
import skimage.morphology
from typing import Sequence, Tuple, Union, Optional, List
def angle_2_da_vector(angles: np.ndarray) -> np.ndarray:
"""
Angles in radians to double-angle vector space; 0 radians -> (1, 0), pi/4 radians -> (0, 1)
Args:
angles: torch.Tenor of shape (batch, 1, x, y)
Returns: torch tensor of shape (batch, 2, x, y)
"""
double_angle = angles*2
da_vectors_x = np.cos(double_angle)
da_vectors_y = np.sin(double_angle)
da_vectors = np.concatenate([da_vectors_x, da_vectors_y], axis=1)
return da_vectors
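# --- Illustrative sketch (editor addition, not part of the original module). ---
# The double-angle encoding maps 0 and pi radians to the same vector (1, 0), which is
# what makes it suitable for undirected line orientations.
def _example_angle_2_da_vector():
    angles = np.full((1, 1, 2, 2), 0.0)           # shape (batch, 1, x, y)
    print(angle_2_da_vector(angles)[0, :, 0, 0])  # [1. 0.]
    angles = np.full((1, 1, 2, 2), pi / 4)
    print(angle_2_da_vector(angles)[0, :, 0, 0])  # [0. 1.] up to floating-point rounding
    angles = np.full((1, 1, 2, 2), pi)            # same encoding as 0 radians
    print(angle_2_da_vector(angles)[0, :, 0, 0])  # [1. 0.] up to floating-point rounding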
class Clustering:
"""
Separates direction-style network output into separate chromosomes. Work on a single image (no batch).
The hyperparameters are saved in the __init__ function.
"""
def __init__(self,
minimum_intersection_area: int = 6,
max_distance: int = 4,
merge_peaks_distance: int = 1,
minimum_clusters_area: int = 10,
minimum_adjacent_area: int = 8,
direction_sensitivity: float = 0.87,
cluster_grow_radius: Union[float, int] = 1.2,
max_chromosome_width: int = 10,
intersection_grow_radius: Union[float, int] = 1.5,
direction_local_weight: float = 0.9):
"""
Save the hyperparameters.
:param minimum_intersection_area: Delete intersections whose area is smaller than this (in pixels)
:param max_distance: The distance that all values in the distance transform are rounded down to
:param merge_peaks_distance: how far a local maximum has to be from another cluster before considered
a separate class
:param minimum_clusters_area: Removes clusters with fewer pixels than minimum_clusters.
:param minimum_adjacent_area: How many touching pixels must two categories have to be considered for merging.
:param direction_sensitivity: How similar must the direction be in order to merge two categories
:param cluster_grow_radius: By how much to grow areas when checking for interconnectivity.
:param max_chromosome_width: Don't join categories if their resulting width is larger than this width
:param intersection_grow_radius: By how much to grow intersections when trying to merge them
:param direction_local_weight: When determining direction similarity, what proportion should be used from
a local neighbourhood of the intersection. (1-weight) used for whole channel direction similarity.
"""
self.minimum_intersection_area = minimum_intersection_area
self.max_distance = max_distance
self.merge_peaks_distance = merge_peaks_distance
self.minimum_clusters_area = minimum_clusters_area
self.minimum_adjacent_area = minimum_adjacent_area
self.direction_sensitivity = direction_sensitivity
self.cluster_grow_radius = cluster_grow_radius
self.max_chromosome_width = max_chromosome_width
self.intersection_grow_radius = intersection_grow_radius
self.direction_local_weight = direction_local_weight
def direction_2_separate_chromosomes(self,
index_3category: np.ndarray,
dilated_intersection: np.ndarray,
direction_angle: np.ndarray):
"""
Transform the output of a direction style network to a classification
:param index_3category: shape: (1, x, y) with indices 0: background, 1: chromosome, 2: intersection
:param dilated_intersection: binary tensor of shape (1, x, y)
:param direction_angle: float tensor of the angle defined in areas with index_3category == 1
:return: A binary tensor of shape (chromosomes, x, y)
"""
unique = np.equal(index_3category, 1)
intersection = np.equal(index_3category, 2)
dilated_intersection = np.greater(dilated_intersection, 0)
direction_angle = np.mod(direction_angle, pi)
da_vector = angle_2_da_vector(direction_angle[None, :, :, :])[0]
# delete areas of intersection that are too small
intersection = remove_small_areas(intersection, self.minimum_intersection_area)
dilated_unique = np.logical_and(unique, np.logical_not(dilated_intersection))
distance = scipy.ndimage.morphology.distance_transform_cdt(unique)
distance_dilated = scipy.ndimage.morphology.distance_transform_cdt(dilated_unique)
clusters_dilated = self.distance_clustering(distance_dilated)
clusters = self.distance_clustering_with_seed(distance, clusters_dilated)
channels = cluster_idx_2_channels(clusters)
# Merge channels not near intersections
channels = self.merge_channels_not_near_intersections(channels, unique, intersection, da_vector)
channels = remove_small_channels(channels, self.minimum_clusters_area)
channels = self.merge_channels_across_intersection_assume_two(channels, intersection, da_vector)
separate_chromosomes = combine_channels_and_intersection(channels, intersection)
return separate_chromosomes
def distance_clustering(self, distance_image: np.ndarray) -> np.ndarray:
"""
Perform iterative clustering based on the distance_image provided.
Local maxima are turned into clusters unless they merge with another cluster within merge_peaks_distance.
Clusters are grown to lower distances in the image.
:param distance_image: integer np array of distances to cluster by
Returns: A integer np array, where the values correspond to chromosome clusters (0 is the background)
"""
assert distance_image.dtype == np.int or distance_image.dtype == np.int32
distance_image_clipped = np.clip(distance_image, 0, self.max_distance)
return self.distance_clustering_with_seed(distance_image_clipped, np.zeros_like(distance_image))
def distance_clustering_with_seed(self,
distance_image: np.ndarray,
seed_clusters: np.ndarray,
) -> np.ndarray:
"""
Perform iterative clustering based on the distance_image provided.
Local maxima are turned into clusters unless they merge with another cluster within merge_peaks_distance.
Clusters are grown to lower distances in the image.
Args:
distance_image: integer np array of distances to cluster by
seed_clusters: The initial clusters to start with
Returns: A integer np array, where the values correspond to chromosome clusters (0 is the background)
"""
assert distance_image.dtype == np.int32
assert seed_clusters.dtype == np.int32
clusters = seed_clusters.copy()
# ensure clusters have no gaps (in case we merged)
clusters = remove_cluster_gaps(clusters)
new_clusters_expiry = [0] * (np.max(clusters) + 1) # idx 0 for background
selem = np.ones((1, 3, 3))
for current_distance in reversed(range(1, np.max(distance_image) + 1)):
# grow clusters until all points in specific range are covered. Separate points will be added as new labels
remaining_points = np.ones_like(clusters)
while True:
remaining_points_new = np.logical_and(distance_image == current_distance, clusters == 0)
if np.all(remaining_points == remaining_points_new):
# no changes
break
remaining_points = remaining_points_new
# grow clusters
for cluster_i in range(1, np.max(clusters) + 1):
grown_label = skimage.morphology.binary_dilation(clusters == cluster_i, selem=selem)
grown_label = np.logical_and(grown_label, remaining_points)
clusters[grown_label] = cluster_i
# check whether to merge labels (because of local optimums based on merge_peaks_distance)
clusters_to_check = np.where(np.array(new_clusters_expiry) > 0)[0]
for cluster_to_check in clusters_to_check:
grown_area = skimage.morphology.binary_dilation(clusters == cluster_to_check, selem=selem)
clusters_in_grown_area = set(clusters[grown_area]) - {0}
if len(clusters_in_grown_area) > 1:
cluster_to_merge_to = min(clusters_in_grown_area - {cluster_to_check})
clusters[clusters == cluster_to_check] = cluster_to_merge_to
# ensure clusters have no gaps (in case we merged)
clusters, new_clusters_expiry = remove_cluster_gaps(clusters, new_clusters_expiry)
# reduce new_labels_expiry
new_clusters_expiry = [max(0, value - 1) for value in new_clusters_expiry]
# check for new clusters
if np.any(remaining_points):
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 2 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.neighbors import NearestNeighbors
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed -- during nn finetuning
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
import sys
'''
CustomDataset object takes care of supplying an observation (image, labels).
It also performs image preprocessing, such as normalization by color channel.
In case of training, it also performs random transformations, such as horizontal flips, resized crops, rotations, and color jitter.
'''
class CustomDataset(Dataset):
def __init__(self, data, tr = True):
self.data = data
self.paths = self.data['img_path'].values.astype('str')
self.data_len = self.data.shape[0]
self.labels = self.data[q_list].values.astype('int32')
self.control_metrics = self.data[control_list].values.astype('float32')
# transforms
if tr:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.1,contrast=0.1,saturation=0.1,hue=0.1)], p=0.75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
def __getitem__(self, index):
img_path = PATH + '/'+ self.paths[index]
img = Image.open(img_path)
img_tensor = self.transforms(img)
label = self.labels[index]
control_metric = self.control_metrics[index]
return (img_tensor, label, control_metric)
def __len__(self):
return self.data_len
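# --- Illustrative usage sketch (editor addition, not part of the original script). ---
# The DataFrame must provide an 'img_path' column plus the label columns in q_list and
# the control columns in control_list (both defined further down in this script).
def _example_custom_dataset(frame):
    train_set = CustomDataset(frame, tr=True)     # tr=True enables random augmentation
    img_tensor, labels, controls = train_set[0]   # one preprocessed observation
    loader = create_dataloader(frame, rand=True)  # batched, shuffled loader (defined below)
    return img_tensor.shape, labels.shape, len(loader)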
#get pretrained resnet50 model
def get_pretrained():
model = models.resnet50(pretrained=True)
return model
# replace last layer
def prepare_for_finetuning(model):
for param in model.parameters():
param.requires_grad = False
param.requires_grad = True # note: outside the loop above, so only the last iterated parameter is unfrozen; the new fc layer added below is trainable by default
#replacing last layer with new fully connected
model.fc = torch.nn.Linear(model.fc.in_features,n_outs)
return
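# --- Illustrative usage sketch (editor addition). ---
# After prepare_for_finetuning, the backbone parameters are frozen and the new fully
# connected head has one output per level of every predicted variable (n_outs).
def _example_prepare_model():
    model = get_pretrained()       # downloads the ImageNet weights if not cached
    prepare_for_finetuning(model)
    return model.fc.out_features   # equals n_outs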
# create an object that uses CustomDataset object from above to load multiple observations in parallel
def create_dataloader(data,rand=True):
if rand: # shuddle observations
dataset = CustomDataset(data, tr=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=10, drop_last=False)
else: # load in fixed order of data
dataset = CustomDataset(data, tr=False)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SequentialSampler(dataset), num_workers=10, drop_last=False)
return loader
#finetune and save neural net model
def finetune_and_save(loader_train, loader_test):
# loading pretrained model and preparing it for finetuning
model = get_pretrained()
prepare_for_finetuning(model)
if CUDA:
model.cuda()
# optimize only last six layers
layers = list(model.children())
params = list(layers[len(layers)-1].parameters())+list(layers[len(layers)-2].parameters())+list(layers[len(layers)-3].parameters())+list(layers[len(layers)-4].parameters())+list(layers[len(layers)-5].parameters())+list(layers[len(layers)-6].parameters())
optimizer = optim.Adamax(params=params, lr=0.001)
# print("starting finetuning")
hist = {}
hist['d_labs'] = q_list
hist['train_loss'] = []
hist['val_loss'] = []
hist['train_loss_d'] = []
hist['val_loss_d'] = []
hist['train_auc_d'] = []
hist['val_auc_d'] = []
acc_best = 0.0
#train
for epoch in range(N_EPOCHS):
train_loss, train_loss_d, train_auc_d = run_epoch(model, loss_f, optimizer, loader_train, update_model = True) # training
eval_loss, eval_loss_d, eval_auc_d = run_epoch(model, loss_f, optimizer, loader_test, update_model = False) # evaluation
hist['train_loss'].append(train_loss)
hist['val_loss'].append(eval_loss)
hist['train_loss_d'].append(train_loss_d)
hist['val_loss_d'].append(eval_loss_d)
hist['train_auc_d'].append(train_auc_d)
hist['val_auc_d'].append(eval_auc_d)
with open(RESULTS+'/eval_record.json', 'w') as fjson:
json.dump(hist, fjson)
# saving model
torch.save(model, RESULTS+"/finetuned_model")
return
# function that performa training (or evaluation) over an epoch (full pass through a data set)
def run_epoch(model, loss_f, optimizer, loader, update_model = False):
if update_model:
model.train()
else:
model.eval()
loss_hist = []
loss_hist_detailed = []
auc_hist_detailed = []
for batch_i, var in tqdm(enumerate(loader)):
loss, loss_detailed, auc_detailed = loss_f(model, var)
if update_model:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist.append(loss.data.item())
loss_hist_detailed.append(loss_detailed)
auc_hist_detailed.append(auc_detailed)
loss_detailed = pd.DataFrame(loss_hist_detailed)
loss_detailed.columns = q_list
auc_detailed = pd.DataFrame(auc_hist_detailed)
auc_detailed.columns = q_list
return np.mean(loss_hist).item(), loss_detailed.mean(0).values.tolist(), auc_detailed.mean(0).values.tolist()
# function to compute loss from a batch data
def loss_f(model, var):
data, target, _ = var
data, target = Variable(data), Variable(target)
if CUDA:
data, target = data.cuda(), target.cuda()
output = model(data) # forward pass: scores for every level of each predicted variable
loss = 0
loss_detailed = []
auc_detailed = []
for i in range(len(q_d_list)):
w = torch.FloatTensor(class_weights[i])
if CUDA:
w = w.cuda()
# output contains scores for each level of every predicted variable
# q_d_list[i] is number of levels to variable i
# q_d_list_cumsum[i] is a cumulative sum over number of levels for variable i and all variables before it
# all variables ordered as in q_list
# (q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i] then gives exact coordinates of the scores for variable i
# among all scores in the output
temp = F.cross_entropy(output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]], target[:,i].long(), weight=w)
loss_detailed.append(temp.data.item())
loss += temp
# now we calculate AUC
y_true = target[:,i].detach().cpu().numpy()
y_score = output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]].detach().cpu().numpy()[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc_detailed.append(metrics.auc(fpr, tpr))
return loss, loss_detailed, auc_detailed
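# --- Illustrative sketch of the output slicing used in loss_f (editor addition). ---
# With, e.g., three binary variables, q_d_list = [2, 2, 2] and q_d_list_cumsum = [2, 4, 6]:
# variable 0 owns output columns 0:2, variable 1 owns 2:4 and variable 2 owns 4:6.
def _example_output_slicing():
    d_list = np.array([2, 2, 2])
    d_cumsum = np.cumsum(d_list)
    slices = [(d_cumsum[i] - d_list[i], d_cumsum[i]) for i in range(len(d_list))]
    return slices  # [(0, 2), (2, 4), (4, 6)]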
# building class balancing weights as in
# https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
def calculate_class_weights(X):
class_weights = []
for i in q_list:
class_weights.append(
class_weight.compute_class_weight('balanced', np.unique(X[i].values), X[i].values))
return class_weights
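# --- Illustrative sketch (editor addition). ---
# 'balanced' weights are n_samples / (n_classes * n_c); the positional call below matches
# the call style used above (older scikit-learn releases accept positional arguments).
def _example_balanced_weights():
    y = np.array([0] * 80 + [1] * 20)
    return class_weight.compute_class_weight('balanced', np.unique(y), y)  # [0.625, 2.5]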
# extract data from a dataloader as a set of image features X and set of labels y, corresponding to those image features
# can also blackout specified areas of the loaded images before extracting the image features -- this is used in our experiments
# when data loader is deterministic, then it will load in the same data again and again
def extract_data(loader, modelred, blackout=None):
X = []
y = []
z = []
for batch_i, var in tqdm(enumerate(loader)):
data, target, control_metrics = var
if blackout is not None:
data[:, :, blackout[0]:blackout[1], blackout[2]:blackout[3]] = 0.0
data, target, control_metrics = Variable(data), Variable(target), Variable(control_metrics)
if CUDA:
data, target, control_metrics = data.cuda(), target.cuda(), control_metrics.cuda()
data_out = modelred(data)
X.append(data_out.detach().cpu().numpy())
y.append(target.detach().cpu().numpy())
z.append(control_metrics.detach().cpu().numpy())
X = np.vstack(X).squeeze()
y = np.vstack(y)
z = np.vstack(z)
return X, y, z
# function to evaluate a set of trained classifier using AUC metric
# 'models' contains classifiers in order of binary variables to be predicted -- which are contaiend in Y
# X is a matrix of covariates
def analytics_lin(models, X, Y):
acc = {}
auc = {}
for i in tqdm(range(Y.shape[1])):
y_true = Y[:,i]
mod = models[i]
y_pred = np.argmax(mod.predict_proba(X),axis=1)
# auc
y_prob = mod.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_prob)
auc[q_list[i]] = metrics.auc(fpr, tpr)
return auc
# sequentially yield coordinates for blackout in an image
def sliding_window(image_shape, stepSize, windowSize):
# slide a window across the image
for yc in range(0, image_shape[0], stepSize):
for xc in range(0, image_shape[1], stepSize):
# yield the current window
yield (yc, yc + windowSize[1], xc, xc + windowSize[0])
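# --- Illustrative sketch (editor addition). ---
# With stepSize=28 and windowSize=(28, 28) on a 224x224 image the generator yields an
# 8x8 grid of non-overlapping patches: (0, 28, 0, 28), (0, 28, 28, 56), ..., (196, 224, 196, 224).
def _example_sliding_window():
    windows = list(sliding_window((224, 224), stepSize=28, windowSize=(28, 28)))
    return len(windows)  # 64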
# calculating decrease in AUC when blocking a particular area of an image -- over 8x8 grid placed over the image
def img_area_importance(modelred, models, svd, dat, auc_true):
patch_importance = {}
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
loader = create_dataloader(dat,rand=False)
# X_modified_raw contains image features extracted from images with a portion of the image blocked
X_modified_raw, Y, _ = extract_data(loader, modelred, (y0, y1, x0, x1))
# image features reduced to 500 via svd
X_modified = svd.transform(X_modified_raw)
auc = analytics_lin(models, X_modified, Y)
patch_importance_q = {} # contains -(decrease in auc after blocking of an image)
for q in q_list:
patch_importance_q[q] = auc_true[q] - auc[q]
patch_importance[(y0, y1, x0, x1)] = patch_importance_q # decrease in auc across all variables -- for the given blocked portion of the image
return patch_importance
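# --- Illustrative sketch (editor addition; 'alcohol' is just one of the q_list variables). ---
# The returned dict is keyed by the blocked patch (y0, y1, x0, x1); each value maps a
# variable to the AUC drop caused by hiding that patch.
def _example_most_important_patch(patch_importance):
    return max(patch_importance.items(), key=lambda kv: kv[1]['alcohol'])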
# START OF THE RUN
torch.set_num_threads(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
N_EPOCHS = 20
FINETUNE = True
CUDA = torch.cuda.is_available()
batch_size=10
PATH = './data'
# analysis on face vs. bodies
CASHIER = sys.argv[1]#'ALL' #'4' # 3 #
control_list = ['02.05','03.05','04.05','05.05','06.05','07.05','08.05','09.05','10.05', '11.05', '12.05', '13.05',
'time_1', 'time_2', 'time_3', 'time_4']
if CASHIER == 'ALL':
data = pd.read_csv(PATH+'/data_face.csv')
RESULTS = './results_face'
control_list = control_list + ['cashier4']
elif CASHIER == '4':
data = pd.read_csv(PATH+'/data_face.csv')
data = data[data['cashier4']==1]
RESULTS = './results_face_'+CASHIER
elif CASHIER == '3':
data = pd.read_csv(PATH+'/data_face.csv')
data = data[data['cashier4']==0]
RESULTS = './results_face_'+CASHIER
else:
print('Invalid data type -- terminating')
exit()
os.makedirs(RESULTS, exist_ok=True)
# list of variables
q_list = ['alcohol', 'vodka', 'beer', 'cola', 'ice_cream', 'banana', 'bread', 'eggs', 'chocolate', 'vegetables', 'fruits', 'over_10_item_types', 'amount_over_300']
# names for variables
q_to_full_name_dict = {
'alcohol': 'Alcohol',
'vodka' : 'Vodka',
'beer' : 'Beer',
'cola': 'Cola',
'ice_cream' : 'Ice cream',
'banana' : 'Bananas',
'bread' : 'Bread',
'eggs' : 'Eggs',
'chocolate' : 'Chocolate',
'vegetables' : 'Vegetables',
'fruits' : 'Fruits',
'over_10_item_types': 'Over 10 item types on receipt',
'amount_over_300': 'Receipt value over 300 UAH' # 300 hrynvia ~ US $11.5 in May 2018
}
q_to_d_dict = {} # number of levels per variable
random_threshold = {} # random guess threshold
prop = {} # proportion of class 1 in the data (vs. 0)
for i in q_list:
q_to_d_dict[i] = np.unique(data[i]).shape[0]
random_threshold[i] = 1.0/q_to_d_dict[i]
prop[i] = data[i].sum()/data.shape[0]
q_d_list = [q_to_d_dict[q] for q in q_list] # vector containing number of levels per variable -- where variables are ordered as in q_list
q_d_list_cumsum = np.cumsum(q_d_list) # cumulative sum over variable levels
# total number of levels across variables
n_outs=q_d_list_cumsum[-1]
# logistic regresssion wrapper
def logistic_regression(Xtr, Xts):
return LogisticRegression(penalty='l2', C=0.05, random_state=0, tol=1e-6, max_iter=1e7,
solver='lbfgs', class_weight='balanced').fit(Xtr, Xts)
# train many regressions
def train_eval_regressions(Xtr, Ytr, Xts, Yts):
lin_models = []
for i in tqdm(range(len(q_list))):
clf = logistic_regression(Xtr, Ytr[:,i])
lin_models.append(clf)
auc = analytics_lin(lin_models, Xts, Yts)
return auc, lin_models
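# --- Illustrative usage sketch (editor addition; shapes are assumptions). ---
# Xtr/Xts are (n_obs, n_features) feature matrices and Ytr/Yts are (n_obs, len(q_list))
# binary label matrices; the returned dict maps each variable in q_list to its test AUC.
def _example_train_eval(Xtr, Ytr, Xts, Yts):
    auc, fitted_models = train_eval_regressions(Xtr, Ytr, Xts, Yts)
    return auc['alcohol'], len(fitted_models)  # one AUC per variable, len == len(q_list)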
# number of unique receipts
data['cid'].unique().shape
# n observations
sum(data['cashier4'] == 1) # cashier 5 on camera
sum(data['cashier4'] == 0) # cashier 4 on camera
# n unique receipts
data['cid'][data['cashier4'] == 1].unique().shape
data['cid'][data['cashier4'] == 0].unique().shape
# TRAINING
np.random.seed(999)
torch.manual_seed(999)
# load a pretrained resnet-50 network
model = get_pretrained()
# modelred is a subset of model that outputs a vector of image features per image
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
n_reps = 20 # number of repeats for 5-fold cross-valaidtion
gkf = KFold(n_splits=5)
results_auc = []
results_patch_importance = []
results_auc_control = []
results_auc_combo = []
# blocking IDs - blocks are based on time period
IDs = data['block'].unique()
for rep in tqdm(range(n_reps)):
# shuffling every repetition to get new folds via cv procedure
np.random.shuffle(IDs)
data_shuffled = data.sample(frac=1.0) # shufling observations too
for trainID, testID in tqdm(gkf.split(IDs)):
# extracting split data
data_train = data_shuffled[data_shuffled['block'].isin(IDs[trainID])]
data_test = data_shuffled[data_shuffled['block'].isin(IDs[testID])]
# calculating class weights to balance data
class_weights = calculate_class_weights(data_train)
# creating data loaders
loader_train = create_dataloader(data_train,rand=False)
if FINETUNE:
loader_train_rand = create_dataloader(data_train,rand=True)
loader_test = create_dataloader(data_test,rand=False)
# finetuning model
if FINETUNE:
finetune_and_save(loader_train_rand, loader_test)
model = torch.load(RESULTS+"/finetuned_model")
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
# extracting image features, labels, and control variables
X_train_raw, Y_train, Z_train = extract_data(loader_train, modelred)
X_test_raw, Y_test, Z_test = extract_data(loader_test, modelred)
# reducing number of features
svd = TruncatedSVD(n_components=500, random_state=0, n_iter=100).fit(X_train_raw)
X_train = svd.transform(X_train_raw)
X_test = svd.transform(X_test_raw)
# training linear models - image features only
auc, lin_models = train_eval_regressions(X_train, Y_train, X_test, Y_test)
results_auc.append(auc)
# image area importance
patch_importance = img_area_importance(modelred, lin_models, svd, data_test, auc)
results_patch_importance.append(patch_importance)
# control variables
auc, lin_models = train_eval_regressions(Z_train, Y_train, Z_test, Y_test)
results_auc_control.append(auc)
# image features + control variables
auc, lin_models = train_eval_regressions(np.concatenate([X_train, Z_train],1), Y_train, np.concatenate([X_test, Z_test],1), Y_test)
results_auc_combo.append(auc)
# saving results of the run
pd.DataFrame(results_auc).to_csv(RESULTS+'/crossvalidation_auc.csv', index=False)
pd.DataFrame(results_auc_control).to_csv(RESULTS+'/crossvalidation_auc_control.csv', index=False)
pd.DataFrame(results_auc_combo).to_csv(RESULTS+'/crossvalidation_auc_combo.csv', index=False)
# saving patch_importance
patch_importance = {}
for q in q_list:
arr = np.zeros((224,224))
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-11-12
author: matz
Methods to describe the layout of assemblies in the reactor core and
the coolant in the gap between them
"""
########################################################################
import numpy as np
from dassh.logged_class import LoggedClass
from dassh.correlations import nusselt_db
_sqrt3 = np.sqrt(3)
# Directions around assembly faces
_dirs = {}
_dirs[0] = [(0, -1), (-1, -1), (-1, 0), (0, 1), (1, 1), (1, 0)]
_dirs[2] = _dirs[0][1:] + [_dirs[0][0]]
_dirs[1] = _dirs[0][2:] + _dirs[0][:2]
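# Editor note: _dirs[0] lists the six (row, col) steps to the neighbouring positions in
# the assembly map, one per hexagon face; _dirs[2] and _dirs[1] are the same cycle
# rotated forward by one and two faces, respectively.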
class Core(LoggedClass):
"""Map the reactor core using the GEODST binary file; set up
parameters to calculate interassembly gap subchannel temperatures
Parameters
----------
asm_list_input : numpy.ndarray
Map of assembly positions in the core (NaN marks empty positions)
asm_pitch : float
Assembly pitch (m)
gap_flow_rate : float
Interassembly gap flow rate (kg/s)
coolant_obj : float
DASSH Material object for the interassembly gap coolant
inlet_temperature : float
Inlet coolant temperature (K)
test : bool (optional)
If testing, do not run all the instantiation methods; instead,
allow the object to be instantiated without calling them so
they can be called incrementally and independently
Notes
-----
DASSH is intended for use with GEODST files generated from DIF3D-
VARIANT. The geometry for VARIANT calculations is somewhat
restricted to hexagonal geometries, for which each "region"
corresponds to the location of an assembly. The values in the
GEODST region maps correspond to region groups -- groups of
assemblies (i.e. "inner core", "outer core", "driver", etc.).
Here, "region" is used to refer to the values in the GEODST
region map; "assembly" is used to refer to the individual
positions of each region, regardless of its group.
"""
def __init__(self, asm_list_input, asm_pitch, gap_flow_rate,
coolant_obj, inlet_temperature=273.15, model='flow',
test=False, htc_params_duct=None):
"""Instantiate Core object."""
LoggedClass.__init__(self, 4, 'dassh.core.Core')
if model not in ['flow', 'no_flow', 'duct_average', None]:
msg = 'Do not understand input inter-assembly gap model: '
self.log('error', msg + model)
# --------------------------------------------------------------
# Identify GEODST periodicity: if not full core, it can be
# either 60 or 120 degree
self.n_ring = count_rings(len(asm_list_input))
self.n_asm = np.sum(~np.isnan(asm_list_input))
self.hex_option = 0 # full core only, no periodicity
self.asm_pitch = asm_pitch # m
self.gap_coolant = coolant_obj
self.gap_coolant.update(inlet_temperature)
self.gap_flow_rate = gap_flow_rate
self.coolant_gap_params = \
{'Re': 0.0, # bundle-average Reynolds number
'Re_sc': np.zeros(2), # subchannel Reynolds numbers
'vel': 0.0, # bundle-average coolant velocity
'ff': np.zeros(2), # subchannel friction factors
'htc': np.zeros(2)} # heat transfer coefficients
self.z = [0.0]
self.model = model
if htc_params_duct:
self._htc_params = htc_params_duct
else:
self._htc_params = [0.025, 0.8, 0.8, 7.0]
# --------------------------------------------------------------
# Don't run the more complex, tested methods if testing; let
# them be implemented independently in pytest.
if test:
return
# --------------------------------------------------------------
# ASSEMBLY AND SUBCHANNEL MAPPING
# Assign each assembly (region) in the X-Y region map an ID
# number; although the regions will change between axial
# meshes, the filled positions will not. Therefore, this
# map only needs to be made for one axial mesh.
# Sets attribute self.asm_map
self.asm_map = map_asm(asm_list_input)
# Map neighbors for each assembly based on problem symmetry
# Again, only one map needed for all axial meshes
# Sets attribute self.asm_adj
self.asm_adj = map_adjacent_assemblies(self.asm_map)
####################################################################
# SETUP METHODS
####################################################################
def load(self, asms):
"""Load assemblies into core positions and import their
characteristics
Parameters
----------
asms : list
List of DASSH.Assembly objects
Returns
-------
None
Notes
-----
Although the inter-assembly gap coolant is tracked on the
finest mesh, information is required for all assemblies in
order to approximate duct wall and coolant temperatures back
and forth between finer and coarser meshes.
"""
assert len(asms) == self.n_asm
self.duct_oftf = asms[0].duct_oftf # Duct outer FTF
self.hex_side_len = self.duct_oftf / _sqrt3
# Set up inter-assembly gap subchannel sizes and dimensions
self.d_gap = self.asm_pitch - self.duct_oftf # Gap "width"
# Set up interassembly gap subchannel attributes
# (1) Geometric parameters of assembly with which subchannels
# are aligned: pin pitch, corner perimeter, side sc per hex side
self._geom_params = self._collect_sc_geom_params(asms)
# (2) assembly-subchannel adjacency - return as nested lists
# for later use; store as numpy array
asm_adj_sc = self._map_asm_gap_adjacency()
# Combine nested lists: a little ugly bc not all same length
max_scpa = max([sum([len(x) for x in y]) for y in asm_adj_sc])
self._asm_sc_adj = np.zeros((self.n_asm, max_scpa), dtype=int)
for a in range(self.n_asm):
tmp = np.array([x for l in asm_adj_sc[a] for x in l])
tmp = tmp.flatten()
self._asm_sc_adj[a, :tmp.shape[0]] = tmp
# (3) boundaries between assembly-adjacent subchannels
self._asm_sc_xbnds = self._calculate_gap_xbnds()
# (4) types of assembly-adjacent subchannels
# (5) global array of subchannel types
tmp = self._determine_gap_sc_types()
self._asm_sc_types = [np.array(
[li for l in x for li in l]).flatten() for x in tmp[0]]
self._sc_types = np.array(tmp[1])
# Calculate some follow-up parameters:
# Number of subchannels
self.n_sc = np.max(self._asm_sc_adj)
# Number of subchannels adjacent to each assembly
self._n_sc_per_asm = np.count_nonzero(self._asm_sc_adj, axis=1)
# Global subchannel-subchannel adjacency
self._sc_adj = self._find_adjacent_sc(asm_adj_sc)
# Subchannel area, hydraulic diameter, distances to neighbors
self.gap_params = {}
self.gap_params['wp'] = self._calculate_sc_wp()
self.gap_params['asm wp'] = self._calculate_asm_sc_wp()
self.gap_params['area'] = self._calculate_sc_area()
self.gap_params['L'] = self._calculate_dist_between_sc()
assert not np.any(0 in np.sum(self.gap_params['L'], axis=1))
self.gap_params['de'] = self._calculate_sc_de()
# Core-total parameters
self.gap_params['total area'] = np.sum(self.gap_params['area'])
self.gap_params['total wp'] = 6 * self.hex_side_len * self.n_asm
self.gap_params['total de'] = (4 * self.gap_params['total area']
/ self.gap_params['total wp'])
# Fractional area
self.gap_params['area frac'] = (self.gap_params['area']
/ self.gap_params['total area'])
# Flow parameters
self._sc_mfr = self.gap_flow_rate * self.gap_params['area frac']
if self.model == 'flow':
self._inv_sc_mfr = 1 / self._sc_mfr
# Reynolds number constant
self.coolant_gap_params['_Re_sc'] = \
self._sc_mfr * self.gap_params['de'] / self.gap_params['area']
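        # Re_i = (m_dot_i * De_i / A_i) / mu; the temperature-dependent
        # viscosity is divided out each time the gap coolant properties
        # are updated, so only the constant part is stored here.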
# Interior coolant temperatures; shape = n_axial_mesh x n_sc
self.coolant_gap_temp = np.ones(self.n_sc)
self.coolant_gap_temp *= self.gap_coolant.temperature
# Set up convection/conduction utility attributes
self._make_conv_mask()
self._make_cond_mask()
# Update coolant gap params based on inlet temperature
self._update_coolant_gap_params(self.gap_coolant.temperature)
# Track the energy given from the assemblies to the gap
# coolant; this should match roughly with what the assembly
# energy balance reports is lost through the outermost duct.
# The match won't be exact because the assembly calculation
# assumes heat from the corner subchannels is transferred
# through an area equal to that at the midplane of the duct,
# whereas here it is transferred through the area at the
# outside of the duct, since that is the same for all
# assemblies. I have confirmed that adjusting for this gives
# the matching result. This array will not sum to zero.
# self.ebal = np.zeros(())
self.ebal = {}
self.ebal['asm'] = np.zeros(self._asm_sc_adj.shape)
# MAP INTER-ASSEMBLY GAP; DEFINE GEOMETRY --------------------------
def _collect_sc_geom_params(self, asm_list):
"""Save attributes from assemblies that define gap subchannel
geometry
Parameters
----------
asm_list : list
List of DASSH Assembly objects
Returns
-------
dict
Dict contains two items:
- 'dims': numpy.ndarray, N_asm x 6 x 2
Pin pitch and corner perimter for gap subchannels on
each hex side of each assembly
- 'sc_per_side': numpy.ndarray, N_asm x 6
Number of gap edge subchannels on each hex side of
each assembly
"""
dims = np.zeros((self.n_asm, 6, 2))
edge_sc_per_side = np.zeros((self.n_asm, 6), dtype=int)
for asm in range(self.n_asm):
for side in range(6):
# Figure out from which assembly to get mesh params
if self.asm_adj[asm][side] - 1 >= 0:
adj = asm_list[self.asm_adj[asm][side] - 1]
asm_with_mesh_params, sc_per_side = \
_which_asm_has_finer_mesh(asm_list[asm], adj)
else:
asm_with_mesh_params, sc_per_side = \
_which_asm_has_finer_mesh(asm_list[asm], None)
# Save the defining pin-pitch and corner perimeter
pin_pitch = 0.0
if asm_with_mesh_params.has_rodded:
pin_pitch = asm_with_mesh_params.rodded.pin_pitch
dwc = asm_with_mesh_params.rodded.d['wcorner'][-1, -1]
else:
pin_pitch = 0.0
dwc = 0.5 * asm_with_mesh_params.duct_oftf / _sqrt3
dims[asm, side] = [pin_pitch, dwc]
# Save the defining edge_sc_per_side
edge_sc_per_side[asm, side] = sc_per_side
return {'dims': dims, 'sc_per_side': edge_sc_per_side}
def _map_asm_gap_adjacency(self):
"""Map the interassembly gap subchannels that surround each
assembly in the core.
Parameters
----------
None
Returns
-------
list
Nested list-of-lists containing assembly-subchannel
adjacency per-hex-side
"""
sc_idx = 0 # running subchannel index
asm_adj_sc = [] # subchannels adjacent to each asm
for asm in range(self.n_asm):
# pre-allocate temp arrays to fill with values when counting
tmp_asm_adj_sc = []
for side in range(6):
# Index the subchannels along this hex side
tmp, sc_idx = self._index_gap_sc(
asm,
side,
sc_idx,
self._geom_params['sc_per_side'][asm][side],
asm_adj_sc)
tmp_asm_adj_sc.append(tmp)
# Add the temporary arrays to main arrays
asm_adj_sc.append(tmp_asm_adj_sc)
# Cast all value as int
for i in range(len(asm_adj_sc)):
for j in range(len(asm_adj_sc[i])):
asm_adj_sc[i][j] = [int(x) for x in asm_adj_sc[i][j]]
return asm_adj_sc
def _calculate_gap_xbnds(self):
"""Calculate the boundaries between the subchannels along each
assembly hex side
Parameters
----------
None
Returns
-------
numpy.ndarray
Boundaries for the interassembly gap subchannels
that surround each assembly (sc_per_side+1 x 6)
"""
asm_sc_xbnds = [] # 1D (along hex side) coords of gap SC bnds
for asm in range(self.n_asm):
tmp_sc_xbnds = []
for side in range(6):
# Figure out from which assembly to get mesh params
# if self.asm_adj[asm][side] - 1 > 0:
# adj = asm_list[self.asm_adj[asm][side] - 1]
# asm_with_mesh_params, sc_per_side = \
# _which_asm_has_finer_mesh(asm_list[asm], adj)
# else:
# asm_with_mesh_params, sc_per_side = \
# _which_asm_has_finer_mesh(asm_list[asm], None)
#
# Get the boundaries of the subchannels
hex_side_len = self.duct_oftf / _sqrt3
starting_x = hex_side_len * side
# Edge length = pin pitch
# Corner "half-perimeter"
pp, dwc = self._geom_params['dims'][asm, side]
# Add corner-edge boundary based on starting point
tmp_sc_xbnds.append(starting_x + dwc)
# Add subsequent edge boundaries
sc_per_side = self._geom_params['sc_per_side'][asm, side]
for sci in range(sc_per_side):
tmp_sc_xbnds.append(tmp_sc_xbnds[-1] + pp)
# Add the temporary arrays to main arrays
asm_sc_xbnds.append(tmp_sc_xbnds)
# Convert to numpy array and return
# max_scpa = max([len(x) for x in asm_sc_xbnds])
# _asm_sc_xbnds = np.zeros((self.n_asm, max_scpa))
_asm_sc_xbnds = np.zeros(self._asm_sc_adj.shape)
for a in range(self.n_asm):
tmp = np.array(asm_sc_xbnds[a])
_asm_sc_xbnds[a, :tmp.shape[0]] = tmp
return _asm_sc_xbnds
def _calculate_gap_xpts(self, asm_list):
"""Determine the center point of each gap/duct subchannel
connection for each assembly
Parameters
----------
asm_list : list
List of DASSH Assembly objects
Notes
-----
        Currently not used. May refine and activate in the future.
"""
asm_sc_xpts = [] # 1D (along hex side) coords of gap SC
for a in range(self.n_asm):
tmp_sc_xpts = []
for side in range(6):
# Figure out from which assembly to get mesh params
if self.asm_adj[a][side] - 1 > 0:
adj = asm_list[self.asm_adj[a][side] - 1]
asm, scps = _which_asm_has_finer_mesh(asm_list[a], adj)
else:
                    asm, scps = _which_asm_has_finer_mesh(asm_list[a], None)
xpts = [r.x_pts for r in asm.region]
len_of_xpts = [len(x) for x in xpts]
max_len = max(len_of_xpts)
matching_xpts = [x for x in xpts if len(x) == max_len]
tmp_sc_xpts.append(matching_xpts[0])
# Add the temporary arrays to main arrays
asm_sc_xpts.append(tmp_sc_xpts)
return asm_sc_xpts
def _determine_gap_sc_types(self):
"""Determine the gap subchannel types around each assembly
and globally
Parameters
----------
None
Returns
-------
tuple
Tuple of two lists containing (1) the subchannel types
relative to each loaded assembly and (2) relative to the
global subchannel indexing, respectively
"""
sc_types = [] # Global (1D) gap SC types
asm_sc_types = [] # Assembly adjacent gap SC types
for asm in range(self.n_asm):
# pre-allocate temp arrays to fill with values when counting
tmp_asm_sc_types = []
for side in range(6):
to_add = []
scps = self._geom_params['sc_per_side'][asm][side]
# If newly indexed subchannels: count new types
if self._need_to_count_side(asm, side):
# Newly counted edge subchannels along that side
to_add = [0 for i in range(scps)]
# Check if you need to count the trailing corner
if self._need_to_count_corner(asm, side):
to_add.append(1)
sc_types += to_add
# Add assembly-adjacent subchannel types to asm list
tmp_asm_sc_types.append([0 for i in range(scps)])
tmp_asm_sc_types[-1].append(1)
# Add the temporary arrays to main arrays
asm_sc_types.append(tmp_asm_sc_types)
return asm_sc_types, sc_types
def _calculate_dist_between_sc_OLD(self):
"""Calculate distance between subchannel centroids
Parameters
----------
None
Returns
-------
numpy.ndarray
Array (N_sc x 3) of distances to adjacent subchannels
Notes
-----
Carried out in three loops:
1. Determine distances per assembly, per hex side
2. Combine hex sides --> distances per assembly
3. Globalize --> distances between all subchannels
"""
asm_sc_dist = []
for asm in range(self.n_asm):
tmp_asm_sc_dist = []
for side in range(6):
# Figure out from which assembly to get mesh params
# if self.asm_adj[asm][side] - 1 > 0:
# adj = asm_list[self.asm_adj[asm][side] - 1]
# asm_with_mesh_params, scps = \
# _which_asm_has_finer_mesh(asm_list[asm], adj)
# else:
# asm_with_mesh_params, scps = \
# _which_asm_has_finer_mesh(asm_list[asm], None)
scps = self._geom_params['sc_per_side'][asm, side]
pp = self._geom_params['dims'][asm, side, 0]
# Get the distances between subchannels
gap_side_len = self.duct_oftf / _sqrt3
# (add lil extra beyond duct surface corner to reach
# center of corner channel along middle of gap)
gap_side_len += _sqrt3 * self.d_gap / 3
# print(gap_side_len, self.duct_oftf, self.d_gap)
L = np.zeros((scps + 1, 3))
if scps >= 1: # At least one edge SC
# Will for sure have edge-corner connection
L11 = pp
L12 = 0.5 * (gap_side_len - scps * L11) + 0.5 * L11
L[0, 0] = L12 # First edge, look back: edge-corner
L[-2, 1] = L12 # Last edge, look fwd: edge-corner
L[-1, 0] = L12 # Last corner, look back: edge-corner
if scps > 1: # More than one edge SC: L11 too
L[1:-1, 0] = L11 # other edge look back: edge-edge
L[:-2, 1] = L11 # other edge SC look fwd: edge-edge
else: # Corner-corner connection
L[0, 2] = gap_side_len
# print(asm, side, L)
tmp_asm_sc_dist.append(L)
# Add the temporary arrays to main arrays: need second loop
# because relies on result from first loop
for side in range(6):
# Need to fill in the trailing corner "looking forward" dist
                # Example: hex side 1 trailing corner does not have a value
# filled in to look forward to the next side. Need to take
# the first edge subchannel on hex side 2 "looking back"
# distance and pass it to the hex side 1 trailing corner.
L12_prev = tmp_asm_sc_dist[side][0, 0]
tmp_asm_sc_dist[side - 1][-1, 1] = L12_prev
tmp_asm_sc_dist = np.vstack(tmp_asm_sc_dist)
asm_sc_dist.append(tmp_asm_sc_dist)
# Now globalize - relies on previously assigned attributes
L_global = np.zeros((self.n_sc, 3))
# print('n_sc', self.n_sc)
for a in range(self.n_asm):
# print(a, asm_sc_dist[a].shape, self._asm_sc_adj[a].shape)
for i in range(self._asm_sc_adj[a].shape[0]):
sci = self._asm_sc_adj[a][i] - 1
if sci < 0:
continue
if np.all(L_global[sci] == 0):
L_global[sci] = asm_sc_dist[a][i]
else:
if self._asm_sc_types[a][i] == 0:
continue
else:
# If all values filled, no need to replace any
if not np.any(L_global[sci] == 0):
continue
else: # Fill in remaining corner value
s = np.count_nonzero(
self._asm_sc_types[a][:i])
L12 = (self._geom_params['dims'][a, s, 1]
+ _sqrt3 * self.d_gap / 6)
L12 += 0.5 * self._geom_params['dims'][a, s, 0]
L_global[sci, 2] = L12
# L_global[sci, 2] = asm_sc_dist[a][i, 1]
return L_global
def _calculate_dist_between_sc(self):
"""Calculate distance between subchannel centroids
Parameters
----------
None
Returns
-------
numpy.ndarray
Array (N_sc x 3) of distances to adjacent subchannels
Notes
-----
Carried out in three loops:
1. Determine distances per assembly, per hex side
2. Combine hex sides --> distances per assembly
3. Globalize --> distances between all subchannels
"""
gap_side_len = self.duct_oftf / _sqrt3
gap_side_len += _sqrt3 * self.d_gap / 3
lil_bit = _sqrt3 * self.d_gap / 6
L_global = np.zeros((self.n_sc, 3))
for i in range(self.n_sc):
# SKIP IF DONE: if all cols filled, don't need to write more
filled_pos = np.count_nonzero(L_global[i])
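            # Edge subchannels (type 0) have two neighbors and corner
            # subchannels (type 1) have three, so filled_pos minus the
            # type equals 2 only when every neighbor distance for this
            # subchannel has already been filled in.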
if filled_pos - self._sc_types[i] == 2:
continue
ip1 = i + 1
asm, loc = np.where(self._asm_sc_adj == ip1)
# Edge subchannels: adj asm share properties, so just use
# those from the first in the lookup list
if self._sc_types[i] == 0:
side = np.count_nonzero(self._asm_sc_types[asm[0]][:loc[0]])
pp, dwc = self._geom_params['dims'][asm[0], side]
for j in range(3):
sc_adj = self._sc_adj[i, j] - 1
if sc_adj < 0:
continue
if self._sc_types[sc_adj] == 1:
L_global[i, j] = 0.5 * pp + dwc + lil_bit
else:
L_global[i, j] = pp
# Corner subchannels: look to neighbors
else:
for j in range(3):
sc_adj = self._sc_adj[i, j] - 1
if sc_adj < 0:
continue
asm_adj, loc_adj = \
np.where(self._asm_sc_adj == self._sc_adj[i, j])
side_adj = np.count_nonzero(
self._asm_sc_types[asm_adj[0]][:loc_adj[0]])
pp, dwc = self._geom_params['dims'][asm_adj[0], side_adj]
if self._sc_types[sc_adj] == 1:
L_global[i, j] = gap_side_len
else:
L_global[i, j] = 0.5 * pp + dwc + lil_bit
return L_global
def _index_gap_sc(self, asm, side, sc_id, sc_per_side, already_idx):
"""Count gap subchannel indices along an assembly side
Parameters
----------
asm : int
Active assembly index
side : int
Active hex side
sc_id : int
Active gap subchannel index
sc_per_side : int
Number of gap edge subchannels along this hex side
already_idx : list
List of lists containing the already-indexed adjacency
between previous assemblies and gap subchannels
Returns
-------
list
Subchannel indices along the active hex side of the
active assembly
"""
if self._need_to_count_side(asm, side):
# Count edge subchannels along that side
to_add = list(np.arange(sc_id + 1, sc_id + 1 + sc_per_side))
sc_id += sc_per_side # update the sc index
# Check if you need to count the trailing corner
if self._need_to_count_corner(asm, side):
sc_id += 1
to_add.append(sc_id)
# If you don't need to count a new corner sc, get
# the existing corner from the adjacent assembly
else:
to_add.append(
self._find_corner_sc(
asm, side, already_idx))
else:
# get the subchannels that live here, including
# the trailing corner, which must already be
# defined if these side subchannels are defined.
to_add = self._find_side_sc(asm, side, already_idx)
return to_add, sc_id
def _find_side_sc(self, asm, side, asm_adj_sc):
r"""Find existing side subchannels
Parameters
----------
asm : int
Active assembly ID (index)
side : int
Hexagon side; side 0 is the primary diagonal
asm_adj_sc : list
List of numpy.ndarray containing the indices for the
interassembly gap subchannels that surround each assembly
(sc_per_side+1 x 6)
Returns
-------
numpy.ndarray
Indices of existing side (and trailing corner) subchannels
along the current side of the active assembly
Notes
-----
We are walking clockwise around the active assembly hexagon.
If this function is being called, the subchannels along this
side have been defined for the adjacent assembly. We want to
get these subchannels from the adjacent assembly.
Because we are walking clockwise around the active assembly,
we are walking counterclockwise along the faces of each of the
        adjacent assemblies. This means once we identify which
subchannels exist in the gap between these two assemblies, we
need to:
(1) Flip their order
(2) Drop the corner subchannel (it was the trailing corner
for the adjacent assembly but because the order is
reversed it is the leading corner for the active assembly)
(3) Get the trailing corner, which is the trailing corner from
the "previous" side of the adjacent assembly.
Graphic example
---------------
Interassembly gap subchannels 1 - 5 have already been defined
for assembly "Neighbor". As we walk clockwise around assembly
"Active", we encounter these subchannels in the opposite
direction.
* *
* *
* __1__ *
* __2__ *
Neighbor * __3__ * Active
* __4__ *
* 5 *
* *
* *
When this function is called, we will have already obtained
corner subchannel 5 from marching up the preceding side of
the active assembly; we will want to obtain side subchannels
4, 3, and 2, as well as corner subchannel 1, which we know
has been defined because the side channels have been defined.
"""
neighbor = self.asm_adj[asm][side]
neighbor_side = side - 3
# if neighbor_side < 0:
# neighbor_side += 6
neighbor_side_sc = asm_adj_sc[neighbor - 1][neighbor_side]
neighbor_side_sc = neighbor_side_sc[:-1] # drop corner
neighbor_side_sc = neighbor_side_sc[::-1] # flip direction
# get last entry from neighbor asm previous side
neighbor_side_sc = np.append(neighbor_side_sc,
asm_adj_sc[neighbor - 1]
[neighbor_side - 1][-1])
return neighbor_side_sc
def _find_corner_sc(self, asm, side, asm_adj_sc):
r"""Find the (existing) corner subchannel that "ends" the
current hexagon side.
Parameters
----------
asm : int
Active assembly ID (index)
side : int
Hexagon side; side 0 is the primary diagonal
asm_adj_sc : list
List of numpy.ndarray containing the indices for the
interassembly gap subchannels that surround each assembly
(sc_per_side+1 x 6)
Returns
-------
numpy.ndarray
Indices of existing side (and trailing corner) subchannels
along the current side of the active assembly
Notes
-----
If this function is being called, it's because the subchannels
along the current side have NOT yet been defined. This means
that the neighboring assembly across the side subchannels has
not had its interassembly gap subchannels defined. Therefore,
we should not bother to look at it to determine the corner
subchannel. Instead, we'll look at the "next" assembly,
referred to here as "neighbor plus one".
Graphic example
---------------
Interassembly gap subchannels 1 - 4 are being defined for the
active assembly. The subchannels for "Neighbor" have not yet
been defined. The subchannels for "Neighbor +1" have been
defined; we are seeking to determine corner subchannel "c".
* Neighbor +1 *
/ * * \
* a / * * \ e *
* b / * \ d *
* __c__ *
* __4__ *
Neighbor * __3__ * Active
* __2__ *
* 1 *
* *
* *
"""
# loc = np.where(self.asm_map == asm + 1)
# neighbor assembly across the next face (neighbor plus one)
neighbor_p1 = self.asm_adj[asm][side - 5]
# neighbor_p1_loc = (loc[0] + _dirs[self.hex_option][side - 5][0],
# loc[1] + _dirs[self.hex_option][side - 5][1])
# neighbor_p1_side = side - 2
return asm_adj_sc[neighbor_p1 - 1][side - 2][-1]
def _need_to_count_side(self, asm, side):
"""Determine whether an interassembly gap side channel needs
to be counted or whether it has already been counted by the
adjacent assembly"""
neighbor = self.asm_adj[asm][side]
if neighbor > 0:
# Redefine neighbor according to the assembly map
# If any of the indices in the neighbor location are
# outside the assembly map (either less than zero or
# greater than the assembly length), need to count the
# side.
loc = np.where(self.asm_map == asm + 1)
neighbor_loc = (loc[0] + _dirs[self.hex_option][side][0],
loc[1] + _dirs[self.hex_option][side][1])
if (not all(idx >= 0 and idx < len(self.asm_map)
for idx in neighbor_loc)):
return True
# Otherwise, the neighbor is within the bounds of the
# assembly map array and can be defined.
else:
neighbor = self.asm_map[neighbor_loc]
# If the neighbor ID is 0, the side must be defined.
if neighbor == 0:
return True
# If the neighbor ID is greater than that of the active
# assembly, its gap subchannels have not been counted,
# and therefore the side must be defined.
elif asm + 1 < neighbor:
return True
# If none of the above are true, then gap subchannels
# along this side have been defined and we don't need
# to define them.
else:
return False
# If the neighbor assembly in the neighbors matrix is defined
# as 0, then we have to define gap subchannels along this side.
else:
return True
def _need_to_count_corner(self, asm, side):
"""Determine whether an interassembly gap corner channel
needs to be counted or whether it has already been counted
by one of the adjacent assemblies.
Notes
-----
If this method is being called, the gap subchannels on the
active side needed to be defined. This means that the gap
subchannels for the immediate neighbor have NOT been defined.
Therefore, we need to look at the "neighbor plus one" assembly
to determine whether we need to define a new gap corner
subchannel.
"""
# neighbor plus one
neighbor_p1 = self.asm_adj[asm][side - 5]
if neighbor_p1 > 0:
# Redefine neighbor-plus-one according to the assembly
# map. If any of the indices in the neighbor-plus-one
# location are outside the assembly map (either less than
# zero or greater than the assembly length), need to count
# the side.
loc = np.where(self.asm_map == asm + 1)
loc = (loc[0] + _dirs[self.hex_option][side - 5][0],
loc[1] + _dirs[self.hex_option][side - 5][1])
if (not all(idx >= 0 and idx < len(self.asm_map)
for idx in loc)):
return True
# Otherwise, the neighbor-plus-one is within the bounds
# of the assembly map array and can be defined.
else:
neighbor_p1 = self.asm_map[loc]
# If the neighbor-plus-one ID is 0, the side must be
# defined.
if neighbor_p1 == 0:
return True
# If the neighbor-plus-one ID is greater than that of
# the active assembly, its gap subchannels have not
# been counted; therefore the side must be defined.
elif asm + 1 < neighbor_p1:
return True
# If none of the above are true, then gap subchannels
# along this side have been defined and we don't need
# to define them.
else:
return False
# If the neighbor-plus-one assembly has ID equal to 0, then the
# corner gap subchannel must be defined.
else:
return True
def _find_adjacent_sc(self, asm_sc_adj):
"""Use the array mapping interassembly gap subchannels to
adjacent assemblies to identify which subchannels are adjacent
to each other
Parameters
----------
asm_sc_adj : list
Nested lists containing subchannel indices (base 1)
adjacent to each assembly; size = N_asm x 6 x N_sc_on_side
(note that "N_sc_on_side" can vary)
Returns
-------
numpy.ndarray
Array (N_gap_sc x 3) indicating adjacency between gap
subchannels
"""
sc_adj = np.zeros((self.n_sc, 3), dtype='int')
for ai in range(len(asm_sc_adj)):
asm_sc = asm_sc_adj[ai]
for side in range(len(asm_sc)):
# for sci in range(len(asm_sc[side]) - 1):
for sci in range(len(asm_sc[side])):
# Look to trailing corner on previous side
if sci == 0:
# Fill trailing corner's value into active index
sc = asm_sc[side][sci]
if asm_sc[side - 1][-1] not in sc_adj[sc - 1]:
idx = np.where(sc_adj[sc - 1] == 0)[0][0]
sc_adj[sc - 1, idx] = asm_sc[side - 1][-1]
# Fill active index into trailing corner
sc = asm_sc[side - 1][-1]
if asm_sc[side][sci] not in sc_adj[sc - 1]:
# ADDED 2021-04-22
# Wanted to make side 0 trailing corner
# adjacency order of the rest of the SC
if side == 0:
idx = 1
else:
idx = np.where(sc_adj[sc - 1] == 0)[0][0]
sc_adj[sc - 1, idx] = asm_sc[side][sci]
# For the sc in current index: map the sc in next index
if sci < len(asm_sc[side]) - 1:
sc = asm_sc[side][sci]
if asm_sc[side][sci + 1] not in sc_adj[sc - 1]:
idx = np.where(sc_adj[sc - 1] == 0)[0][0]
sc_adj[sc - 1, idx] = asm_sc[side][sci + 1]
# For the sc in next index; map the sc in current index
sc = asm_sc[side][sci + 1]
if asm_sc[side][sci] not in sc_adj[sc - 1]:
idx = np.where(sc_adj[sc - 1] == 0)[0][0]
sc_adj[sc - 1, idx] = asm_sc[side][sci]
return sc_adj
def _calculate_sc_wp(self):
"""Calculate wetted perimeter of each gap subchannel
Parameters
----------
None
Returns
-------
numpy.ndarray
Wetted perimeter (m) of each subchannel (N_sc x 1)
"""
# Loop over all assemblies to calculate WP of adjacent SC
wp = np.zeros(self.n_sc)
hex_perim = self.duct_oftf * 6 / np.sqrt(3)
for a in range(self.n_asm):
xtmp = self._asm_sc_xbnds[a]
xtmp = xtmp[self._asm_sc_adj[a] > 0]
for i in range(len(xtmp) - 1):
sci = self._asm_sc_adj[a][i] # <-- remember, base-1 idx
if sci < 1:
continue
# Just duct wetted perimeter; mult by width later
wp[sci - 1] += xtmp[i + 1] - xtmp[i]
# WP of the last one needs to wrap around to the first
sci = self._asm_sc_adj[a][i + 1]
wp[sci - 1] += hex_perim - xtmp[-1] + xtmp[0]
# Corrections for outermost subchannels.
# Edge subchannels need WP0 x 2
for i in range(self.n_sc):
asm, loc = np.where(self._asm_sc_adj == i + 1)
if self._sc_types[i] == 0:
if len(asm) == 1: # <-- this means it's an outer SC
wp[i] *= 2 # haven't counted "non-asm" wall
else: # Treat corners adjacent one or two assemblies
if len(asm) == 1:
wp[i] *= 2
wp[i] += 2 * self.d_gap / _sqrt3
elif len(asm) == 2:
x = np.zeros((2, 2))
for a in range(2):
scps = self._geom_params['sc_per_side'][asm[a]]
tmp = np.cumsum(scps)
tmp += np.arange(0, 6, 1)
s1 = np.where(tmp == loc[a])[0][0]
if s1 == 5:
s2 = 0
else:
s2 = s1 + 1
x[a, 0] = self._geom_params['dims'][asm[a]][s1][1]
x[a, 1] = self._geom_params['dims'][asm[a]][s2][1]
# Choose the nonshared ones
dwc = np.zeros(2)
for a in range(2):
if x[a, 0] in x[a - 1]:
dwc[a] = x[a, 1]
else:
dwc[a] = x[a, 0]
wp[i] += dwc[0] + dwc[1]
else:
continue
return wp
def _calculate_asm_sc_wp(self):
"""Calculate wetted perimeter of each gap subchannel relative
to its adjacent assembly
Parameters
----------
None
Returns
-------
numpy.ndarray
Wetted perimeter (m) of each subchannel (N_asm x N_scpa)
"""
# Loop over all assemblies to calculate WP of adjacent SC
wp = np.zeros((self._asm_sc_xbnds.shape))
hex_perim = self.duct_oftf * 6 / np.sqrt(3)
for a in range(self.n_asm):
xtmp = self._asm_sc_xbnds[a]
xtmp = xtmp[self._asm_sc_adj[a] > 0]
for i in range(len(xtmp) - 1):
if self._asm_sc_adj[a][i] < 1: # <-- remember, base-1 idx
continue
# Just duct wetted perimeter; mult by width later
wp[a, i] = xtmp[i + 1] - xtmp[i]
# WP of the last one needs to wrap around to the first
wp[a, i + 1] += hex_perim - xtmp[-1] + xtmp[0]
return wp
def _calculate_sc_area(self):
"""Calculate the flow area of each gap subchannel
Parameters
----------
None
Returns
-------
numpy.ndarray
Area (m^2) of each gap subchannel (N_sc x 1)
"""
# Already have WP, which includes all adjacency. If adjacent
# to 2 or 3 neighbors, need: WP / 2 as the "length" of the SC
# otherwise: use WP
# Width is the gap width
# NOTE: for corners, need to add that lil center triangle if
# next to a neighbor; if alone, add different thing
corner_neighbor = self.d_gap**2 * _sqrt3 / 4
corner_no_neighbor = self.d_gap**2 * _sqrt3 / 3
area = self.gap_params['wp'] * self.d_gap
for i in range(self.n_sc):
asm, loc = np.where(self._asm_sc_adj == i + 1)
if self._sc_types[i] == 0:
area[i] *= 0.5
else:
if len(asm) == 1:
area[i] = self.gap_params['asm wp'][asm[0], loc[0]]
area[i] *= self.d_gap
area[i] += corner_no_neighbor
else:
area[i] *= 0.5
area[i] += corner_neighbor
return area
def _calculate_sc_de(self):
"""Calculate hydraulic diameter of each gap subchannel
Parameters
----------
None
Returns
-------
numpy.ndarray
Hydraulic diameter (m) of each subchannel (N_sc x 1)
"""
# De = 4A / WP; already calculated area, WP
return 4 * self.gap_params['area'] / self.gap_params['wp']
def _make_conv_mask(self):
"""Create masks to quickly handle the subchannel-duct adjacency
Notes
-----
Each gap coolant subchannel will touch either:
- 1 duct (if it's at the edge of the problem);
- 2 ducts (if an edge gap between two assemblies);
- or 3 ducts (if it's a corner gap between 3 assemblies).
We already have an array that links the subchannels to the
adjacent assemblies, and from that, we can figure out which
duct meshes each subchannel is in contact with. Because this
is static throughout the problem, we can precompute this
relationship and save it in the form of a mask: a set of 1s
and 0s that indicate whether to keep or discard a value.
We will have 3 masks, one for each possible adjacency that a
subchannel can have. We'll naively grab duct temperatures that
appear to match with the subchannel, but then we'll apply these
masks to ensure that they're only added if the adjacency exists
"""
self._conv_util = {}
# Collect assembly, hex-side, and side-location indices for
# each duct mesh; if no match, use -1 as a placeholder; this
# is what we'll filter on later.
a = [[], [], []]
# Collect convection constants in array: need "wetted perimeter"
# of subchannel connection with each adjacent assembly (up to 3)
self._conv_util['const'] = np.zeros((self.n_sc, 3))
for sci in range(self.n_sc):
asm, loc = np.where(self._asm_sc_adj == sci + 1)
# asm = [ai for ai in range(len(self._asm_sc_adj))
# if sci + 1 in self._asm_sc_adj[ai]]
# loc = [np.where(self._asm_sc_adj[ai] == sci + 1)[0][0]
# for ai in asm]
# calculate "duct index" based on hex-side and side-loc
# new = side * (self._sc_per_side + 1) + loc
# Connection 0: always at least one gap-duct connection
a[0].append((asm[0], loc[0]))
# Try the remaining two possible connections
for i in range(2):
try:
a[i + 1].append((asm[i + 1], loc[i + 1]))
except IndexError:
a[i + 1].append((-1, -1))
# Calculate convection constant based on sc-duct connections
for i in range(len(asm)):
self._conv_util['const'][sci, i] = \
self.gap_params['asm wp'][asm[i], loc[i]]
# Now we're going to collect the indices where gap and duct
# match up; these are where we will go pull out temperatures
# from the incoming duct array. Note that because we used -1
# as a placeholder and -1 is shorthand for the last value in
# the array, we'll be pulling some bad values. Anywhere that
# a = (-1, -1), the indices will also equal -1. This is what
# the masks will handle.
inds = []
for i in range(3):
inds.append(np.moveaxis(np.array(a[i]), -1, 0))
self._conv_util['inds'] = inds
# Now let's create the masks. Anywhere that self._inds = -1,
# we will set the mask equal to 0 so that any values captured
# by that index are eliminated. There are only two masks bc
# the first temperature returned is always valid (since there
# is always at least one duct-gap connection)
self._conv_util['mask1'] = self._conv_util['inds'][1][0] >= 0
self._conv_util['mask2'] = self._conv_util['inds'][2][0] >= 0
if self.model == 'no_flow':
self._conv_util['const'] *= (2 / self.d_gap)
def _make_cond_mask(self):
"""Just like for the convection lookup, make a mask that can
accelerate the adjacent subchannel conduction lookup
Notes
-----
This doubles as a shortcut to storing the constant parts of
the conduction resistance
"""
# The conduction resistance constant is just the gap width
# "d_gap" divided by the heat transfer distance ("L", the
# distance between subchannels). Where a subchannel doesn't
# have a neighbor, L=0 and can't be in the denominator. In
# that case, we set the conduction constant to zero so that
# heat transfer cannot occur.
self._Rcond = np.divide(self.d_gap, self.gap_params['L'],
out=np.zeros_like(self.gap_params['L']),
where=(self.gap_params['L'] != 0))
####################################################################
# TEMPERATURE PROPERTIES
####################################################################
@property
def avg_coolant_gap_temp(self):
"""Return the average temperature of the gap coolant
subchannels at the last axial level"""
# tot = 0.0
# for i in range(len(self.coolant_gap_temp)):
# tot += (self.gap_params['area'][self.sc_types[i]]
# * self.coolant_gap_temp[i])
# return tot / self.gap_params['total area']
# return np.sum(self.coolant_gap_temp
# * self.gap_params['area frac'])
return np.dot(self.coolant_gap_temp,
self.gap_params['area frac'])
def adjacent_coolant_gap_temp(self, id):
"""Return the coolant temperatures in gap subchannels
around a specific assembly at the last axial level
Parameters
----------
id : int
Assembly ID
Notes
-----
If necessary, approximate the temperatures from the interasm
gap mesh to the assembly duct mesh.
"""
return self.coolant_gap_temp[self._asm_sc_adj[id] - 1].flatten()
def adjacent_coolant_gap_htc(self, id):
"""Return heat transfer coefficients in gap subchannels
around a specific assembly
Parameters
----------
id : int
Assembly ID
"""
# return self.coolant_gap_params['htc'][
# self.sc_types[self.asm_sc_adj[id] - 1]].flatten()
return self.coolant_gap_params['htc'][
self._asm_sc_adj[id] - 1].flatten()
####################################################################
# UPDATE GAP COOLANT PARAMS
####################################################################
def _update_coolant_gap_params(self, temp):
"""Update correlated core inter-assembly gap coolant parameters
based on current average coolant temperature
Parameters
----------
temp : list
Average coolant temperature in inter-assembly gap
"""
self.gap_coolant.update(temp)
# Inter-assembly gap average velocity
self.coolant_gap_params['vel'] = \
(self.gap_flow_rate
/ self.gap_coolant.density
/ self.gap_params['total area'])
# Gap-average Reynolds number
self.coolant_gap_params['Re'] = \
(self.gap_flow_rate
* self.gap_params['total de']
/ self.gap_coolant.viscosity
/ self.gap_params['total area'])
# Subchannel Reynolds numbers
self.coolant_gap_params['Re_sc'] = \
(self.coolant_gap_params['_Re_sc'] # <-- = m_i * De_i / A_i
/ self.gap_coolant.viscosity)
# Heat transfer coefficient (via Nusselt number)
# Although coolant properties are global and velocity is the
# same everywhere, the subchannels do not have equal hydraulic
# diameter. Therefore will all have unique Nu
if self.model is None:
self.coolant_gap_params['htc'] = np.zeros(self.n_sc)
elif self.model == 'flow':
nu = nusselt_db.calculate_sc_Nu(
self.gap_coolant,
self.coolant_gap_params['Re_sc'],
self._htc_params)
self.coolant_gap_params['htc'] = \
(self.gap_coolant.thermal_conductivity
* nu / self.gap_params['de'])
else: # Nu == 1
# self.coolant_gap_params['htc'] = \
# (self.gap_coolant.thermal_conductivity
# / self.gap_params['de'])
# self.coolant_gap_params['htc'] = np.ones(self.n_sc)
# self.coolant_gap_params['htc'] *= 0.5 * self.d_gap
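            # With Nu = 1, h = k / (d_gap / 2): pure conduction across
            # half of the gap width between the bulk coolant and the duct.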
h = 2 * self.gap_coolant.thermal_conductivity / self.d_gap
self.coolant_gap_params['htc'] = np.full((self.n_sc,), h)
####################################################################
# COOLANT TEMPERATURE CALCULATION
####################################################################
def calculate_gap_temperatures(self, dz, asm_duct_temps):
"""Calculate the temperature of the inter-assembly coolant
Parameters
----------
dz : float
Axial step size
asm_duct_temps : list
List of outer duct surface temperatures (K) for each
assembly in the core (length must match gap meshing)
Returns
-------
None
"""
T_avg = self.avg_coolant_gap_temp
self._update_coolant_gap_params(T_avg)
self._update_energy_balance(dz, asm_duct_temps)
# Calculate new coolant gap temperatures
if self.model == 'flow':
dT = self._flow_model(dz, asm_duct_temps)
self.coolant_gap_temp += dT
elif self.model == 'no_flow':
self.coolant_gap_temp = self._noflow_model(asm_duct_temps)
elif self.model == 'duct_average':
self.coolant_gap_temp = self._duct_average_model(asm_duct_temps)
else: # self.model == None:
# No change to coolant gap temp, do nothing
pass
def _update_energy_balance(self, dz, approx_duct_temps):
"""Track the energy added to the coolant from each duct wall
mesh cell; summarize at the end for assembly-assembly energy
balance"""
# Convection constant
h = self.coolant_gap_params['htc'][self._asm_sc_adj - 1]
# Adj_cool = N_asm x 6 x n_sc
adj_cool = self.coolant_gap_temp[self._asm_sc_adj - 1]
# Smush to be N_asm x (6 * n_sc)
self.ebal['asm'] += (h * self.gap_params['asm wp']
* dz * (approx_duct_temps - adj_cool))
def _flow_model(self, dz, t_duct):
"""Inter-assembly gap convection model
Parameters
----------
dz : float
Axial mesh height
        t_duct : numpy.ndarray
Array of outer duct surface temperatures (K) for each
assembly in the core (can be any length) on the inter-
assembly gap subchannel mesh
Returns
-------
numpy.ndarray
Temperature change in the inter-assembly gap coolant
"""
# CONVECTION TO/FROM DUCT WALL
C = (self._conv_util['const']
* self.coolant_gap_params['htc'][:, None])
dT = C[:, 0] * (t_duct[tuple(self._conv_util['inds'][0])]
- self.coolant_gap_temp)
dT += C[:, 1] * (t_duct[tuple(self._conv_util['inds'][1])]
- self.coolant_gap_temp)
dT += C[:, 2] * (t_duct[tuple(self._conv_util['inds'][2])]
- self.coolant_gap_temp)
# CONDUCTION TO/FROM OTHER COOLANT CHANNELS
dT += (self.gap_coolant.thermal_conductivity
* np.sum((self._Rcond *
(self.coolant_gap_temp[self._sc_adj - 1]
- self.coolant_gap_temp[..., None])), axis=1))
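        # dT above has units of W/m (per unit axial length); multiplying
        # by dz and dividing by (mass flow rate * heat capacity) converts
        # it to a temperature change in K for this axial step.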
return (dT * dz * self._inv_sc_mfr
/ self.gap_coolant.heat_capacity)
def _noflow_model(self, t_duct):
"""Inter-assembly gap conduction model
Parameters
----------
t_duct : numpy.ndarray
Array of outer duct surface temperatures (K) for each
assembly in the core (can be any length) on the inter-
assembly gap subchannel mesh
Returns
-------
numpy.ndarray
Temperature in the inter-assembly gap coolant
Notes
-----
Recommended for use when inter-assembly gap flow rate is so
        low that the axial mesh requirement is intractably small.
Assumes no thermal contact resistance between the duct wall
and the coolant.
The contact resistance between the bulk liquid and the duct
wall is calculated using a heat transfer coefficient based on
the actual velocity of the interassembly gap flow
"""
# CONVECTION TO/FROM DUCT WALL
R_conv = self._conv_util['const']
# Lookup temperatures and mask as necessary
T = R_conv[:, 0] * t_duct[tuple(self._conv_util['inds'][0])]
T += R_conv[:, 1] * t_duct[tuple(self._conv_util['inds'][1])]
T += R_conv[:, 2] * t_duct[tuple(self._conv_util['inds'][2])]
# Get the total convection resistance, which will go in the
# denominator at the end
C_conv = R_conv[:, 0] + R_conv[:, 1] + R_conv[:, 2]
# CONDUCTION TO/FROM OTHER COOLANT CHANNELS
R_cond = self._Rcond
adj_ctemp = self.coolant_gap_temp[self._sc_adj - 1] * R_cond
C_cond = R_cond[:, 0] + R_cond[:, 1] + R_cond[:, 2]
# COMBINE AND APPLY TOTAL RESISTANCE DENOM
T += adj_ctemp[:, 0] + adj_ctemp[:, 1] + adj_ctemp[:, 2]
return T / (C_cond + C_conv)
def _duct_average_model(self, t_duct):
"""Inter-assembly gap model that simply averages the adjacent
duct wall surface temperatures
Parameters
----------
t_duct : numpy.ndarray
Array of outer duct surface temperatures (K) for each
assembly in the core (can be any length) on the inter-
assembly gap subchannel mesh
Returns
-------
numpy.ndarray
Temperature in the inter-assembly gap coolant
Notes
-----
Recommended for use when inter-assembly gap flow rate is so
        low that the axial mesh requirement is intractably small.
Assumes no thermal contact resistance between the duct wall
and the coolant.
The contact resistance between the bulk liquid and the duct
wall is calculated using a heat transfer coefficient based on
the actual velocity of the interassembly gap flow
"""
# if not hasattr(self, '_conv_util'):
# self._make_conv_mask() # creates lookup indices and masks
# Lookup temperatures and mask as necessary
T0 = t_duct[tuple(self._conv_util['inds'][0])]
T1 = (t_duct[tuple(self._conv_util['inds'][1])]
* self._conv_util['mask1'])
T2 = (t_duct[tuple(self._conv_util['inds'][2])]
* self._conv_util['mask2'])
# Average nonzero values
return (np.sum((T0, T1, T2), axis=0)
/ np.count_nonzero((T0, T1, T2), axis=0))
####################################################################
# MAP ASSEMBLY XY COORDINATES
####################################################################
def map_assembly_xy(self):
"""Determine the X-Y positions of the assemblies in the core.
Create a vector containing the x,y positions of the assemblies
in the hexagonal lattice.
Parameters
----------
None
Returns
-------
numpy.ndarray
Array (N_asm x 2) containing the assembly center X-Y coords
"""
xy = np.zeros((np.max(self.asm_map), 2))
normals = [2 * np.pi / 3,
np.pi,
4 * np.pi / 3,
5 * np.pi / 3,
0.0,
np.pi / 3]
# loc = (0.0, 0.0)
# Directions turning counterclockwise around a hexagon
# First entry is step from an inner ring to the top of an outer ring
# The remaining steps are the turns around the hexagon corners
_turns = [(1, 0), (1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1)]
idx = 1
loc = (0.0, 0.0)
for ring in range(2, int(self.n_ring + 1)):
d = 0
row = self.n_ring - ring
col = self.n_ring - ring
# first step always to the right
loc = (self.asm_pitch * (ring - 1), 0.0)
if self.asm_map[row, col] != 0:
xy[idx] = loc
idx += 1
# next steps walk around the ring
row += 1 # already did first step
positions = 6 * (ring - 1) # all positions on active ring
corners = np.arange(0, positions, ring - 1)
for pos in range(1, int(positions)):
loc = (loc[0] + self.asm_pitch * np.cos(normals[d]),
loc[1] + self.asm_pitch * np.sin(normals[d]))
# The active position may be 0 in reg_assignments,
# meaning that there's no region there. In that case,
# skip; otherwise, fill empty map entry.
if self.asm_map[row, col] != 0:
xy[idx] = loc
idx += 1
# change directions if you reach a corner
if pos > 0 and pos in corners:
d += 1
# update row and column for next position
row, col = row + _turns[d][0], col + _turns[d][1]
return xy
########################################################################
# CORE MAPPING METHODS
########################################################################
def count_rings(n_asm):
"""Identify number of rings given list of assembly
ring/position inputs"""
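    # A full hexagonal lattice with nr rings holds 3*nr*(nr - 1) + 1
    # assemblies; the expression below inverts that count, with ceil
    # rounding up when the outermost ring is only partially filled.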
nr = int(np.ceil(0.5 * (1 + np.sqrt(1 + 4 * (n_asm - 1) // 3))))
return nr
def map_asm(asm_list):
r"""Map the assembly locations in the core.
Parameters
----------
asm_list : list
List of assembly assignments to positions, by position.
Length is equal to the total number of positions possible
in the core hexagon
Returns
-------
numpy.ndarray
Map of assemblies in the core
Notes
-----
The assemblies are numbered starting at the center assembly
(1) and continuing outward around each of the rings. The
first assembly of the next ring is that located on the
diagonal immediately above the center assembly. The
assemblies in each ring are labeled by traveling clockwise
around the ring.
A regular hexagon can be divided by three straight lines along
the long diagonals that pass through the corners and intersect
at the center. One of these runs straight up and down; the
second has an angle of 30 degrees from horizontal; the third
has an angle of 150 degrees from horizontal.
This assembly numbering scheme and division of the hexagon can
be used to map the assembly labels from the hexagon to a square
    matrix, which mimics the three long diagonals of the hexagon
with the rows, columns, and one diagonal.
Example
-------
If the center assembly is labeled "1", then the second ring of
assemblies may be labeled:
(y) (x)
\ 7 .#
\ _____ #.
6 /\ / \ 2
/___\1/___\
\ / \ /
5 \/_____\/ 3
4
The map for this 7-assembly core would be as shown below; note
the rotation so that the first assembly in the new ring starts
at the top left position.
____
| 2 3 0 | | 2 3 \
| 7 1 4 | (note: | 7 1 4 | looks like a hexagon!)
| 0 6 5 | \ _6_5_|
Periodicity
-----------
The GEODST file may only specify 1/6 or 1/3 of the core,
which implies 60 or 120 degree periodicity. In that case,
the array obtained from the GEODST region map is only of
the partial core. These cases and the full core case are
handled separately within this method.
"""
# Directions turning clockwise around a hexagon
# First entry is step from an inner ring to the top of an
# outer ring; the remaining steps are the turns around the
# hexagon corners
# _dirs = [(-1, -1), (0, 1), (1, 1), (1, 0),
# (0, -1), (-1, -1), (-1, 0)]
_dirs = [(-1, -1), (1, 0), (1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1)]
nr = count_rings(len(asm_list))
asm_map = np.zeros((nr * 2 - 1, nr * 2 - 1), dtype=int)
asm_idx = 1
pos_idx = 1
# Fill center position
if not np.isnan(asm_list[0]):
asm_map[nr - 1, nr - 1] = asm_idx
asm_idx += 1
pos_idx += 1
# Fill rings
for ring in range(2, int(nr + 1)):
row = nr - ring
col = nr - ring
positions = 6 * (ring - 1) # all positions on active ring
        corners = np.arange(0, positions, ring - 1)
# Tests of the quasiisothermaldf module
from __future__ import print_function, division
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_pvRvT_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
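    # Compute grid-based moments of the joint (vR, vT) distribution:
    # means, dispersions, and the vR-vT correlation are obtained by
    # weighting the velocity grids with pvRvT.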
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for adiabatic actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for adiabatic actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for adiabatic actions'
return None
def test_pvRvT_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
return None
def test_pvRvT_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=10) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
    #ngl=40
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=40) for vt in vTs] for vr in vRs])
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
    svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)