| prompt | completion | api |
|---|---|---|
| stringlengths 19-879k | stringlengths 3-53.8k | stringlengths 8-59 |
import os
import numpy as np
import random
import matplotlib.pyplot as plt
import lmdb
import pickle
import platform
np.random.seed(np.random.randint(1<<30))
num_frames = 20
seq_length = 20
image_size = 64
batch_size = 1
num_digits = 1
step_length = 0.1
digit_size = 28
frame_size = image_size ** 2
def create_reverse_dictionary(dictionary):
dictionary_reverse = {}
for word in dictionary:
index = dictionary[word]
dictionary_reverse[index] = word
return dictionary_reverse
dictionary = {'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'the': 10, 'digit': 11, 'and': 12,
'is':13, 'are':14, 'bouncing': 15, 'moving':16, 'here':17, 'there':18, 'around':19, 'jumping':20, 'up':21,
'down':22, 'left':23, 'right':24, 'then':25, '.':26}
motion_strings = ['up then down', 'left then right', 'down then up', 'right then left']
def create_dataset():
numbers = np.random.permutation(10)
dataset = np.zeros((2,10), dtype=int)
dataset[0,:] = numbers
dataset[1,:] = 10 + numbers
train = []
val = []
count = 0
for i in range(10):
dummy = count % 2
val.append(dataset[dummy, i])
train.append(dataset[1-dummy, i])
count = count + 1
return np.array(train), np.array(val)#,np.array(test)
def sent2matrix(sentence, dictionary):
words = sentence.split()
m = np.int32(np.zeros((1, len(words))))
for i in range(len(words)):
m[0,i] = dictionary[words[i]]
return m
def matrix2sent(matrix, reverse_dictionary):
text = ""
for i in range(matrix.shape[0]):
text = text + " " + reverse_dictionary[matrix[i]]
return text
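# Illustrative usage sketch (not part of the original snippet): round-trip a caption through
# the encoding helpers defined above. The example caption is an assumption; it only uses
# words that are present in `dictionary`.
reverse_dictionary = create_reverse_dictionary(dictionary)
caption = 'the digit 3 is bouncing up then down .'
encoded = sent2matrix(caption, dictionary)             # int32 matrix of shape (1, num_words)
decoded = matrix2sent(encoded[0], reverse_dictionary)  # " the digit 3 is bouncing up then down ."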
def GetRandomTrajectory(batch_size,motion):
length = seq_length
canvas_size = image_size - digit_size
y = np.random.rand(batch_size) # the starting point of the two numbers
x = np.random.rand(batch_size)
start_y = np.zeros((length, batch_size))
start_x = np.zeros((length, batch_size))
if int(motion) == 0:
theta = np.ones(batch_size) * 0.5 * np.pi
else:
theta = np.ones(batch_size) * 0 * np.pi
v_y = 2 * np.sin(theta)
"""
Training step for the paper: four labels + Ak + C + N
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
import pyfits
#sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon/TheCannon')
#sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon')
from TheCannon import dataset
from TheCannon import model
from TheCannon import lamost
from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
GIT_DIR = "/Users/annaho/Dropbox/Research/TheCannon/"
DATA_DIR = GIT_DIR + "data/"
SPEC_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age/with_col_mask/xval_with_cuts"
#SPEC_DIR = "."
def load_data():
print("Loading all data")
DIR = DATA_DIR  # DATA_DIR already includes GIT_DIR
a = pyfits.open("%s/labels_file_full.fits" %DIR)
tbl = a[1].data
a.close()
# Pull out all APOGEE DR12 values
# FPARAM: (teff, logg, rvel, mh, c, n, alpha)
teff_all = tbl['FPARAM'][:,0]
logg_all = tbl['FPARAM'][:,1]
mh_all = tbl['FPARAM'][:,3]
cm_all = tbl['FPARAM'][:,4]
nm_all = tbl['FPARAM'][:,5]
am_all = tbl['FPARAM'][:,6]
ak_all = tbl['AK_WISE']
# Discard objects with Teff > 4550 if -1 < [M/H] < -0.5
print("Discarding objects")
choose_teff = teff_all > 4550
choose_mh = np.logical_and(-1 < mh_all, mh_all < -0.5)
discard_teff = np.logical_and(choose_mh, choose_teff) # 743 objects
# Discard objects with [C/M] < -0.4 dex
discard_cm = cm_all < -0.4 # 40 objects
# metal-poor stars [M/H] < -0.1 have sketchy scaling relations
# but this shouldn't affect our spectral C and N
# in Marie's paper they don't have any low-metallicity stars,
# but it doesn't matter for the training anyway.
bad = np.logical_or(discard_teff, discard_cm)
choose = ~bad
ref_id = tbl['lamost_id'][choose]
ref_id = np.array([val.strip() for val in ref_id]).astype(str)
ref_label = np.vstack((
teff_all[choose], logg_all[choose], mh_all[choose],
cm_all[choose], nm_all[choose], am_all[choose],
ak_all[choose])).T
np.savez("./ref_id.npz", ref_id)
np.savez("./ref_label.npz", ref_label)
print("Getting spectra")
all_id = np.load("%s/tr_id.npz" %SPEC_DIR)['arr_0'].astype(str)
all_flux = np.load("%s/tr_flux.npz" %SPEC_DIR)['arr_0']
all_ivar = np.load("%s/tr_ivar.npz" %SPEC_DIR)['arr_0']
choose = np.array([np.where(all_id==f)[0][0] for f in ref_id])
flux = all_flux[choose,:]
ivar = all_ivar[choose,:]
np.savez("ref_flux.npz", flux)
np.savez("ref_ivar.npz", ivar)
def train():
wl = np.load("%s/../wl_cols.npz" %SPEC_DIR)['arr_0']
tr_id = np.load("%s/ref_id.npz" %SPEC_DIR)['arr_0']
tr_label = np.load("%s/ref_label.npz" %SPEC_DIR)['arr_0']
tr_label = tr_label[:,0:3]
tr_flux = np.load("%s/ref_flux.npz" %SPEC_DIR)['arr_0']
tr_ivar = np.load("%s/ref_ivar.npz" %SPEC_DIR)['arr_0']
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label,
tr_id, tr_flux, tr_ivar)
# teff, logg, mh, cm, nm, am, ak
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]']) #, '[C/M]','[N/M]',
#'[\\alpha/M]', 'A_k'])
#ds.diagnostics_SNR()
#ds.diagnostics_ref_labels()
#np.savez("ref_snr.npz", ds.tr_SNR)
print("Training model")
nlab = ds.tr_label.shape[1]
print(nlab)
npix = len(ds.wl)
print(npix)
filt = np.ones((nlab, npix), dtype=bool)
print(filt)
#filt[nlab-1,0:500] = 0
m = model.CannonModel(2, wl_filter = filt)
m.fit(ds)
np.savez("./coeffs.npz", m.coeffs)
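# Note (illustrative, not in the original): np.savez called with positional arguments stores
# each array under the default key 'arr_0', which is why the loads above index ['arr_0'].
# Reloading the products written by load_data()/train() would therefore look like:
ref_id = np.load("./ref_id.npz")['arr_0']
ref_label = np.load("./ref_label.npz")['arr_0']
coeffs = np.load("./coeffs.npz")['arr_0']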
"""
GWR is tested against results from GWR4
"""
import os
import pysal.lib as ps
from pysal.lib import io
import numpy as np
import multiprocessing as mp
import unittest
import pandas
from types import SimpleNamespace
from ..gwr import GWR, MGWR, MGWRResults
from ..sel_bw import Sel_BW
from ..diagnostics import get_AICc, get_AIC, get_BIC, get_CV
from pysal.model.spglm.family import Gaussian, Poisson, Binomial
class TestGWRGaussianPool(unittest.TestCase):
def setUp(self):
data_path = ps.examples.get_path("GData_utm.csv")
data = io.open(data_path)
self.coords = list(zip(data.by_col('X'), data.by_col('Y')))
self.y = np.array(data.by_col('PctBach')).reshape((-1, 1))
rural = np.array(data.by_col('PctRural')).reshape((-1, 1))
pov = np.array(data.by_col('PctPov')).reshape((-1, 1))
black = np.array(data.by_col('PctBlack')).reshape((-1, 1))
fb = np.array(data.by_col('PctFB')).reshape((-1, 1))
self.X = np.hstack([rural, pov, black])
self.mgwr_X = np.hstack([fb, black, rural])
self.BS_F = io.open(ps.examples.get_path('georgia_BS_F_listwise.csv'))
self.BS_NN = io.open(
ps.examples.get_path('georgia_BS_NN_listwise.csv'))
self.GS_F = io.open(ps.examples.get_path('georgia_GS_F_listwise.csv'))
self.GS_NN = io.open(
ps.examples.get_path('georgia_GS_NN_listwise.csv'))
MGWR_path = os.path.join(
os.path.dirname(__file__), 'georgia_mgwr_results.csv')
self.MGWR = pandas.read_csv(MGWR_path)
self.pool = mp.Pool(4)
def test_BS_NN_Pool(self):
est_Int = self.BS_NN.by_col(' est_Intercept')
se_Int = self.BS_NN.by_col(' se_Intercept')
t_Int = self.BS_NN.by_col(' t_Intercept')
est_rural = self.BS_NN.by_col(' est_PctRural')
se_rural = self.BS_NN.by_col(' se_PctRural')
t_rural = self.BS_NN.by_col(' t_PctRural')
est_pov = self.BS_NN.by_col(' est_PctPov')
se_pov = self.BS_NN.by_col(' se_PctPov')
t_pov = self.BS_NN.by_col(' t_PctPov')
est_black = self.BS_NN.by_col(' est_PctBlack')
se_black = self.BS_NN.by_col(' se_PctBlack')
t_black = self.BS_NN.by_col(' t_PctBlack')
yhat = self.BS_NN.by_col(' yhat')
res = np.array(self.BS_NN.by_col(' residual'))
std_res = np.array(self.BS_NN.by_col(' std_residual')).reshape((-1, 1))
localR2 = np.array(self.BS_NN.by_col(' localR2')).reshape((-1, 1))
inf = np.array(self.BS_NN.by_col(' influence')).reshape((-1, 1))
cooksD = np.array(self.BS_NN.by_col(' CooksD')).reshape((-1, 1))
local_corr = os.path.join(os.path.dirname(__file__), 'local_corr.csv')
corr1 = np.array(io.open(local_corr))
local_vif = os.path.join(os.path.dirname(__file__), 'local_vif.csv')
vif1 = np.array(io.open(local_vif))
local_cn = os.path.join(os.path.dirname(__file__), 'local_cn.csv')
cn1 = np.array(io.open(local_cn))
local_vdp = os.path.join(os.path.dirname(__file__), 'local_vdp.csv')
vdp1 = np.array(io.open(local_vdp), dtype=np.float64)
spat_var_p_vals = [0., 0.0, 0.5, 0.2]
model = GWR(self.coords, self.y, self.X, bw=90.000, fixed=False,
sigma2_v1=False)
rslt = model.fit(pool=self.pool)
adj_alpha = rslt.adj_alpha
alpha = 0.01017489
critical_t = rslt.critical_tval(alpha)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
corr2, vif2, cn2, vdp2 = rslt.local_collinearity()
R2 = rslt.R2
np.testing.assert_allclose(
adj_alpha, np.array([0.02034978, 0.01017489, 0.0002035]),
rtol=1e-04)
self.assertAlmostEqual(critical_t, 2.6011011542649394)
self.assertAlmostEqual(np.around(R2, 4), 0.5924)
self.assertAlmostEqual(np.floor(AICc), 896.0)
self.assertAlmostEqual(np.floor(AIC), 892.0)
self.assertAlmostEqual(np.floor(BIC), 941.0)
self.assertAlmostEqual(np.around(CV, 2), 19.19)
np.testing.assert_allclose(corr1, corr2, rtol=1e-04)
np.testing.assert_allclose(vif1, vif2, rtol=1e-04)
np.testing.assert_allclose(cn1, cn2, rtol=1e-04)
np.testing.assert_allclose(vdp1, vdp2, rtol=1e-04)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
sel = Sel_BW(self.coords, self.y, self.X)
bw = sel.search(pool=self.pool)
model = GWR(self.coords, self.y, self.X, bw)
result = model.fit(pool=self.pool)
p_vals = result.spatial_variability(sel, 10)
np.testing.assert_allclose(spat_var_p_vals, p_vals, rtol=1e-04)
def test_GS_F_Pool(self):
est_Int = self.GS_F.by_col(' est_Intercept')
se_Int = self.GS_F.by_col(' se_Intercept')
t_Int = self.GS_F.by_col(' t_Intercept')
est_rural = self.GS_F.by_col(' est_PctRural')
se_rural = self.GS_F.by_col(' se_PctRural')
t_rural = self.GS_F.by_col(' t_PctRural')
est_pov = self.GS_F.by_col(' est_PctPov')
se_pov = self.GS_F.by_col(' se_PctPov')
t_pov = self.GS_F.by_col(' t_PctPov')
est_black = self.GS_F.by_col(' est_PctBlack')
se_black = self.GS_F.by_col(' se_PctBlack')
t_black = self.GS_F.by_col(' t_PctBlack')
yhat = self.GS_F.by_col(' yhat')
res = np.array(self.GS_F.by_col(' residual'))
std_res = np.array(self.GS_F.by_col(' std_residual')).reshape((-1, 1))
localR2 = np.array(self.GS_F.by_col(' localR2')).reshape((-1, 1))
inf = np.array(self.GS_F.by_col(' influence')).reshape((-1, 1))
cooksD = np.array(self.GS_F.by_col(' CooksD')).reshape((-1, 1))
model = GWR(self.coords, self.y, self.X, bw=87308.298,
kernel='gaussian', fixed=True, sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
self.assertAlmostEqual(np.floor(AICc), 895.0)
self.assertAlmostEqual(np.floor(AIC), 890.0)
self.assertAlmostEqual(np.floor(BIC), 943.0)
self.assertAlmostEqual(np.around(CV, 2), 18.21)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
def test_MGWR_Pool(self):
std_y = (self.y - self.y.mean()) / self.y.std()
std_X = (self.mgwr_X - self.mgwr_X.mean(axis=0)) / \
self.mgwr_X.std(axis=0)
selector = Sel_BW(self.coords, std_y, std_X, multi=True, constant=True)
selector.search(multi_bw_min=[2], multi_bw_max=[159], pool=self.pool)
model = MGWR(self.coords, std_y, std_X, selector=selector,
constant=True)
rslt = model.fit(pool=self.pool)
rslt_2 = model.fit(n_chunks=2,
pool=self.pool) #testing for n_chunks > 1
rslt_3 = model.fit(n_chunks=3, pool=self.pool)
rslt_20 = model.fit(n_chunks=20, pool=self.pool)
model_hat = MGWR(self.coords, std_y, std_X, selector=selector,
constant=True, hat_matrix=True)
rslt_hat = model_hat.fit(pool=self.pool)
rslt_hat_2 = model_hat.fit(n_chunks=2, pool=self.pool)
np.testing.assert_allclose(rslt_hat.R, rslt_hat_2.R, atol=1e-07)
np.testing.assert_allclose(
rslt_hat.S.dot(std_y).flatten(), self.MGWR.predy, atol=1e-07)
varnames = ['X0', 'X1', 'X2', 'X3']
# def suffixed(x):
# """ Quick anonymous function to suffix strings"""
# return ['_'.join(x) for x in varnames]
np.testing.assert_allclose(rslt.predy.flatten(), self.MGWR.predy,
atol=1e-07)
np.testing.assert_allclose(rslt.params, self.MGWR[varnames].values,
atol=1e-07)
np.testing.assert_allclose(
rslt.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_2.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_3.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt_20.bse, self.MGWR[[s + "_bse" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(
rslt.tvalues, self.MGWR[[s + "_tvalues" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(rslt.resid_response,
self.MGWR.resid_response, atol=1e-04,
rtol=1e-04)
np.testing.assert_almost_equal(rslt.resid_ss, 50.899379467870425)
np.testing.assert_almost_equal(rslt.aicc, 297.12013812258783)
np.testing.assert_almost_equal(rslt.ENP, 11.36825087269831)
np.testing.assert_allclose(rslt.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_2.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_3.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(rslt_20.ENP_j, [
3.844671080264143, 3.513770805151652, 2.2580525278898254,
1.7517564593926895
])
np.testing.assert_allclose(
rslt.adj_alpha_j,
np.array([[0.02601003, 0.01300501, 0.0002601],
[0.02845945, 0.01422973, 0.00028459],
[0.04428595, 0.02214297, 0.00044286],
[0.05708556, 0.02854278, 0.00057086]]), atol=1e-07)
np.testing.assert_allclose(
rslt.critical_tval(),
np.array([2.51210749, 2.47888792, 2.31069113, 2.21000184]),
atol=1e-07)
np.testing.assert_allclose(
rslt.filter_tvals(),
self.MGWR[[s + "_filter_tvalues" for s in varnames]].values,
atol=1e-07)
np.testing.assert_allclose(rslt.local_collinearity()[0].flatten(),
self.MGWR.local_collinearity, atol=1e-07)
def test_Prediction(self):
coords = np.array(self.coords)
index = np.arange(len(self.y))
test = index[-10:]
X_test = self.X[test]
coords_test = coords[test]
model = GWR(self.coords, self.y, self.X, 93, family=Gaussian(),
fixed=False, kernel='bisquare', sigma2_v1=False)
results = model.predict(coords_test, X_test)
params = np.array([
22.77198, -0.10254, -0.215093, -0.01405, 19.10531, -0.094177,
-0.232529, 0.071913, 19.743421, -0.080447, -0.30893, 0.083206,
17.505759, -0.078919, -0.187955, 0.051719, 27.747402, -0.165335,
-0.208553, 0.004067, 26.210627, -0.138398, -0.360514, 0.072199,
18.034833, -0.077047, -0.260556, 0.084319, 28.452802, -0.163408,
-0.14097, -0.063076, 22.353095, -0.103046, -0.226654, 0.002992,
18.220508, -0.074034, -0.309812, 0.108636
]).reshape((10, 4))
np.testing.assert_allclose(params, results.params, rtol=1e-03)
bse = np.array([
2.080166, 0.021462, 0.102954, 0.049627, 2.536355, 0.022111,
0.123857, 0.051917, 1.967813, 0.019716, 0.102562, 0.054918,
2.463219, 0.021745, 0.110297, 0.044189, 1.556056, 0.019513,
0.12764, 0.040315, 1.664108, 0.020114, 0.131208, 0.041613, 2.5835,
0.021481, 0.113158, 0.047243, 1.709483, 0.019752, 0.116944,
0.043636, 1.958233, 0.020947, 0.09974, 0.049821, 2.276849,
0.020122, 0.107867, 0.047842
]).reshape((10, 4))
np.testing.assert_allclose(bse, results.bse, rtol=1e-03)
tvalues = np.array([
10.947193, -4.777659, -2.089223, -0.283103, 7.532584, -4.259179,
-1.877395, 1.385161, 10.033179, -4.080362, -3.012133, 1.515096,
7.106862, -3.629311, -1.704079, 1.17042, 17.831878, -8.473156,
-1.633924, 0.100891, 15.750552, -6.880725, -2.74765, 1.734978,
6.980774, -3.586757, -2.302575, 1.784818, 16.644095, -8.273001,
-1.205451, -1.445501, 11.414933, -4.919384, -2.272458, 0.060064,
8.00251, -3.679274, -2.872176, 2.270738
]).reshape((10, 4))
np.testing.assert_allclose(tvalues, results.tvalues, rtol=1e-03)
localR2 = np.array([[0.53068693], [0.59582647], [0.59700925],
[0.45769954], [0.54634509], [0.5494828],
[0.55159604], [0.55634237], [0.53903842],
[0.55884954]])
np.testing.assert_allclose(localR2, results.localR2, rtol=1e-05)
predictions = np.array([[10.51695514], [9.93321992], [8.92473026],
[5.47350219], [8.61756585], [12.8141851],
[5.55619405], [12.63004172], [8.70638418],
[8.17582599]])
np.testing.assert_allclose(predictions, results.predictions,
rtol=1e-05)
def test_BS_NN_longlat_Pool(self):
GA_longlat = os.path.join(
os.path.dirname(__file__), 'ga_bs_nn_longlat_listwise.csv')
self.BS_NN_longlat = io.open(GA_longlat)
coords_longlat = list(
zip(
self.BS_NN_longlat.by_col(' x_coord'),
self.BS_NN_longlat.by_col(' y_coord')))
est_Int = self.BS_NN_longlat.by_col(' est_Intercept')
se_Int = self.BS_NN_longlat.by_col(' se_Intercept')
t_Int = self.BS_NN_longlat.by_col(' t_Intercept')
est_rural = self.BS_NN_longlat.by_col(' est_PctRural')
se_rural = self.BS_NN_longlat.by_col(' se_PctRural')
t_rural = self.BS_NN_longlat.by_col(' t_PctRural')
est_pov = self.BS_NN_longlat.by_col(' est_PctPov')
se_pov = self.BS_NN_longlat.by_col(' se_PctPov')
t_pov = self.BS_NN_longlat.by_col(' t_PctPov')
est_black = self.BS_NN_longlat.by_col(' est_PctBlack')
se_black = self.BS_NN_longlat.by_col(' se_PctBlack')
t_black = self.BS_NN_longlat.by_col(' t_PctBlack')
yhat = self.BS_NN_longlat.by_col(' yhat')
res = np.array(self.BS_NN_longlat.by_col(' residual'))
std_res = np.array(self.BS_NN_longlat.by_col(' std_residual')).reshape(
(-1, 1))
localR2 = np.array(self.BS_NN_longlat.by_col(' localR2')).reshape((-1,
1))
inf = np.array(self.BS_NN_longlat.by_col(' influence')).reshape((-1,
1))
cooksD = np.array(self.BS_NN_longlat.by_col(' CooksD')).reshape((-1,
1))
model = GWR(coords_longlat, self.y, self.X, bw=90.000, fixed=False,
spherical=True, sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
R2 = rslt.R2
self.assertAlmostEqual(np.around(R2, 4), 0.5921)
self.assertAlmostEqual(np.floor(AICc), 896.0)
self.assertAlmostEqual(np.floor(AIC), 892.0)
self.assertAlmostEqual(np.floor(BIC), 941.0)
self.assertAlmostEqual(np.around(CV, 2), 19.11)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:, 1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:, 1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:, 2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:, 2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:, 3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
class TestGWRPoissonPool(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'tokyo/Tokyomortality.csv')
data = io.open(data_path, mode='Ur')
self.coords = list(
zip(data.by_col('X_CENTROID'), data.by_col('Y_CENTROID')))
self.y = np.array(data.by_col('db2564')).reshape((-1, 1))
self.off = np.array(data.by_col('eb2564')).reshape((-1, 1))
OCC = np.array(data.by_col('OCC_TEC')).reshape((-1, 1))
OWN = np.array(data.by_col('OWNH')).reshape((-1, 1))
POP = np.array(data.by_col('POP65')).reshape((-1, 1))
UNEMP = np.array(data.by_col('UNEMP')).reshape((-1, 1))
self.X = np.hstack([OCC, OWN, POP, UNEMP])
self.BS_F = io.open(
os.path.join(
os.path.dirname(__file__), 'tokyo/tokyo_BS_F_listwise.csv'))
self.BS_NN = io.open(
os.path.join(
os.path.dirname(__file__), 'tokyo/tokyo_BS_NN_listwise.csv'))
self.GS_F = io.open(
os.path.join(
os.path.dirname(__file__), 'tokyo/tokyo_GS_F_listwise.csv'))
self.GS_NN = io.open(
os.path.join(
os.path.dirname(__file__), 'tokyo/tokyo_GS_NN_listwise.csv'))
self.BS_NN_OFF = io.open(
os.path.join(
os.path.dirname(__file__),
'tokyo/tokyo_BS_NN_OFF_listwise.csv'))
self.pool = mp.Pool(4)
def test_BS_F_Pool(self):
est_Int = self.BS_F.by_col(' est_Intercept')
se_Int = self.BS_F.by_col(' se_Intercept')
t_Int = self.BS_F.by_col(' t_Intercept')
est_OCC = self.BS_F.by_col(' est_OCC_TEC')
se_OCC = self.BS_F.by_col(' se_OCC_TEC')
t_OCC = self.BS_F.by_col(' t_OCC_TEC')
est_OWN = self.BS_F.by_col(' est_OWNH')
se_OWN = self.BS_F.by_col(' se_OWNH')
t_OWN = self.BS_F.by_col(' t_OWNH')
est_POP = self.BS_F.by_col(' est_POP65')
se_POP = self.BS_F.by_col(' se_POP65')
t_POP = self.BS_F.by_col(' t_POP65')
est_UNEMP = self.BS_F.by_col(' est_UNEMP')
se_UNEMP = self.BS_F.by_col(' se_UNEMP')
t_UNEMP = self.BS_F.by_col(' t_UNEMP')
yhat = self.BS_F.by_col(' yhat')
pdev = np.array(self.BS_F.by_col(' localpdev')).reshape((-1, 1))
model = GWR(self.coords, self.y, self.X, bw=26029.625,
family=Poisson(), kernel='bisquare', fixed=True,
sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
self.assertAlmostEqual(np.floor(AICc), 13294.0)
self.assertAlmostEqual(np.floor(AIC), 13247.0)
self.assertAlmostEqual(np.floor(BIC), 13485.0)
np.testing.assert_allclose(est_Int, rslt.params[:, 0], rtol=1e-05)
np.testing.assert_allclose(se_Int, rslt.bse[:, 0], rtol=1e-03)
np.testing.assert_allclose(t_Int, rslt.tvalues[:, 0], rtol=1e-03)
np.testing.assert_allclose(est_OCC, rslt.params[:, 1], rtol=1e-04)
np.testing.assert_allclose(se_OCC, rslt.bse[:, 1], rtol=1e-02)
np.testing.assert_allclose(t_OCC, rslt.tvalues[:, 1], rtol=1e-02)
np.testing.assert_allclose(est_OWN, rslt.params[:, 2], rtol=1e-04)
np.testing.assert_allclose(se_OWN, rslt.bse[:, 2], rtol=1e-03)
np.testing.assert_allclose(t_OWN, rslt.tvalues[:, 2], rtol=1e-03)
np.testing.assert_allclose(est_POP, rslt.params[:, 3], rtol=1e-04)
np.testing.assert_allclose(se_POP, rslt.bse[:, 3], rtol=1e-02)
np.testing.assert_allclose(t_POP, rslt.tvalues[:, 3], rtol=1e-02)
np.testing.assert_allclose(est_UNEMP, rslt.params[:, 4], rtol=1e-04)
np.testing.assert_allclose(se_UNEMP, rslt.bse[:, 4], rtol=1e-02)
np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:, 4], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
def test_BS_NN_Pool(self):
est_Int = self.BS_NN.by_col(' est_Intercept')
se_Int = self.BS_NN.by_col(' se_Intercept')
t_Int = self.BS_NN.by_col(' t_Intercept')
est_OCC = self.BS_NN.by_col(' est_OCC_TEC')
se_OCC = self.BS_NN.by_col(' se_OCC_TEC')
t_OCC = self.BS_NN.by_col(' t_OCC_TEC')
est_OWN = self.BS_NN.by_col(' est_OWNH')
se_OWN = self.BS_NN.by_col(' se_OWNH')
t_OWN = self.BS_NN.by_col(' t_OWNH')
est_POP = self.BS_NN.by_col(' est_POP65')
se_POP = self.BS_NN.by_col(' se_POP65')
t_POP = self.BS_NN.by_col(' t_POP65')
est_UNEMP = self.BS_NN.by_col(' est_UNEMP')
se_UNEMP = self.BS_NN.by_col(' se_UNEMP')
t_UNEMP = self.BS_NN.by_col(' t_UNEMP')
yhat = self.BS_NN.by_col(' yhat')
pdev = np.array(self.BS_NN.by_col(' localpdev')).reshape((-1, 1))
model = GWR(self.coords, self.y, self.X, bw=50, family=Poisson(),
kernel='bisquare', fixed=False, sigma2_v1=False)
rslt = model.fit(pool=self.pool)
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
D2 = rslt.D2
self.assertAlmostEqual(np.floor(AICc), 13285)
self.assertAlmostEqual(np.floor(AIC), 13259.0)
self.assertAlmostEqual(np.floor(BIC), 13442.0)
self.assertAlmostEqual(np.round(D2, 3)
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import sparse_categorical_crossentropy
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.utils import tqdm
from graphgallery.attack.targeted import TensorFlow
from ..targeted_attacker import TargetedAttacker
@TensorFlow.register()
class SGA(TargetedAttacker):
def process(self, surrogate, reset=True):
assert isinstance(surrogate, gg.gallery.nodeclas.SGC), surrogate
K = surrogate.cfg.data.K # NOTE: Be compatible with graphgallery
# nodes with the same class labels
self.similar_nodes = [
np.where(self.graph.node_label == c)[0]
for c in range(self.num_classes)
]
with tf.device(self.device):
W, b = surrogate.model.weights
X = tf.convert_to_tensor(self.graph.node_attr,
dtype=self.floatx)
self.b = b
self.XW = X @ W
self.K = K
self.logits = surrogate.predict(np.arange(self.num_nodes))
self.loss_fn = sparse_categorical_crossentropy
self.shape = self.graph.adj_matrix.shape
if reset:
self.reset()
return self
def reset(self):
super().reset()
# for the added self-loop
self.selfloop_degree = (self.degree + 1.).astype(self.floatx)
self.adj_flips = {}
self.wrong_label = None
return self
def attack(self,
target,
num_budgets=None,
logit=None,
attacker_nodes=3,
direct_attack=True,
structure_attack=True,
feature_attack=False,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
if logit is None:
logit = self.logits[target]
idx = list(set(range(logit.size)) - set([self.target_label]))
wrong_label = idx[logit[idx].argmax()]
with tf.device(self.device):
self.wrong_label = wrong_label
self.true_label = tf.convert_to_tensor(self.target_label,
dtype=self.floatx)
self.subgraph_preprocessing(attacker_nodes)
offset = self.edge_weights.shape[0]
# for indirect attack, the edges related to targeted node should not be considered
if not direct_attack:
row, col = self.edge_index
mask = tf.convert_to_tensor(np.logical_and(row != target, col != target), dtype=gg.floatx())
else:
mask = 1.0
for it in tqdm(range(self.num_budgets),
desc='Peturbing Graph',
disable=disable):
edge_grad, non_edge_grad = self.compute_gradient()
edge_grad = normalize_GCN(self.edge_index, edge_grad,
self.selfloop_degree)
non_edge_grad = normalize_GCN(self.non_edge_index,
non_edge_grad,
self.selfloop_degree)
edge_grad *= (-2 * self.edge_weights + 1) * mask
non_edge_grad *= (-2 * self.non_edge_weights + 1)
gradients = tf.concat([edge_grad, non_edge_grad], axis=0)
index = tf.argmax(gradients)
if index < offset:
u, v = self.edge_index[:, index]
add = False
else:
index -= offset
u, v = self.non_edge_index[:, index]
add = True
assert not self.is_modified(u, v)
self.adj_flips[(u, v)] = it
self.update_subgraph(u, v, index, add=add)
return self
def subgraph_preprocessing(self, attacker_nodes=None):
target = self.target
wrong_label = self.wrong_label
neighbors = self.graph.adj_matrix[target].indices
wrong_label_nodes = self.similar_nodes[wrong_label]
sub_edges, sub_nodes = self.ego_subgraph()
sub_edges = sub_edges.T # shape [2, M]
if self.direct_attack or attacker_nodes is not None:
influence_nodes = [target]
wrong_label_nodes = np.setdiff1d(wrong_label_nodes, neighbors)
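# Note (illustrative assumption, not from the original module): normalize_GCN, used in
# attack() above, is not defined in this snippet. A typical GCN-style symmetric normalization
# of per-edge gradients by the self-loop degrees of the two endpoint nodes would look
# roughly like the sketch below.
def normalize_GCN(edge_index, weights, degree):
    # edge_index: array of shape [2, E]; degree: per-node degree including the self-loop
    row, col = edge_index
    inv_sqrt_degree = np.power(degree, -0.5)
    return weights * inv_sqrt_degree[row] * inv_sqrt_degree[col]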
import sys
import warnings
import numpy as np
from tempfile import mkdtemp
from astropy.stats import sigma_clipped_stats
from sfft.utils.pyAstroMatic.PYSEx import PY_SEx
from sfft.utils.HoughDetection import Hough_Detection
__author__ = "<NAME> <<EMAIL>>"
__version__ = "v1.0"
"""
# MeLOn Notes
# @ Point-Source Extractor
# A) A PSFEx suggested Morphological Classifier, based on a 2D distribution diagram
# FLUX_RADIUS [X-axis] - MAG_AUTO [Y-axis], A universal but naive approach.
# We first draw all isolated sources on the plane, and the typical distribution will form a 'Y' shape.
# A nearly vertical branch, A nearly horizontal branch and their cross with a tail at faint side.
#
# Here I give rough conclusions with comments; note I have compared with the Legacy Survey Tractor Catalog.
# a. The point sources are distributed around the nearly vertical straight line. {vertical branch}
# NOTE At the faint end, point sources no longer cling to the line, being diffuse in the cross and tail.
# b. Close to the bright end of the straight line are saturated or slightly nonlinear sources, with a deviated direction.
# c. The right side of the line typically holds extended structures, mainly including various galaxies. {horizontal branch}
# NOTE At the faint end, likewise, extended sources also exist, being diffuse in the cross and tail.
# d. Scattered points located at the far left side of the line are generally hot pixels, cosmic rays or
# some small-scale artifacts. Keep in mind, they are typically outlier-like and away from the cross and tail.
#
# METHOD: For simplicity, we only crudely divide the diagram into 3 regions, w.r.t. the vertical line.
# they are, Radius-Mid (FR-M), Radius-Large (FR-L) and Radius-Small (FR-S).
#
# B) 3 hierarchic groups
# > Good Sources:
# + NOT in FR-S region (union of FR-M & FR-L)
# NOTE Good Sources consist of the vertical & horizontal branches with their cross (not the tail),
# which is roughly equivalent to the set of REAL Point-Sources & Extended Sources
# with rejection of the samples in the tail (at the faint & small-radius end).
# NOTE Good Sources are commonly used as FITTING Candidates in Image Subtraction.
# It is acceptable to lose the samples in the tail.
#
# >> {subgroup} Point Sources:
# + Restricted into FR-M Region ||| Should be located around the Hough-Line
# + Basically Circular-Shape ||| PsfEx-ELLIPTICITY = (A-B) / (A+B) < PS_ELLIPThresh
# NOTE At the cross region, this identification criterion mis-includes some extended sources
# On the flip side, some REAL PointSource samples are missing in the tail.
# NOTE Point Sources are usually employed as FWHM Estimator.
# NOTE We may loosen PS_ELLIPThresh if the psf itself is significantly asymmetric (e.g. tracking problem).
#
# >>> {sub-subgroup} High-SNR Point Sources
# + SNR_WIN > HPS_SNRThresh, then reject the bright end [typically, 15% (HPS_Reject)] point-sources.
# ++ If remaining sources are less than 30 (HPS_NumLowerLimit),
# Simply Use the point-sources with highest SNR_WIN.
# NOTE In Common, this subset is for Flux-Calibration & Building PSF Model.
# NOTE The default HPS_SNRThresh = 100 might be too high, you may loosen it to
# ~ 15 to make sure you have enough samples, especially for psf modeling.
#
# @ Remarks on the HPS BrightEnd-Cutoff
# Assuming SExtractor received a precise SATURATE, saturated sources should be fully rejected via the FLAG constraint.
# However, in practice, it's hard to fulfill this condition strictly, which is why we design a simple BrightEnd-Cutoff
# to prevent the set from such contaminations. Compared with the mentioned usage of GS & PS, that of HPS is more
# vulnerable to such a situation. Flux-Calibration and Building the PSF-Model do not require sample completeness, but
# are likely to be sensitive to sources with an appreciable non-linear response.
#
# C) Additional WARNINGS
# a. This extractor is ONLY designed for the sparse field (isolated-source dominated) case.
# We just take these isolated & non-saturated sources (FLAGS=0) into account in this function.
#
# b. We employ the Hough Transformation to detect the Straight-Line feature in the image,
# naturally sampled from the raw scatter diagram. But note such a diagram makes sense
# only if we can detect enough sources (typically > 200) in the given image.
# NOTE Reversed axes employed --- MAG_AUTO [X-axis] - FLUX_RADIUS [Y-axis].
"""
class Hough_MorphClassifier:
def MakeCatalog(FITS_obj, GAIN_KEY='GAIN', SATUR_KEY='SATURATE', \
BACK_TYPE='AUTO', BACK_VALUE='0.0', BACK_SIZE=64, BACK_FILTERSIZE=3, \
DETECT_THRESH=2.0, DETECT_MINAREA=5, DETECT_MAXAREA=0, \
BACKPHOTO_TYPE='LOCAL', CHECKIMAGE_TYPE='NONE', \
AddRD=False, BoundarySIZE=30, AddSNR=True):
# * Trigger SExtractor
# NOTE: it is a compromise to adopt XY rather than XYWIN for both point and extended sources.
# NOTE: only takes Isolated & Non-Saturated sources (FLAGS = 0) into account.
# FIXME: one may need to tune DETECT_THRESH & DETECT_MINAREA for specific program.
PL = ['X_IMAGE', 'Y_IMAGE', 'FLUX_AUTO', 'FLUXERR_AUTO', 'MAG_AUTO', 'MAGERR_AUTO', \
'FLAGS', 'FLUX_RADIUS', 'FWHM_IMAGE', 'A_IMAGE', 'B_IMAGE']
if AddSNR: PL.append('SNR_WIN')
PYSEX_OP = PY_SEx.PS(FITS_obj=FITS_obj, PL=PL, GAIN_KEY=GAIN_KEY, SATUR_KEY=SATUR_KEY, \
BACK_TYPE=BACK_TYPE, BACK_VALUE=BACK_VALUE, BACK_SIZE=BACK_SIZE, BACK_FILTERSIZE=BACK_FILTERSIZE, \
DETECT_THRESH=DETECT_THRESH, DETECT_MINAREA=DETECT_MINAREA, DETECT_MAXAREA=DETECT_MAXAREA, \
BACKPHOTO_TYPE=BACKPHOTO_TYPE, CHECKIMAGE_TYPE=CHECKIMAGE_TYPE, AddRD=AddRD, ONLY_FLAG0=True, \
XBoundary=BoundarySIZE, YBoundary=BoundarySIZE, MDIR=None)
return PYSEX_OP
def Classifier(AstSEx, Hough_FRLowerLimit=0.1, Hough_res=0.05, Hough_count_thresh=1, Hough_peakclip=0.7, \
LineTheta_thresh=0.2, BeltHW=0.2, PS_ELLIPThresh=0.3, Return_HPS=False, \
HPS_SNRThresh=100.0, HPS_Reject=0.15, HPS_NumLowerLimit=30):
A_IMAGE = np.array(AstSEx['A_IMAGE'])
B_IMAGE = np.array(AstSEx['B_IMAGE'])
MA_FR = np.array([AstSEx['MAG_AUTO'], AstSEx['FLUX_RADIUS']]).T
ELLIP = (A_IMAGE - B_IMAGE)/(A_IMAGE + B_IMAGE)
MASK_ELLIP = ELLIP < PS_ELLIPThresh
# * Trigger Hough Detection
# Use the Hough Transformation to detect the Point-Source-Line from the scatter points in the
# diagram X [MAG_AUTO] - Y [FLUX_RADIUS], which is a nearly-horizon straight line.
# ** Remarks on the Mask for Hough Transformation
# It is useful to restrict the FLUX_RADIUS (R) of the scatter points used for Hough detection.
# I. Excluding the sources with unusually large R (> 20.0) can speed up the process.
# II. The sources with small R (typically ~ 0.5) are likely hot pixels or cosmic rays.
# The parameter Hough_FRLowerLimit is the lower bound on FLUX_RADIUS for the Hough transformation.
# Setting a proper lower bound avoids detecting some line features by chance,
# which are not contributed by point sources but reside in the small-FLUX_RADIUS region.
# NOTE: One need to choose a proper Hough_FRLowerLimit according to the fact if the image is
# under/well/over-sampling (depending on the instrumental configuration and typical seeing conditions)
# recommended values of Hough_FRLowerLimit range from 0.1 to 1.0
MA, FR = MA_FR[:, 0], MA_FR[:, 1]
MA_MID = np.nanmedian(MA)
Hmask = np.logical_and.reduce((FR > Hough_FRLowerLimit, FR < 10.0, MA > MA_MID-7.0, MA < MA_MID+7.0))
HDOP = Hough_Detection.HD(XY_obj=MA_FR, Hmask=Hmask, res=Hough_res, \
count_thresh=Hough_count_thresh, peakclip=Hough_peakclip)
ThetaPeaks, RhoPeaks, ScaLineDIS = HDOP[1], HDOP[2], HDOP[4]
# NOTE: consider the strongest nearly-horizon peak as the one associated with the point source feature.
Avmask = np.abs(ThetaPeaks) < LineTheta_thresh
AvIDX = np.where(Avmask)[0]
if len(AvIDX) == 0:
Horindex = None
warnings.warn('MeLOn WARNING: NO nearly-horizon peak as Point-Source-Line!')
if len(AvIDX) == 1:
Horindex = AvIDX[0]
print('MeLOn CheckPoint: the UNIQUE nearly-horizon peak as Point-Source-Line!')
if len(AvIDX) > 1:
Horindex = np.min(AvIDX)
warnings.warn('MeLOn WARNING: there are MULTIPLE nearly-horizon peaks and use the STRONGEST as Point-Source-Line!')
if Horindex is not None:
HorThetaPeak = ThetaPeaks[Horindex]
HorRhoPeak = RhoPeaks[Horindex]
HorScaLineDIS = ScaLineDIS[:, Horindex]
print('MeLOn CheckPoint: the Hough-Detected Point-Source-Line is characterized by (%s, %s)' \
%(HorThetaPeak, HorRhoPeak))
# NOTE: Note that HorThetaPeak is around 0, thus cos(HorThetaPeak) around 1 then >> 0,
# thus the above-line/FRL region is x_above * sin(HorThetaPeak) + y_above * cos(HorThetaPeak) > rho.
MASK_FRM = HorScaLineDIS < BeltHW
MASK_FRL = MA_FR[:, 0] * np.sin(HorThetaPeak) + MA_FR[:, 1] * np.cos(HorThetaPeak) > HorRhoPeak
MASK_FRL = np.logical_and(MASK_FRL, ~MASK_FRM)
else:
# NOTE: If we have enough samples, using the bright & small-FR subgroup might be
# more appropriate for the estimate. However, it is quite tricky to find a generic
# reliable way to find the point sources when the Hough Transformation doesn't work.
# Here we only simply reject the samples with low significance.
BPmask = AstSEx['MAGERR_AUTO'] < 0.2
Rmid = sigma_clipped_stats(MA_FR[BPmask, 1], sigma=3.0, maxiters=5)[1]
MASK_FRM = np.abs(MA_FR[:, 1] - Rmid) < BeltHW
MASK_FRL = MA_FR[:, 1] - Rmid > BeltHW
warnings.warn('MeLOn WARNING: the STANDBY approach is actived to determine the FRM region!')
MASK_FRS = ~np.logical_or(MASK_FRM, MASK_FRL)
LABEL_FR = np.array(['FR-S'] * len(AstSEx))
LABEL_FR[MASK_FRM] = 'FR-M'
LABEL_FR[MASK_FRL] = 'FR-L'
print('MeLOn CheckPoint: count Labels from Hough Transformation [FR-S (%s) / FR-M (%s) / FR-L (%s)] !' \
%(np.sum(MASK_FRS), np.sum(MASK_FRM), np.sum(MASK_FRL)))
# * Produce the 3 hierarchic groups
# ** Good Sources
MASK_GS = ~MASK_FRS
# *** Point Sources
MASK_PS = np.logical_and(MASK_FRM, MASK_ELLIP)
assert np.sum(MASK_PS) > 0
FWHM = round(np.median(AstSEx[MASK_PS]['FWHM_IMAGE']), 6)
print('MeLOn CheckPoint: Good-Sources in the Image [%d] ' % np.sum(MASK_GS))
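# Illustrative usage sketch (assumptions: 'sci.fits' is a hypothetical input image, and the
# SExtractor catalog is taken to be the first element of the MakeCatalog output; the exact
# structure of the PY_SEx.PS return value is not shown in this snippet).
PYSEX_OP = Hough_MorphClassifier.MakeCatalog('sci.fits', DETECT_THRESH=2.0, AddSNR=True)
AstSEx = PYSEX_OP[0]   # assumed: the source catalog as an astropy table
MASKS = Hough_MorphClassifier.Classifier(AstSEx, Hough_FRLowerLimit=0.1, PS_ELLIPThresh=0.3)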
"""
@author: <EMAIL>
"""
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as kl
import tensorflow.keras.losses as kls
import matplotlib.pyplot as plt
import math
import os
from tqdm import tqdm
from scipy.interpolate import interp1d
import time
#disable gpu
physical_devices = tf.config.experimental.list_physical_devices('GPU')
try:
# Disable first GPU
tf.config.set_visible_devices(physical_devices[1:], 'GPU')
logical_devices = tf.config.list_logical_devices('GPU')
# Logical device was not created for first GPU
assert len(logical_devices) == len(physical_devices) - 1
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import havsim
from havsim.simulation.simulationold2 import update2nd_cir, eq_circular, simulate_cir, simulate_step, update_cir
from havsim.plotting import plotformat, platoonplot
from havsim.simulation.models import IDM_b3, IDM_b3_eql
#to start we will just use a quantized action space since continuous actions are more complicated
#%%
class ProbabilityDistribution(tf.keras.Model):
def call(self, logits, **kwargs):
return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)
class PolicyModel(tf.keras.Model):
def __init__(self, num_actions, num_hiddenlayers = 3, num_neurons = 32, activationlayer = kl.LeakyReLU()):
super().__init__('mlp_policy')
self.num_hiddenlayers=num_hiddenlayers
self.activationlayer = activationlayer
self.hidden1 = kl.Dense(num_neurons) #hidden layer for actions (policy)
self.hidden11 = kl.Dense(num_neurons)
if self.num_hiddenlayers > 2:
self.hidden111 = kl.Dense(num_neurons)
if self.num_hiddenlayers > 3:
self.hidden1111 = kl.Dense(num_neurons)
# Logits are unnormalized log probabilities.
self.logits = kl.Dense(num_actions, name = 'policy_logits')
self.dist = ProbabilityDistribution()
def call(self, inputs, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_logs = self.hidden1(x)
hidden_logs = self.activationlayer(hidden_logs)
hidden_logs = self.hidden11(hidden_logs)
hidden_logs = self.activationlayer(hidden_logs)
if self.num_hiddenlayers > 2:
hidden_logs = self.hidden111(hidden_logs)
hidden_logs = self.activationlayer(hidden_logs)
if self.num_hiddenlayers > 3:
hidden_logs = self.hidden1111(hidden_logs)
hidden_logs = self.activationlayer(hidden_logs)
return self.logits(hidden_logs)
def action(self, obs):
logits = self.predict_on_batch(obs)
action = self.dist.predict_on_batch(logits)
return tf.squeeze(action, axis=-1)
class PolicyModel2(tf.keras.Model):
def __init__(self, num_actions, num_hiddenlayers = 2, num_neurons = 32, activationlayer = kl.LeakyReLU()):
super().__init__('mlp_policy')
self.activationlayer = activationlayer
self.hidden1 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1)) #hidden layer for actions (policy)
self.norm1 = kl.BatchNormalization()
self.hidden11 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1))
self.norm11 = kl.BatchNormalization()
# self.hidden111 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1))
# self.norm111 = kl.BatchNormalization()
# self.hidden1111 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1))
# self.norm1111 = kl.BatchNormalization()
# Logits are unnormalized log probabilities
self.logits = kl.Dense(num_actions, name = 'policy_logits')
self.dist = ProbabilityDistribution()
def call(self, inputs, training = True, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_logs = self.hidden1(x)
hidden_logs = self.activationlayer(hidden_logs)
hidden_logs = self.norm1(hidden_logs, training = training)
hidden_logs = self.hidden11(hidden_logs)
hidden_logs = self.activationlayer(hidden_logs)
hidden_logs = self.norm11(hidden_logs, training = training)
# hidden_logs = self.hidden111(hidden_logs)
# hidden_logs = self.activationlayer(hidden_logs)
# hidden_logs = self.norm111(hidden_logs, training = training)
# hidden_logs = self.hidden1111(hidden_logs)
# hidden_logs = self.activationlayer(hidden_logs)
# hidden_logs = self.norm1111(hidden_logs, training = training)
return self.logits(hidden_logs)
def action(self, obs):
logits = self.call(obs)
action = self.dist(logits)
return tf.squeeze(action, axis=-1)
class PolicyModel3(tf.keras.Model):
def __init__(self, num_actions, num_hiddenlayers = 2, num_neurons = 32, activationlayer = kl.LeakyReLU()):
super().__init__('mlp_policy')
self.hidden1 = kl.Dense(560, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16)) #hidden layer for actions (policy)
self.norm1 = kl.BatchNormalization()
self.hidden11 = kl.Dense(270, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16))
self.norm11 = kl.BatchNormalization()
self.hidden111 = kl.Dense(num_actions*10, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16))
self.norm111 = kl.BatchNormalization()
# Logits are unnormalized log probabilities
self.logits = kl.Dense(num_actions, name = 'policy_logits')
self.dist = ProbabilityDistribution()
def call(self, inputs, training = False, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_logs = self.hidden1(x)
# hidden_logs = self.norm1(hidden_logs, training = training)
hidden_logs = self.hidden11(hidden_logs)
# hidden_logs = self.norm11(hidden_logs, training = training)
hidden_logs = self.hidden111(hidden_logs)
# hidden_logs = self.norm111(hidden_logs, training = training)
return self.logits(hidden_logs)
def action(self, obs):
logits = self.call(obs)
action = self.dist(logits)
return tf.squeeze(action, axis=-1)
class ValueModel(tf.keras.Model):
def __init__(self, num_hiddenlayers = 3, num_neurons=64, activationlayer = kl.ELU()):
super().__init__('mlp_policy')
self.num_hiddenlayers=num_hiddenlayers
self.activationlayer = activationlayer
self.hidden2 = kl.Dense(num_neurons) #hidden layer for state-value
self.hidden22 = kl.Dense(num_neurons)
if self.num_hiddenlayers > 2:
self.hidden222 = kl.Dense(num_neurons)
if self.num_hiddenlayers > 3:
self.hidden2222 = kl.Dense(num_neurons)
self.val = kl.Dense(1, name = 'value')
def call(self, inputs, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_vals = self.hidden2(x)
hidden_vals = self.activationlayer(hidden_vals)
hidden_vals = self.hidden22(hidden_vals)
hidden_vals = self.activationlayer(hidden_vals)
if self.num_hiddenlayers > 2:
hidden_vals = self.hidden222(hidden_vals)
hidden_vals = self.activationlayer(hidden_vals)
if self.num_hiddenlayers > 3:
hidden_vals = self.hidden2222(hidden_vals)
hidden_vals = self.activationlayer(hidden_vals)
return self.val(hidden_vals)
def value(self, obs):
value = self.predict_on_batch(obs)
return tf.squeeze(value, axis=-1)
class ValueModel2(tf.keras.Model):
def __init__(self, num_hiddenlayers = 3, num_neurons=64, activationlayer = kl.ReLU()):
super().__init__('mlp_policy')
self.activationlayer = activationlayer
self.hidden2 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1)) #hidden layer for state-value
self.norm2 = kl.BatchNormalization()
self.hidden22 = kl.Dense(num_neurons, kernel_regularizer = tf.keras.regularizers.l2(l=.1))
self.norm22 = kl.BatchNormalization()
self.val = kl.Dense(1, name = 'value')
def call(self, inputs, training = True, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_vals = self.hidden2(x)
hidden_vals = self.activationlayer(hidden_vals)
hidden_vals = self.norm2(hidden_vals, training = training)
hidden_vals = self.hidden22(hidden_vals)
hidden_vals = self.activationlayer(hidden_vals)
hidden_vals = self.norm22(hidden_vals, training = training)
return self.val(hidden_vals)
def value(self, obs):
value = self.call(obs)
return tf.squeeze(value, axis=-1)
class ValueModel3(tf.keras.Model):
def __init__(self, num_hiddenlayers = 3, num_neurons=64, activationlayer = kl.ReLU()):
super().__init__('mlp_policy')
self.activationlayer = activationlayer
self.hidden2 = kl.Dense(560, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16)) #hidden layer for state-value
self.norm2 = kl.BatchNormalization()
self.hidden22 = kl.Dense(52, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16))
self.norm22 = kl.BatchNormalization()
self.hidden222 = kl.Dense(5, activation='tanh', kernel_regularizer = tf.keras.regularizers.l2(l=.16))
self.norm222 = kl.BatchNormalization()
self.val = kl.Dense(1, name = 'value')
def call(self, inputs, training = False, **kwargs):
x = tf.convert_to_tensor(inputs)
hidden_vals = self.hidden2(x)
# hidden_vals = self.norm2(hidden_vals, training = training)
hidden_vals = self.hidden22(hidden_vals)
# hidden_vals = self.norm22(hidden_vals, training = training)
hidden_vals = self.hidden222(hidden_vals)
# hidden_vals = self.norm222(hidden_vals, training = training)
return self.val(hidden_vals)
def value(self, obs):
value = self.call(obs)
return tf.squeeze(value, axis=-1)
class ValueModelReinforce(tf.keras.Model): #?What is this for?
def __init__(self):
super().__init__('mlp_policy')
self.hidden = kl.Dense(1)
self.threshold = kl.ThresholdedReLU(theta=math.inf)
def call(self, inputs, **kwargs):
return self.threshold(self.hidden(inputs))
def value(self, obs):
value = self.predict_on_batch(obs)
return tf.squeeze(value, axis=-1)
class ValueModelLinearBaseline(tf.keras.Model):
def __init__(self):
super().__init__('mlp_policy')
self.hidden = kl.Dense(1, activation=None)
def call(self, inputs, **kwargs):
return self.hidden(inputs)
def value(self, obs):
value = self.predict_on_batch(obs)
return tf.squeeze(value, axis=-1)
class ACagent:
def __init__(self,policymodel, valuemodel, data_sz = 256, batch_sz=80, lr = 0.000085, entropy_const = 1e-6, epochs = 20):
#self.model = model
self.policymodel = policymodel
self.valuemodel = valuemodel
self.policymodel.compile(
optimizer = tf.keras.optimizers.RMSprop(learning_rate = lr),
loss = [self._logits_loss])
self.valuemodel.compile(
optimizer = tf.keras.optimizers.RMSprop(learning_rate = lr),
loss = [self._value_loss])
self.gamma = 1 #discounting
self.data_sz = data_sz
self.batch_sz = batch_sz #batch size
self.epochs = epochs
self.entropy_const = entropy_const #constant for entropy maximization term in logit loss function
self.logit2logprob = kls.SparseCategoricalCrossentropy(from_logits=True) #tensorflow built in converts logits to log probability
def action_value(self, obs):
return self.policymodel.action(obs), self.valuemodel.value(obs)
def reset(self, env):
state = env.reset()
self.counter = 0
return state
def test(self,env,nruns = 4):
#nruns = 4 - number of episodes simulated
#returns - list of total (undiscounted) rewards for each episode, list of number of timesteps in each episode
curstate = self.reset(env)
run = 0
rewards = []
rewardslist = []
eplenlist = []
while (run < nruns):
while True:
action, value = self.action_value(curstate) #if using batch normalization may want to pass training = False to the model.call
curstate, reward, done = env.step(action)
rewards.append(reward)
self.counter += 1
if done:
eplenlist.append(self.counter)
rewardslist.append(sum(rewards))
if (run + 1 < nruns):
curstate = self.reset(env)
rewards = []
break
run += 1
return rewardslist, eplenlist
def train(self, env, updates=250, by_eps = False, numeps = 1, nTDsteps = 5, simlen = 1500):
#env - environment
#updates - number of times we will call model.fit. This is the number of iterations of the outer loop.
#before the first update, the environment is reset. after that, the environment is only reset if done = True is returned
#by_eps = False - if True, we generate entire episodes at a time.
#If False, we generate self.data_sz steps at a time
#numeps = 1 - if by_eps = True, numeps is the number of entire episodes generated
#nTDsteps = 5 - number of steps used for temporal difference errors (also known as advantages)
#if nTDsteps = -1, then the maximum number of steps possible is used
#simlen = 1500 - if by_eps = True, the arrays are all initialized with numeps * simlen size,
#so you must provide an upper bound for the number of steps in a single episode
#returns - ep_rewards, total (undiscounted) rewards for all complete episodes
#initialize
curstate = self.reset(env)
leftover = 0 #leftover has sum of undiscounted rewards from an unfinished episode in previous batch
#memory
data_sz = env.simlen * numeps if by_eps else self.data_sz
if nTDsteps < 0:
nTDsteps = data_sz
statemem = np.empty((data_sz,env.state_dim))
rewards = np.empty((data_sz))
values = np.empty((data_sz))
actions = np.empty(data_sz)
dones = np.empty((data_sz))
#output
ep_rewards = []
ep_lens = []
action,value = self.action_value(curstate)
for i in tqdm(range(updates)):
batchlen = 0 #batchlen keeps track of how many steps are in inner loop. batchlen = bstep + 1
#(self.counter keeps track of how many steps since start of most recent episode)
epsdone = 0 #keeps track of number of episodes simulated
curindex = 0 #keeps track of index for start of current episode
#(or if episode is continueing from previous batch, curindex = 0)
firstdone = -1
gammafactor = self.counter
for bstep in range(data_sz):
statemem[bstep] = curstate
nextstate, reward, done = env.step(action, False)
nextaction, nextvalue = self.action_value(nextstate)
self.counter += 1
rewards[bstep] = reward
values[bstep] = value
dones[bstep] = done
actions[bstep] = action
batchlen += 1
action, value, curstate = nextaction, nextvalue, nextstate
if done: #reset simulation
ep_rewards.append(sum(rewards[curindex:batchlen])+leftover)
ep_lens.append(self.counter)
curindex = batchlen
leftover = 0
curstate = self.reset(env)
action,value = self.action_value(curstate)
epsdone += 1
if by_eps and epsdone == numeps:
break
if firstdone == -1:
firstdone = batchlen
leftover += sum(rewards[curindex:batchlen]) #if an episode goes over several batches, keep track of cumulative rewards
gamma_adjust = np.ones(batchlen)
adj_idx = firstdone if (firstdone!= -1) else batchlen #update all gammas if no dones in batch
gamma_adjust[:adj_idx] = self.gamma**gammafactor
TDerrors = self._TDerrors(rewards[:batchlen], values[:batchlen], dones[:batchlen], nextvalue, gamma_adjust, nTDsteps)
TDacc = np.reshape(np.append(TDerrors, actions[:batchlen]), (batchlen,2), order = 'F')
self.policymodel.fit(statemem[:batchlen,:], TDacc, batch_size = self.batch_sz, epochs = self.epochs, verbose = 0)
self.valuemodel.fit(statemem[:batchlen,:], TDerrors, batch_size = self.batch_sz, epochs = self.epochs, verbose = 0)
return ep_rewards, ep_lens
def _TDerrors(self, rewards, values, dones, nextvalue, gamma_adjust, nstep, normalize = False):
returns = np.append(np.zeros_like(rewards), nextvalue)
stepreturns = np.zeros_like(rewards)
for t in reversed(range(rewards.shape[0])):
#cumulative rewards
returns[t] = rewards[t] + self.gamma*returns[t+1]*(1 - dones[t])
#adjustment for nstep
if ((t + nstep < len(returns)-1) and (1 not in dones[t:t+nstep])):
stepreturns[t] = returns[t] \
- self.gamma**nstep*returns[t+nstep] \
+ self.gamma**nstep*values[t+nstep]
else:
stepreturns[t] = returns[t]
returns = np.multiply(stepreturns, gamma_adjust)
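# Worked example (illustrative) of the quantity computed by _TDerrors above, ignoring the
# gamma_adjust bookkeeping: with gamma = 1, nstep = 2, rewards = [1, 1, 1],
# values = [0.5, 0.5, 0.5], no terminal steps and nextvalue = 0.5, the entry for t = 0 is
#   r_0 + r_1 + V(s_2) = 1 + 1 + 0.5 = 2.5
# i.e. the first nstep rewards plus a bootstrap from the learned value estimate. Note that
# the baseline V(s_t) is not subtracted here; that subtraction presumably happens inside the
# _value_loss / _logits_loss functions, which are not included in this snippet.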
#Author : <NAME> <EMAIL>
import numpy as np
import numpy.linalg as LA
import warnings
import cvxpy as cv
import matplotlib.pyplot as plt
import math
import scipy.integrate as sio
class DMD:
def __init__(self, dt=1, r=1e32, scale_modes=True, stack_factor='estimate',
use_optimal_SVHT=False, jovanovich=False,
condensed_jovanovich=False):
"""
Dynamic Mode Decomposition (DMD)
Estimates the modes of the dynamics of matrix X. Each spatial mode has
corresponding growth and frequency characteristics.
Parameters
----------
dt : float
Timestep of data
r : int
Number of modes to truncate to
scale_modes : boolean
Scale the spatial modes
stack_factor : int, [string]
The number of times to stack the X matrix upon `fit` such that
the train matrix has more rows than columns.
use_optimal_SVHT : boolean
Use optimal SVHT to estimate the number of modes to truncate to i.e. `self.r`
jovanovich : boolean
Deprecated
condensed_jovanonich : boolean
Deprecated
Returns
--------
mrDMD : object
The blank instance of mrDMD with given parameters.
See Also
--------
class: `mrDMD`
"""
self.jovanovich = jovanovich
self.condensed_jovanovich = condensed_jovanovich
self.Vand = None
self.alphas = None
self.real = True # assume X and Xhat to be real
self.Phi = None # the modes
#denoted as omega
self.mu = None #fourier spectrum of modes (mu = log(lambda)/dt)
self.timesteps = None #the default # of timesteps
self.lambdas = None #D, the DMD spectrum of modes
self.diagS = None # the singular values of the data matrix
self.x0 = None #initial condition vector corresponding to Phi
self.dt = dt # timestep
self.r = r # number to truncate DMD modes to
self.scale_modes = scale_modes
self.Xraw = None
self.Xhat = None
self.z0 = None
self.Atilde = None
self.stack_factor = stack_factor
self.Xaug = None
self.use_optimal_SVHT = use_optimal_SVHT
def _augment_x(self, Xraw):
""" Stack the features of the data Xraw such that
timesteps >= features where the rows of Xraw is the
number of features/channels
and the columns of Xraw are the timesteps.
Parameters
--------
Xraw : matrix-like
The raw data matrix.
Returns
--------
Xaug : matrix-like
The augmented raw data matrix.
"""
shape = Xraw.shape
#estimate optimal stacking
if self.stack_factor == 'estimate':
self._estimate_stack_factor(*shape)
assert(self.stack_factor != 'estimate')
else:
if self.stack_factor < 0:
raise ValueError("`stack_factor` can not be negative")
if type(self.stack_factor) != int:
raise ValueError("`stack_factor` must be of type `int`")
if self.stack_factor < 2: #if user inputted stack_factor of 1
warnings.warn('stack_factor must always be at least 2 or greater to capture frequency content')
self.Xaug = Xraw
return Xraw
else:
# Xaug can not be less than 2 columns
if (shape[1] - (self.stack_factor - 1)) < 2:
raise ValueError("`stack_factor` can not exceed X.shape[1] due to shifting")
new_col = shape[1] - (self.stack_factor - 1)
if new_col < 2:
raise ValueError("`timesteps` are too low for given `stack_factor`")
#concatenate by shift stacking features
row_block = shape[0] * self.stack_factor
Xaug = np.full((row_block, new_col), np.nan)
Xaug[:shape[0], :] = Xraw[:, :new_col]
for i in range(1, self.stack_factor):
start = i * shape[0]
Xaug[(start):(start + shape[0]), :] = Xraw[:, i:(new_col + i)]
self.Xaug = Xaug
return Xaug
def _truncate(self, raw_shape, U, S, V, diagS):
"""
Handle the truncation of the SVD decomposition,
either by truncating to the prespecified r inputed
during initialization or by calling the optimal
hard threshold.
Parameters
----------
raw_shape : tuple
The shape of Xaug or Xraw
U : matrix-like
S : matrix-like
V : matrix-like
diagS : array-like
Diagonal values of S
Returns
-------
U : matrix-like truncated to r columns
S : matrix-like truncated to r columns
V : matrix-like truncated to r columns
See Also
--------
:class `DMD._estimate_optimal_SVHT`
"""
if len(diagS.shape) != 1:
raise ValueError("`diagS` must be array-like")
if self.use_optimal_SVHT:
self._estimate_optimal_SVHT(raw_shape, diagS)
if U.shape[1] <= self.r:
return U, S, V
S = S[:self.r, :self.r]
self.diagS = diagS[:self.r]
U = U[:,:self.r]
V = V[:,:self.r]
return U, S, V
def fit(self, Xraw):
"""
Public call to fit DMD modes to Xraw
Parameters
----------
Xraw : matrix-like
The raw data matrix
Returns
-------
self : DMD object
Returns the instance of itself
See Also
--------
:class:`DMD._fit` : private call on `fit`
"""
self._fit(Xraw)
return self
def _fit(self, Xraw):
""" Private call to fit DMD modes to Xraw
Parameters
----------
Xraw : matrix-like
The raw data matrix
See Also
--------
:class:`DMD.fit` : public call on `_fit`
"""
if np.iscomplexobj(Xraw): self.real = False
raw_shape = Xraw.shape
assert(len(raw_shape) == 2)
if self.timesteps is None:
self.timesteps = raw_shape[1]
self.Xraw = Xraw.copy()
Xraw = self._augment_x(Xraw)
self.x0 = Xraw[:,0].copy()
X = Xraw[:,:-1].copy()
Y = Xraw[:,1:].copy()
#compute the 'econ' matrix of X1
[U, diagS, V_t] = LA.svd(X, full_matrices=False)
#!! V_t is the transpose of V (different from Matlab's svd, which returns V)
V = V_t.T
if all(v==0 for v in diagS):
warnings.warn('Xraw is a 0 vector')
S = np.diag(diagS)
U_r, S_r, V_r = self._truncate(raw_shape, U, S, V, diagS)
Atilde = U_r.T.dot(Y).dot(V_r).dot(np.diag(1 / np.diag(S_r)))
assert( not np.isinf(Atilde).any())
if self.scale_modes and (not self.jovanovich) \
and (not self.condensed_jovanovich): # scaling modes
S_r_neg = S_r.copy() # S^-1/2
S_r_pow = S_r.copy() # S^1/2
S_r_neg[np.diag_indices(len(S_r))] = 1 / (S_r_neg.diagonal()**0.5)
S_r_pow[np.diag_indices(len(S_r))] = S_r_pow.diagonal()**0.5
Ahat = np.dot(S_r_neg, Atilde).dot(S_r_pow)
#below: theoretically equivalent, but not valid Python syntax:
#Ahat = (S_r^(-1/2)).dot(Atilde).dot(S_r^(1/2))
lambdas, What = LA.eig(Ahat)
W = S_r_pow.dot(What)
else:
# W is the matrix of eigen vectors
#lambdas is the DMD eigen values
lambdas, W = LA.eig(Atilde)
if self.jovanovich or self.condensed_jovanovich:
Phi = U_r.dot(W) # alternate calculation of Phi
Vand = np.vander(lambdas, raw_shape[1], increasing=True)
self.Vand = Vand.copy()
Vand = Vand[:, :X.shape[1]]
d = cv.Variable(len(lambdas))
if self.condensed_jovanovich:
#match the dimensions of S since Y is stacked anyway
if W.shape[0] > S.shape[0]:
local_W = W[:S.shape[0],:]
else:
local_W = W
SV = S.dot(V_t)
objective = cv.Minimize(cv.square(cv.norm(SV
- local_W * cv.diag(d) * Vand, "fro")))
else:
objective = cv.Minimize(cv.square(cv.norm(X
- Phi * cv.diag(d) * Vand, "fro")))
constraints = [d >= 0.0]
#import pdb; pdb.set_trace()
prob = cv.Problem(objective, constraints)
optimal_value = prob.solve()
self.alphas = np.array(d.value)
#TODO add in constraints of power list of bools involving d
#TODO add additional method using E V*
else:
Phi = Y.dot(V_r).dot(np.diag(1 / np.diag(S_r))).dot(W)
self.Phi = Phi
self.lambdas = lambdas
self.Atilde = Atilde
if not any(self.lambdas.imag):
warnings.warn("Lambdas contain no complex components, self.r : %d" % self.r)
#np.log accepts negative complex values
self.mu = np.log(lambdas) / self.dt #denoted as omega in paper
def fit_transform(self, Xraw, timesteps='default', compute_error=False,
keep_modes=None, unaugment=True):
"""
Fits the DMD modes to the data and creates a reconstructed
data matrix Xhat. Also updates the reconstruction error.
Parameters
--------
Xraw : matrix-like
Raw data matrix
timesteps : float
Number of timesteps to include in the reconstructed data
matrix. If timesteps == 'default', it will use the original columns
of the Xraw matrix passed in.
compute_error : Boolean
If true returns the reconstruction error : |Xraw - Xhat|
keep_modes : array-like
An array of indices to the modes (columns) to keep in the reconstruction
Default is None which uses all modes of Phi to reconstruct
unaugment : Boolean
Augment the Xraw via shift stacking. See self._estimate_stack_factor
and cited paper for discussion on this behavior.
Returns
--------
Xhat : matrix-like
The reconstructed Xaug
E : scalar
The reconstruction error
See Also
--------
:class: `DMD.transform` : public call on `transform`
:class:`DMD.fit` : public call on `fit`
"""
if timesteps == 'default':
timesteps = self.Xraw.shape[1]
self._fit(Xraw)
self.timesteps = timesteps
self.keep_modes = keep_modes
Xhat = self._transform(keep_modes,
compute_error=compute_error, unaugment=unaugment)
if compute_error:
return Xhat, self.E
else:
return Xhat
def transform(self, timesteps='default', compute_error=False,
keep_modes=None, unaugment=True):
"""
Public call on _transform.
Reconstructs the original data matrix Xaug
from the DMD modes and initial conditions
Parameters
--------
timesteps : float
number of timesteps to include in the reconstructed data
matrix
compute_error : boolean
If true returns the reconstruction error : |Xraw - Xhat|
keep_modes : array-like
An array of indices to the modes (columns) to keep in the reconstruction
Default is None which uses all modes of Phi to reconstruct Xhat
unaugment : boolean
augment the Xraw via shift stacking. See self._estimate_stack_factor
and cited paper for discussion on this behavior.
Returns
--------
Xhat : matrix-like, float
The reconstructed Xaug
E : scalar
The reconstruction error
See Also
--------
:class: `DMD._transform` : private call on `transform`
"""
if self.Xraw is None:
raise ValueError('Xraw is None, you must call fit()\
or fit_transform() before calling\
transform()')
if timesteps == 'default':
timesteps = self.Xraw.shape[1]
self.timesteps = timesteps
self.keep_modes = keep_modes
Xhat = self._transform(keep_modes,
compute_error=compute_error, unaugment=unaugment)
if compute_error:
return Xhat, self.E
else:
return Xhat
def _transform(self, keep_modes, compute_error=False, unaugment=True, t_list='default'):
"""
Reconstruct the original data matrix Xaug
from the DMD modes and initial conditions.
Parameters
----------
keep_modes : array-like
An array of indices to the modes (columns) to keep in the reconstruction
Default is None which uses all modes of Phi to reconstruct Xhat
compute_error : boolean
If true returns the reconstruction error : |Xraw - Xhat|
unaugment : boolean
Augment the Xraw via shift stacking. See self._estimate_stack_factor
and cited paper for discussion on this behavior.
t_list : array-like
Create reconstruction for custom list of times
Returns
-------
Xhat : matrix-like, float, (features, timesteps)
The reconstructed Xaug where timesteps is the length of x0
E : scalar
The reconstruction error
Notes
-----
Xhat will only come out with non-zero imaginary
components if the original data matrix Xraw was not
strictly real valued; in that case Xhat will also be a
complex matrix.
See Also
--------
:class: `DMD._transform` : private call on `transform`
"""
if t_list == 'default':
timesteps = self.timesteps
else:
timesteps = len(t_list)
Phi = self.Phi
Vand = self.Vand
alphas = self.alphas
lambdas = self.lambdas
#update mu in case dt has changed
mu = np.log(lambdas) / self.dt #denoted as omega in paper
alphas = np.squeeze(self.alphas)
"""takes select soscillatorArray: "osc" instance from model.py and constructs plot
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from lib.plotformat import setup
import re
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from scipy.interpolate import RectBivariateSpline
def save_data(data:np.ndarray,
file_name:str = 'model_data',
level:int = 3):
print(file_name)
fmt = setup(file_name,level)
np.save(fmt.plot_name(file_name,'npy'),data)
def plot_output(model,osc,
data:np.ndarray,
time:np.ndarray,
samples:int=4,
seconds:int=4,
scale:bool = False,
file_name:str = 'model_data'):
# print(data.shape)
for k in np.arange(data.shape[2]):
model.plot_contour(osc,data[...,k],time[k],)
if samples and np.round(100*time[k])%100==0 and not time[k]==time[-1]:
print(np.round(time[k]))
idx=np.where(time>=time[k])[0] # larger set of two
idy=np.where(time<time[k]+seconds)[0]
idz = idx[np.in1d(idx, idy)] # intersection of sets
model.plot_timeseries(osc,
data[...,idz],
time[idz],
samples,
seconds)
################################################################################
def plot_timeseries(osc,
z:np.ndarray,
t:np.ndarray,
samples:int=3,
seconds:int = 2,
title:str = None,
y_axis:str = r'$\cos(\omega*t)$',
x_axis:str = 'time, s',
):
"""plot the solution timeseries for a random cluster of neighbor nodes
"""
if not title:
title = 'Timeseries for {s} Random Neighbors R={r:.2f} $\\beta$={beta:.2f} K/N={kn:.1f} & c={c:.0f})'.format(s=samples,
**osc.interaction_params,
**osc.kernel_params,
kn=np.round(osc.gain/np.prod(osc.ic.shape))
)
if t[0]:
if t[0]>10:
title+=f' at t = {t[0]:.0f} to {t[-1]:.0f}'
else:
title+=f' at t = {t[0]:2.1f} to {t[-1]:2.1f}'
fmt = setup(title,osc.level) # plotting format osc
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
rng = np.random.default_rng()
rnd_node = rng.choice(np.arange(z.shape[0]),
size=2,
replace=False,
)
# TODO generalize this to larger square m*n
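# offsets to the 8 surrounding grid positions of the chosen node: the first
# four rows are the diagonal neighbours, the last four the edge-adjacent ones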
neighbors = np.array([[1,1,-1,-1,0,0,1,-1],
[1,-1,1,-1,1,-1,0,0]]).T
idx = np.broadcast_to(rnd_node,neighbors.shape) + neighbors
## validate in range: these are 2d coupled pairs, but np.where returns 1d, so just use unique
# idlimit = np.where(idx<=z.shape[0:2])[0]
# idzero = np.where(idx>=0)[0]
# indx within limit
idlimit0 = np.where(idx[:,0]<z.shape[0])[0]
idlimit1 = np.where(idx[:,1]<z.shape[1])[0]
# indx >= 0; actually -1, -2 would be permissible but it won't be as local
idzero0 = np.where(idx[:,0]>=0)
import numpy as np
import numpy
import math
import logging
logger = logging.getLogger(__name__)
# Set reasonable precision for comparing floats to zero. Originally the multiplier was
# 10, but I needed to set this to 1000 because some of the trimesh distance methods
# do not seem as accurate as with primitive shapes.
EPS_ZERO = np.finfo(float).eps * 1000
def on_aabb_surface(size, point, centre=(0.0, 0.0, 0.0), atol=EPS_ZERO):
""" Surface test for axis-aligned bounding box with absolute distance
tolerance along surface normal direction.
>>> size = (1.0, 1.0, 1.0)
>>> centre = (0.0, 0.0, 0.0)
>>> pt = np.array([0.5, np.random.uniform(-0.5*size[1], 0.5*size[1]), np.random.uniform(-0.5*size[2], 0.5*size[2])])
>>> atol = 1e-8
>>> on_aabb_surface(size, pt, centre=centre, atol=1e-8)
True
>>> on_aabb_surface(size, pt + np.array([atol, 0.0, 0.0]), centre=centre, atol=1e-8)
False
"""
origin = np.array(centre) - 0.5 * np.array(size)
extent = np.array(centre) + 0.5 * np.array(size)
"""
A class hierarchy for subgrid error estimation methods (multiscale methods)
.. inheritance-diagram:: proteus.SubgridError
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
from builtins import object
import numpy
from . import csubgridError
from . import FemTools
from .Profiling import logEvent
class SGE_base(object):
def __init__(self,coefficients,nd,lag=False,trackSubScales=False):
self.nc = coefficients.nc
self.nd = nd
self.components=list(range(self.nc))
self.lag=lag
self.coefficients=coefficients
self.trackSubScales = trackSubScales
self.usesGradientStabilization = False
def initializeElementQuadrature(self,mesh,t,cq):
self.mesh=mesh
self.tau=[]
self.tau_last=[]
for ci in range(self.nc):
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
else:
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
for cj in range(self.nc):
if ('df',ci,cj) in cq:
cq[('df_sge',ci,cj)]=cq[('df',ci,cj)]
if ('dH',ci,cj) in cq:
cq[('dH_sge',ci,cj)]=cq[('dH',ci,cj)]
if ('dm',ci,cj) in cq:
cq[('dm_sge',ci,cj)]=cq[('dm',ci,cj)]
if ('dmt',ci,cj) in cq:
cq[('dmt_sge',ci,cj)]=cq[('dmt',ci,cj)]
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=cq[('dphi',ck,cj)]
cq[('da_sge',ci,ck,cj)]=cq[('da',ci,ck,cj)]
def initializeTimeIntegration(self,timeIntegration):
"""
allow for connection with time integration method if tracking subscales
"""
pass
def calculateSubgridError(self,q):
pass
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in range(self.nc):
self.tau_last[ci][:] = self.tau[ci]
def accumulateSubgridMassHistory(self,q):
"""
incorporate subgrid scale mass accumulation
\delta m^{n}/\delta t^{n+1}
"""
pass
class Advection_ASGS(SGE_base):
def __init__(self,coefficients,nd,stabFlag='1',lag=False):
SGE_base.__init__(self,coefficients,nd,lag)
self.stabilizationFlag = stabFlag
def initializeElementQuadrature(self,mesh,t,cq):
import copy
self.mesh=mesh
self.tau=[]
self.tau_last=[]
self.df_last={}
self.cq=cq
for ci in range(self.nc):
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
if ('df',ci,ci) in cq:
self.df_last = copy.deepcopy(cq[('df',ci,ci)])
cq[('df_sge',ci,ci)] = self.df_last
else:
if ('df',ci,ci) in cq:
cq[('df_sge',ci,ci)] = cq[('df',ci,ci)]
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in range(self.nc):
self.tau_last[ci][:] = self.tau[ci]
self.df_last[:] = self.cq[('df',ci,ci)]
def calculateSubgridError(self,q):
for ci in range(self.nc):
csubgridError.calculateSubgridError_A_tau(self.stabilizationFlag,
self.mesh.elementDiametersArray,
q[('dmt',ci,ci)],
q[('df',ci,ci)],
q[('cfl',ci)],
self.tau[ci])
if self.lag:
tau=self.tau_last[ci]
else:
tau=self.tau[ci]
for cj in range(self.nc):
if ('dpdeResidual',ci,cj) in q:
csubgridError.calculateSubgridError_tauRes(tau,
q[('pdeResidual',ci)],
q[('dpdeResidual',ci,cj)],
q[('subgridError',ci)],
q[('dsubgridError',ci,cj)])
class AdvectionLag_ASGS(SGE_base):
def __init__(self,coefficients,nd,stabFlag='1',lag=False):
SGE_base.__init__(self,coefficients,nd,lag)
self.stabilizationFlag = stabFlag
def initializeElementQuadrature(self,mesh,t,cq):
import copy
self.mesh=mesh
self.tau=[]
self.tau_last=[]
self.df_last={}
self.cq=cq
for ci in range(self.nc):
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
if ('df',ci,ci) in cq:
self.df_last = copy.deepcopy(cq[('df',ci,ci)])
cq[('df_sge',ci,ci)] = self.df_last
else:
if ('df',ci,ci) in cq:
cq[('df_sge',ci,ci)] = cq[('df',ci,ci)]
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in range(self.nc):
self.tau_last[ci][:] = self.tau[ci]
self.df_last[:] = self.cq[('df',ci,ci)]
def calculateSubgridError(self,q):
for ci in range(self.nc):
csubgridError.calculateSubgridError_A_tau(self.stabilizationFlag,
self.mesh.elementDiametersArray,
q[('dmt',ci,ci)],
q[('df_sge',ci,ci)],
q[('cfl',ci)],
self.tau[ci])
tau=self.tau[ci]
for cj in range(self.nc):
if ('dpdeResidual',ci,cj) in q:
csubgridError.calculateSubgridError_tauRes(tau,
q[('pdeResidual',ci)],
q[('dpdeResidual',ci,cj)],
q[('subgridError',ci)],
q[('dsubgridError',ci,cj)])
class AdvectionDiffusionReaction_ASGS(SGE_base):
def __init__(self,coefficients,nd,stabFlag='1',lag=False):
SGE_base.__init__(self,coefficients,nd,lag)
self.stabilizationFlag = stabFlag
def initializeElementQuadrature(self,mesh,t,cq):
import copy
self.mesh=mesh
self.tau=[]
self.tau_last=[]
self.cq=cq
for ci in range(self.nc):
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
if ('df',ci,ci) in cq:
cq[('df_sge',ci,ci)] = copy.deepcopy(cq[('df',ci,ci)])
if ('dm',ci,ci) in cq:
cq[('dm_sge',ci,ci)] = copy.deepcopy(cq[('dm',ci,ci)])
if ('dmt',ci,ci) in cq:
cq[('dmt_sge',ci,ci)] = copy.deepcopy(cq[('dmt',ci,ci)])
else:
if ('df',ci,ci) in cq:
cq[('df_sge',ci,ci)] = cq[('df',ci,ci)]
if ('dm',ci,ci) in cq:
cq[('dm_sge',ci,ci)] = cq[('dm',ci,ci)]
if ('dmt',ci,ci) in cq:
cq[('dmt_sge',ci,ci)] = cq[('dmt',ci,ci)]
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
for ci,ckDict in self.coefficients.diffusion.items():
if self.lag:#mwf looks like this was missing if lag May 7 09
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=copy.deepcopy(cq[('grad(phi)',ck)])
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=copy.deepcopy(cq[('dphi',ck,cj)])
cq[('da_sge',ci,ck,cj)]=copy.deepcopy(cq[('da',ci,ck,cj)])
else:
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=cq[('dphi',ck,cj)]
cq[('da_sge',ci,ck,cj)]=cq[('da',ci,ck,cj)]
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in range(self.nc):
self.tau_last[ci][:] = self.tau[ci]
#mwf should these be deep copies?
self.cq[('df_sge',ci,ci)][:] = self.cq[('df',ci,ci)]
self.cq[('dm_sge',ci,ci)][:] = self.cq[('dm',ci,ci)]
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
self.cq[('grad(phi)_sge',ck)][:]=self.cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
self.cq[('dphi_sge',ck,cj)][:]=0.0 #grad(phi) will be a constant when lagged so dphi=0 not 1
self.cq[('da_sge',ci,ck,cj)][:]=self.cq[('da',ci,ck,cj)]
def calculateSubgridError(self,q):
oldTau=False#True #mwf oldTau not working with sd!
for ci in range(self.nc):
if oldTau:
if self.coefficients.sd:
csubgridError.calculateSubgridError_ADR_tau_sd(self.stabilizationFlag,
self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],
self.mesh.elementDiametersArray,
q[('dmt',ci,ci)],
q[('df',ci,ci)],
q[('a',ci,ci)],
q[('da',ci,ci,ci)],
q[('grad(phi)',ci)],
q[('dphi',ci,ci)],
q[('dr',ci,ci)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
else:
csubgridError.calculateSubgridError_ADR_tau(self.stabilizationFlag,
self.mesh.elementDiametersArray,
q[('dmt',ci,ci)],
q[('df',ci,ci)],
q[('a',ci,ci)],
q[('da',ci,ci,ci)],
q[('grad(phi)',ci)],
q[('dphi',ci,ci)],
q[('dr',ci,ci)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
else:
if self.coefficients.sd:
csubgridError.calculateSubgridError_ADR_generic_tau_sd(self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],
q['inverse(J)'],
q[('dmt',ci,ci)],
q[('df',ci,ci)],
q[('a',ci,ci)],
q[('da',ci,ci,ci)],
q[('grad(phi)',ci)],
q[('dphi',ci,ci)],
q[('dr',ci,ci)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
else:
csubgridError.calculateSubgridError_ADR_generic_tau(q['inverse(J)'],
q[('dmt',ci,ci)],
q[('df',ci,ci)],
q[('a',ci,ci)],
q[('da',ci,ci,ci)],
q[('grad(phi)',ci)],
q[('dphi',ci,ci)],
q[('dr',ci,ci)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
if self.lag:
tau=self.tau_last[ci]
else:
tau=self.tau[ci]
for cj in range(self.nc):
if ('dpdeResidual',ci,cj) in q:
csubgridError.calculateSubgridError_tauRes(tau,
q[('pdeResidual',ci)],
q[('dpdeResidual',ci,cj)],
q[('subgridError',ci)],
q[('dsubgridError',ci,cj)])
#mwf debug
#import pdb
#pdb.set_trace()
# print "tau",tau
# print "pdeResidual",q[('pdeResidual',ci)]
# print "dpdeResidual",q[('dpdeResidual',ci,ci)]
# print "subgrid error",q[('subgridError',ci)]
# print "dsubgrid error",q[('dsubgridError',ci,ci)]
class FFDarcyFC_ASGS(SGE_base):
"""
basic stabilization for TwophaseDarcy_fc_ff; only the 'mixture' equation has an advection term.
The 'w' phase equation has nonlinear diffusion w.r.t. the mixture potential,
and the 'mixture' equation has two nonlinear diffusion terms.
"""
def __init__(self,coefficients,nd,stabFlag='1',lag=False):
SGE_base.__init__(self,coefficients,nd,lag)
self.stabilizationFlag = stabFlag
self.dftemp = None
def initializeElementQuadrature(self,mesh,t,cq):
import copy
self.mesh=mesh
self.tau=[]
self.tau_last=[]
self.df_last={}
self.cq=cq
for ci in [0]:
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
if ('df',ci,ci) in cq:
self.df_last = copy.deepcopy(cq[('df',ci,ci)])
cq[('df_sge',ci,ci)] = self.df_last
else:
if ('df',ci,ci) in cq:
cq[('df_sge',ci,ci)] = cq[('df',ci,ci)]
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.cq=cq
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=copy.deepcopy(cq[('grad(phi)',ck)])
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=copy.deepcopy(cq[('dphi',ck,cj)])
cq[('da_sge',ci,ck,cj)]=copy.deepcopy(cq[('da',ci,ck,cj)])
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in [0]:
self.tau_last[ci][:] = self.tau[ci]
#self.df_last[:] = self.cq[('df',ci,ci)]
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
self.cq[('grad(phi)_sge',ck)][:]=self.cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
self.cq[('dphi_sge',ck,cj)][:]=0.0 #grad(phi) will be a constant when lagged so dphi=0 not 1
self.cq[('da_sge',ci,ck,cj)][:]=self.cq[('da',ci,ck,cj)]
def calculateSubgridError(self,q):
oldTau = False
if self.dftemp is None or self.dftemp.shape != q[('grad(phi)',1)].shape:
self.dftemp = numpy.zeros(q[('grad(phi)',1)].shape,'d')
ci = 0; cj = 0; ck = 1;
if oldTau:
if self.coefficients.sd:
csubgridError.calculateSubgridError_ADR_tau_sd(self.stabilizationFlag,
self.coefficients.sdInfo[(0,1)][0],self.coefficients.sdInfo[(0,1)][1],
self.mesh.elementDiametersArray,
q[('dmt',0,0)],
self.dftemp,
q[('a',0,1)],
q[('da',0,1,0)],
q[('grad(phi)',1)],
q[('dphi',1,0)],
q[('dr',0,0)],
q[('pe',0)],
q[('cfl',0)],
self.tau[0])
else:
csubgridError.calculateSubgridError_ADR_tau(self.stabilizationFlag,
self.mesh.elementDiametersArray,
q[('dmt',0,0)],
self.dftemp,
q[('a',0,1)],
q[('da',0,1,0)],
q[('grad(phi)',1)],
q[('dphi',1,0)],
q[('dr',0,0)],
q[('pe',0)],
q[('cfl',0)],
self.tau[0])
else:
if self.coefficients.sd:
csubgridError.calculateSubgridError_ADR_generic_tau_sd(self.coefficients.sdInfo[(ci,ck)][0],self.coefficients.sdInfo[(ci,ck)][1],
q['inverse(J)'],
q[('dmt',ci,ci)],
self.dftemp,
q[('a',ci,ck)],
q[('da',ci,ck,cj)],
q[('grad(phi)',ck)],
q[('dphi',ck,cj)],
q[('dr',ci,cj)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
else:
csubgridError.calculateSubgridError_ADR_generic_tau(q['inverse(J)'],
q[('dmt',ci,ci)],
self.dftemp,
q[('a',ci,ck)],
q[('da',ci,ck,cj)],
q[('grad(phi)',ck)],
q[('dphi',ck,cj)],
q[('dr',ci,cj)],
q[('pe',ci)],
q[('cfl',ci)],
self.tau[ci])
if self.lag:
tau=self.tau_last[0]
else:
tau=self.tau[0]
csubgridError.calculateSubgridError_tauRes(tau,
q[('pdeResidual',0)],
q[('dpdeResidual',0,0)],
q[('subgridError',0)],
q[('dsubgridError',0,0)])
# print "tau",tau
# print "pdeResidual",q[('pdeResidual',ci)]
# print "dpdeResidual",q[('dpdeResidual',ci,ci)]
# print "subgrid error",q[('subgridError',ci)]
# print "dsubgrid error",q[('dsubgridError',ci,ci)]
class DarcyFC_ASGS(SGE_base):
"""
basic stabilization for TwophaseDarcy_fc, no advection term.
The 'w' and 'n' phase equations have nonlinear diffusion w.r.t. their own potentials:
phi_w = psi_w, phi_n = psi_w + psi_c
"""
def __init__(self,coefficients,nd,stabFlag='1',lag=False):
SGE_base.__init__(self,coefficients,nd,lag)
self.stabilizationFlag = stabFlag
self.dftemp = None; self.drtmp = {(0,0):None,(1,0):None}
def initializeElementQuadrature(self,mesh,t,cq):
import copy
self.mesh=mesh
self.tau=[]
self.tau_last=[]
self.df_last={}
self.cq=cq
for ci in [0,1]:
if self.lag:
self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
else:
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
self.cq=cq
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=copy.deepcopy(cq[('grad(phi)',ck)])
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=copy.deepcopy(cq[('dphi',ck,cj)])
cq[('da_sge',ci,ck,cj)]=copy.deepcopy(cq[('da',ci,ck,cj)])
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in [0,1]:
self.tau_last[ci][:] = self.tau[ci]
#self.df_last[:] = self.cq[('df',ci,ci)]
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
self.cq[('grad(phi)_sge',ck)][:]=self.cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
self.cq[('dphi_sge',ck,cj)][:]=0.0 #grad(phi) will be a constant when lagged so dphi=0 not 1
self.cq[('da_sge',ci,ck,cj)][:]=self.cq[('da',ci,ck,cj)]
def calculateSubgridError(self,q):
oldTau=False
if self.dftemp is None or self.dftemp.shape != q[('grad(phi)',1)].shape:
self.dftemp = numpy.zeros(q[('grad(phi)',1)].shape,'d')
#'w' phase equation
ci = 0; cj = 0; ck = 0;
if ('dr',ci,cj) in q:
self.drtmp[(ci,cj)] = q[('dr',ci,cj)]
elif self.drtmp[(ci,cj)] is None:
self.drtmp[(ci,cj)] = numpy.zeros(q[('r',ci)].shape,'d')
if self.drtmp[(ci,cj)] is None or self.drtmp[(ci,cj)].shape != q[('r',ci)].shape:
self.drtmp[(ci,cj)] = numpy.zeros(q[('r',ci)].shape,'d')
import os
import os.path
from collections import defaultdict
from functools import partial
import numpy as np
from yt.frontends.art.definitions import (
hydro_struct,
particle_fields,
particle_star_fields,
star_struct,
)
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.fortran_utils import read_vector, skip
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
class IOHandlerART(BaseIOHandler):
_dataset_type = "art"
tb, ages = None, None
cache = None
masks = None
caching = True
def __init__(self, *args, **kwargs):
self.cache = {}
self.masks = {}
super().__init__(*args, **kwargs)
self.ws = self.ds.parameters["wspecies"]
self.ls = self.ds.parameters["lspecies"]
self.file_particle = self.ds._file_particle_data
self.file_stars = self.ds._file_particle_stars
self.Nrow = self.ds.parameters["Nrow"]
def _read_fluid_selection(self, chunks, selector, fields, size):
# Chunks in this case will have affiliated domain subset objects
# Each domain subset will contain a hydro_offset array, which gives
# pointers to level-by-level hydro information
tr = defaultdict(list)
cp = 0
for chunk in chunks:
for subset in chunk.objs:
# Now we read the entire thing
f = open(subset.domain.ds._file_amr, "rb")
# This contains the boundary information, so we skim through
# and pick off the right vectors
rv = subset.fill(f, fields, selector)
for ft, f in fields:
d = rv.pop(f)
mylog.debug(
"Filling %s with %s (%0.3e %0.3e) (%s:%s)",
f,
d.size,
d.min(),
d.max(),
cp,
cp + d.size,
)
tr[(ft, f)].append(d)
cp += d.size
d = {}
for field in fields:
d[field] = np.concatenate(tr.pop(field))
return d
def _get_mask(self, selector, ftype):
key = (selector, ftype)
if key in self.masks.keys() and self.caching:
return self.masks[key]
pstr = "particle_position_%s"
x, y, z = (self._get_field((ftype, pstr % ax)) for ax in "xyz")
mask = selector.select_points(x, y, z, 0.0)
if self.caching:
self.masks[key] = mask
return self.masks[key]
else:
return mask
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
for _chunk in chunks:
for ptype in sorted(ptf):
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
yield ptype, (x, y, z)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for _chunk in chunks:
for ptype, field_list in sorted(ptf.items()):
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = self._get_field((ptype, field))
yield (ptype, field), data[mask]
def _get_field(self, field):
if field in self.cache.keys() and self.caching:
mylog.debug("Cached %s", str(field))
return self.cache[field]
mylog.debug("Reading %s", str(field))
tr = {}
ftype, fname = field
ptmax = self.ws[-1]
pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
npa = idxb - idxa
sizes = np.diff(np.concatenate(([0], self.ls)))
rp = partial(
read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
)
for ax in "xyz":
if fname.startswith(f"particle_position_{ax}"):
dd = self.ds.domain_dimensions[0]
off = 1.0 / dd
tr[field] = rp(fields=[ax])[0] / dd - off
if fname.startswith(f"particle_velocity_{ax}"):
(tr[field],) = rp(fields=["v" + ax])
if fname.startswith("particle_mass"):
a = 0
data = np.zeros(npa, dtype="f8")
for ptb, size, m in zip(pbool, sizes, self.ws):
if ptb:
data[a : a + size] = m
a += size
tr[field] = data
elif fname == "particle_index":
tr[field] = np.arange(idxa, idxb)
elif fname == "particle_type":
a = 0
data = np.zeros(npa, dtype="int64")
for i, (ptb, size) in enumerate(zip(pbool, sizes)):
if ptb:
data[a : a + size] = i
a += size
tr[field] = data
if pbool[-1] and fname in particle_star_fields:
data = read_star_field(self.file_stars, field=fname)
temp = tr.get(field, np.zeros(npa, "f8"))
nstars = self.ls[-1] - self.ls[-2]
if nstars > 0:
temp[-nstars:] = data
tr[field] = temp
if fname == "particle_creation_time":
self.tb, self.ages, data = interpolate_ages(
tr[field][-nstars:],
self.file_stars,
self.tb,
self.ages,
self.ds.current_time,
)
temp = tr.get(field, np.zeros(npa, "f8"))
temp[-nstars:] = data
tr[field] = temp
del data
# We check again, after it's been filled
if fname.startswith("particle_mass"):
# We now divide by NGrid in order to make this match up. Note that
# this means that even when requested in *code units*, we are
# giving them as modified by the ng value. This only works for
# dark_matter -- stars are regular matter.
tr[field] /= self.ds.domain_dimensions.prod()
if tr == {}:
tr = {f: np.array([]) for f in [field]}
if self.caching:
self.cache[field] = tr[field]
return self.cache[field]
else:
return tr[field]
class IOHandlerDarkMatterART(IOHandlerART):
_dataset_type = "dm_art"
def _count_particles(self, data_file):
return {
k: self.ds.parameters["lspecies"][i]
for i, k in enumerate(self.ds.particle_types_raw)
}
def _identify_fields(self, domain):
field_list = []
self.particle_field_list = [f for f in particle_fields]
for ptype in self.ds.particle_types_raw:
for pfield in self.particle_field_list:
pfn = (ptype, pfield)
field_list.append(pfn)
return field_list, {}
def _get_field(self, field):
if field in self.cache.keys() and self.caching:
mylog.debug("Cached %s", str(field))
return self.cache[field]
mylog.debug("Reading %s", str(field))
tr = {}
ftype, fname = field
ptmax = self.ws[-1]
pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
npa = idxb - idxa
sizes = np.diff(np.concatenate(([0], self.ls)))
rp = partial(
read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
)
for ax in "xyz":
if fname.startswith(f"particle_position_{ax}"):
# This is not the same as domain_dimensions
dd = self.ds.parameters["ng"]
off = 1.0 / dd
tr[field] = rp(fields=[ax])[0] / dd - off
if fname.startswith(f"particle_velocity_{ax}"):
(tr[field],) = rp(["v" + ax])
if fname.startswith("particle_mass"):
a = 0
data = np.zeros(npa, dtype="f8")
for ptb, size, m in zip(pbool, sizes, self.ws):
if ptb:
data[a : a + size] = m
a += size
tr[field] = data
elif fname == "particle_index":
tr[field] = np.arange(idxa, idxb)
elif fname == "particle_type":
a = 0
data = np.zeros(npa, dtype="int64")
for i, (ptb, size) in enumerate(zip(pbool, sizes)):
if ptb:
data[a : a + size] = i
a += size
tr[field] = data
# We check again, after it's been filled
if fname.startswith("particle_mass"):
# We now divide by NGrid in order to make this match up. Note that
# this means that even when requested in *code units*, we are
# giving them as modified by the ng value. This only works for
# dark_matter -- stars are regular matter.
tr[field] /= self.ds.domain_dimensions.prod()
if tr == {}:
tr[field] = np.array([])
if self.caching:
self.cache[field] = tr[field]
return self.cache[field]
else:
return tr[field]
def _yield_coordinates(self, data_file):
for ptype in self.ds.particle_types_raw:
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
yield ptype, np.stack((x, y, z), axis=-1)
def _determine_field_size(pf, field, lspecies, ptmax):
pbool = np.zeros(len(lspecies), dtype="bool")
idxas = np.concatenate(
(
[
0,
],
lspecies[:-1],
)
)
idxbs = lspecies
if "specie" in field:
index = int(field.replace("specie", ""))
pbool[index] = True
else:
raise RuntimeError
idxa, idxb = idxas[pbool][0], idxbs[pbool][-1]
return pbool, idxa, idxb
def interpolate_ages(
data, file_stars, interp_tb=None, interp_ages=None, current_time=None
):
if interp_tb is None:
t_stars, a_stars = read_star_field(file_stars, field="t_stars")
# timestamp of file should match amr timestamp
if current_time:
tdiff = YTQuantity(b2t(t_stars), "Gyr") - current_time.in_units("Gyr")
if np.abs(tdiff) > 1e-4:
mylog.info("Timestamp mismatch in star particle header: %s", tdiff)
mylog.info("Interpolating ages")
interp_tb, interp_ages = b2t(data)
interp_tb = YTArray(interp_tb, "Gyr")
interp_ages = YTArray(interp_ages, "Gyr")
temp = np.interp(data, interp_tb, interp_ages)
return interp_tb, interp_ages, temp
def _read_art_level_info(
f, level_oct_offsets, level, coarse_grid=128, ncell0=None, root_level=None
):
pos = f.tell()
f.seek(level_oct_offsets[level])
# Get the info for this level, skip the rest
junk, nLevel, iOct = read_vector(f, "i", ">")
# fortran indices start at 1
# Skip all the oct index data
le = np.zeros((nLevel, 3), dtype="int64")
fl = np.ones((nLevel, 6), dtype="int64")
iocts = np.zeros(nLevel + 1, dtype="int64")
idxa, idxb = 0, 0
chunk = int(1e6) # this is ~111MB for 15 dimensional 64 bit arrays
left = nLevel
while left > 0:
this_chunk = min(chunk, left)
idxb = idxa + this_chunk
data = np.fromfile(f, dtype=">i", count=this_chunk * 15)
data = data.reshape(this_chunk, 15)
left -= this_chunk
le[idxa:idxb, :] = data[:, 1:4]
fl[idxa:idxb, 1] = np.arange(idxa, idxb)
# pad byte is last, LL2, then ioct right before it
iocts[idxa:idxb] = data[:, -3]
idxa = idxa + this_chunk
del data
# emulate fortran code
# do ic1 = 1 , nLevel
# read(19) (iOctPs(i,iOct),i=1,3),(iOctNb(i,iOct),i=1,6),
# & iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct),
# & iOctLL2(iOct)
# iOct = iOctLL1(iOct)
# ioct always represents the index of the next variable
# not the current, so shift forward one index
# the last index isn't used
iocts[1:] = iocts[:-1] # shift
iocts = iocts[:nLevel] # chop off the last, unused, index
iocts[0] = iOct # starting value
# now correct iocts for fortran indices start @ 1
iocts = iocts - 1
assert np.unique(iocts).shape[0] == nLevel
# left edges are expressed as if they were on
# level 15, so no matter what level max(le)=2**15
# correct to the yt convention
# le = le/2**(root_level-1-level)-1
# try to find the root_level first
def cfc(root_level, level, le):
d_x = 1.0 / (2.0 ** (root_level - level + 1))
fc = (d_x * le) - 2 ** (level - 1)
return fc
if root_level is None:
root_level = np.floor(np.log2(le.max() * 1.0 / coarse_grid))
root_level = root_level.astype("int64")
for _ in range(10):
fc = cfc(root_level, level, le)
go = np.diff(np.unique(fc)).min() < 1.1
if go:
break
root_level += 1
else:
fc = cfc(root_level, level, le)
unitary_center = fc / (coarse_grid * 2.0 ** (level - 1))
assert np.all(unitary_center < 1.0)
# again emulate the fortran code
# This is all for calculating child oct locations
# iC_ = iC + nbshift
# iO = ishft ( iC_ , - ndim )
# id = ishft ( 1, MaxLevel - iOctLv(iO) )
# j = iC_ + 1 - ishft( iO , ndim )
# Posx = d_x * (iOctPs(1,iO) + sign ( id , idelta(j,1) ))
# Posy = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
# Posz = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
# idelta = [[-1, 1, -1, 1, -1, 1, -1, 1],
# [-1, -1, 1, 1, -1, -1, 1, 1],
# [-1, -1, -1, -1, 1, 1, 1, 1]]
# idelta = np.array(idelta)
# if ncell0 is None:
# ncell0 = coarse_grid**3
# nchild = 8
# ndim = 3
# nshift = nchild -1
# nbshift = nshift - ncell0
# iC = iocts #+ nbshift
# iO = iC >> ndim #possibly >>
# id = 1 << (root_level - level)
# j = iC + 1 - ( iO << 3)
# delta = np.abs(id)*idelta[:,j-1]
# try without the -1
# le = le/2**(root_level+1-level)
# now read the hvars and vars arrays
# we are looking for iOctCh
# we record if iOctCh is >0, in which it is subdivided
# iOctCh = np.zeros((nLevel+1,8),dtype='bool')
f.seek(pos)
return unitary_center, fl, iocts, nLevel, root_level
def get_ranges(
skip, count, field, words=6, real_size=4, np_per_page=4096 ** 2, num_pages=1
):
# translate every particle index into a file position ranges
ranges = []
arr_size = np_per_page * real_size
idxa, idxb = 0, 0
posa, posb = 0, 0
for _page in range(num_pages):
idxb += np_per_page
for i, fname in enumerate(["x", "y", "z", "vx", "vy", "vz"]):
posb += arr_size
if i == field or fname == field:
if skip < np_per_page and count > 0:
left_in_page = np_per_page - skip
this_count = min(left_in_page, count)
count -= this_count
start = posa + skip * real_size
end = posa + this_count * real_size
ranges.append((start, this_count))
skip = 0
assert end <= posb
else:
skip -= np_per_page
posa += arr_size
idxa += np_per_page
assert count == 0
return ranges
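# Example sketch for get_ranges above (toy page layout; the real caller uses
# np_per_page = Nrow**2): get_ranges(skip=0, count=10, field="x", words=6,
# real_size=4, np_per_page=16, num_pages=2) returns [(0, 10)] -- the "x" block
# of page 0 starts at byte offset 0, so all 10 requested particles come from a
# single contiguous read of 10 * real_size bytes.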
def read_particles(file, Nrow, idxa, idxb, fields):
words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
real_size = 4 # for file_particle_data; not always true?
np_per_page = Nrow ** 2 # defined in ART a_setup.h, # of particles/page
num_pages = os.path.getsize(file) // (real_size * words * np_per_page)
fh = open(file)
skip, count = idxa, idxb - idxa
kwargs = dict(
words=words, real_size=real_size, np_per_page=np_per_page, num_pages=num_pages
)
arrs = []
for field in fields:
ranges = get_ranges(skip, count, field, **kwargs)
data = None
for seek, this_count in ranges:
fh.seek(seek)
temp = np.fromfile(fh, count=this_count, dtype=">f4")
if data is None:
data = temp
else:
data = np.concatenate((data, temp))
arrs.append(data.astype("f8"))
fh.close()
return arrs
def read_star_field(file, field=None):
data = {}
with open(file, "rb") as fh:
for dtype, variables in star_struct:
found = (
isinstance(variables, tuple) and field in variables
) or field == variables
if found:
data[field] = read_vector(fh, dtype[1], dtype[0])
else:
skip(fh, endian=">")
return data.pop(field)
def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars):
f.seek(level_child_offsets[level])
ioctch = np.zeros(nLevel, dtype="uint8")
idc = np.zeros(nLevel, dtype="int32")
chunk = int(1e6)
left = nLevel
width = nhydro_vars + 6
a, b = 0, 0
while left > 0:
chunk = min(chunk, left)
b += chunk
arr = np.fromfile(f, dtype=">i", count=chunk * width)
arr = arr.reshape((width, chunk), order="F")
assert np.all(arr[0, :] == arr[-1, :]) # pads must be equal
idc[a:b] = arr[1, :] - 1 # fix fortran indexing
ioctch[a:b] = arr[2, :] == 0 # if it is above zero, then refined available
# zero in the mask means there is refinement available
a = b
left -= chunk
assert left == 0
return idc, ioctch
nchem = 8 + 2
dtyp = np.dtype(f">i4,>i8,>i8,>{nchem}f4,>2f4,>i4")
#!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import dask
from dask.distributed import Client
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
VALID_META = {
# Camera settings
"camera": {
"label": "camera identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"imgtype": {
"label": "image type",
"datatype": "<class 'str'>",
"value": "none"
},
"zoom": {
"label": "camera zoom setting",
"datatype": "<class 'str'>",
"value": "none"
},
"exposure": {
"label": "camera exposure setting",
"datatype": "<class 'str'>",
"value": "none"
},
"gain": {
"label": "camera gain setting",
"datatype": "<class 'str'>",
"value": "none"
},
"frame": {
"label": "image series frame identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"lifter": {
"label": "imaging platform height setting",
"datatype": "<class 'str'>",
"value": "none"
},
# Date-Time
"timestamp": {
"label": "datetime of image",
"datatype": "<class 'datetime.datetime'>",
"value": None
},
# Sample attributes
"id": {
"label": "image identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"plantbarcode": {
"label": "plant barcode identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"treatment": {
"label": "treatment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"cartag": {
"label": "plant carrier identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Experiment attributes
"measurementlabel": {
"label": "experiment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Other
"other": {
"label": "other identifier",
"datatype": "<class 'str'>",
"value": "none"
}
}
METADATA_COPROCESS = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_VIS_ONLY = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_NIR_ONLY = {
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
# Set the temp directory for dask
dask.config.set(temporary_directory=TEST_TMPDIR)
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
os.mkdir(cache_dir)
# Define output path/filename
template_file = os.path.join(cache_dir, "config.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Save template file
config.save_config(config_file=template_file)
assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
# Define input path/filename
config_file = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# import config file
config.import_config(config_file=config_file)
assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
# Validate config
assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.start_date = "2020-05-10"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.end_date = "2020-05-10"
config.timestampformat = "%Y%m%d"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set an incorrect metadata term
config.filename_metadata.append("invalid")
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Do not set required filename_metadata
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set invalid cluster type
config.cluster = "MyCluster"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "FAKE"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014"
config.end_date = "2014"
config.timestampformat = '%Y' # no date in filename so check date range and date_format are ignored
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
config.include_all_subdirs = False
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == expected
def test_plantcv_parallel_metadata_parser_regex():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.delimiter = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR"}
config.start_date = "1970-01-01 00_00_00"
config.end_date = "1970-01-01 00_00_00"
config.timestampformat = "%Y-%m-%d %H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV", "id": "117770"}
config.start_date = None
config.end_date = None
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_check_date_range_wrongdateformat():
start_date = 10
end_date = 10
img_time = '2010-10-10'
with pytest.raises(SystemExit, match=r'does not match format'):
date_format = '%Y%m%d'
_ = plantcv.parallel.check_date_range(
start_date, end_date, img_time, date_format)
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_fail_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"cartag": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
"output.json")
config.filename_metadata = ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_camera():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
    config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_camera", "output.json")
config.filename_metadata = ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'none',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'none',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_job_builder_single_image():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)
image_name = list(METADATA_VIS_ONLY.keys())[0]
result_file = os.path.join(cache_dir, image_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
cache_dir, '--result', result_file, '--writeimg', '--other', 'on']
    # The generated job command should match the expected command element-for-element
    assert len(jobs[0]) == len(expected)
    assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_job_builder_coprocess():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
config.coprocess = "NIR"
jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)
img_names = list(METADATA_COPROCESS.keys())
vis_name = img_names[0]
vis_path = METADATA_COPROCESS[vis_name]['path']
result_file = os.path.join(cache_dir, vis_name + '.txt')
nir_name = img_names[1]
coresult_file = os.path.join(cache_dir, nir_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
'--coresult', coresult_file, '--writeimg', '--other', 'on']
    # The generated job command should match the expected command element-for-element
    assert len(jobs[0]) == len(expected)
    assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster():
client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config={"cores": 1,
"memory": "1GB",
"disk": "1GB"})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
with pytest.raises(ValueError):
_ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
assert unix_time == 0
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
with pytest.raises(SystemExit):
_ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
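# A minimal sketch of the same conversion with a full timestamp, assuming
# convert_datetime_to_unixtime accepts any strptime-compatible format and counts
# seconds from the Unix epoch: one day past the epoch should map to 86400 seconds.
def test_plantcv_parallel_convert_datetime_to_unixtime_full_timestamp():
    unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-02 00:00:00",
                                                              date_format="%Y-%m-%d %H:%M:%S")
    assert unix_time == 86400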
def test_plantcv_parallel_multiprocess():
image_name = list(METADATA_VIS_ONLY.keys())[0]
image_path = os.path.join(METADATA_VIS_ONLY[image_name]['path'], image_name)
result_file = os.path.join(TEST_TMPDIR, image_name + '.txt')
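    # Build a single workflow command in the same form produced by job_builder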
jobs = [['python', TEST_PIPELINE, '--image', image_path, '--outdir', TEST_TMPDIR, '--result', result_file,
'--writeimg', '--other', 'on']]
# Create a dask LocalCluster client
client = Client(n_workers=1)
plantcv.parallel.multiprocess(jobs, client=client)
assert os.path.exists(result_file)
def test_plantcv_parallel_process_results():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
os.mkdir(cache_dir)
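    # Run twice against the same JSON file to exercise appending to an existing output file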
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
# Assert that the output JSON file matches the expected output JSON file
result_file = open(os.path.join(cache_dir, "appended_results.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_new_output():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'new_result.json'))
# Assert output matches expected values
result_file = open(os.path.join(cache_dir, "new_result.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "new_result.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_valid_json():
# Test when the file is a valid json file but doesn't contain expected keys
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(PARALLEL_TEST_DATA, "valid.json"))
def test_plantcv_parallel_process_results_invalid_json():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
os.mkdir(cache_dir)
# Move the test data to the tmp directory
shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), os.path.join(cache_dir, "bad_results"))
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(cache_dir, "bad_results"),
json_file=os.path.join(cache_dir, "bad_results", "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
matplotlib.use('Template')
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
'default bands': '159,253,520', 'wavelength units': 'nm',
'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
from plantcv.plantcv._debug import _debug
# Create a test tmp directory
img_outdir = tmpdir.mkdir("sub")
pcv.params.debug = debug
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
_debug(visual=img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
[tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=datatype, value=value, label=[])
assert outputs.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
# Create output instance
outputs = pcv.Outputs()
with pytest.raises(RuntimeError):
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "data_results.txt")
shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), outfile)
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.csv")
testfile = os.path.join(TEST_DATA, "data_results.csv")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
datatype=str, value="string", label="none")
outputs.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
scale='none', datatype=bool, value=True, label="none")
outputs.add_observation(sample='default', variable='list', trait='list variable', method='list',
scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
outputs.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
outputs.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
outputs.save_results(filename=outfile, outformat="csv")
with open(outfile, "r") as fp:
results = fp.read()
with open(testfile, "r") as fp:
test_results = fp.read()
assert results == test_results
def test_plantcv_transform_warp_smaller():
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    bimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Resizing interpolates pixel values, so re-threshold to keep the small mask binary
    bimg_small = cv2.resize(bimg, (200, 300))
    bimg_small[bimg_small > 0] = 255
    mrow, mcol = bimg_small.shape
    vrow, vcol, vdepth = img.shape
    # Test with debug = None
    pcv.params.debug = None
    mask_warped = pcv.transform.warp(bimg_small, img[:, :, 2],
                                     pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                     refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    # Test with debug = "plot"
    pcv.params.debug = 'plot'
    mask_warped_plot = pcv.transform.warp(bimg_small, img[:, :, 2],
                                          pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                          refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.count_nonzero(mask_warped) == 93142
    assert np.count_nonzero(mask_warped_plot) == 93142
def test_plantcv_transform_warp_larger():
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    # Test with debug = "print"
    pcv.params.debug = 'print'
    mask_warped_print = pcv.transform.warp(gimg_large, img,
                                           pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                           refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.sum(mask_warped_print) == 83103814
def test_plantcv_transform_warp_rgbimgerror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img, img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
def test_plantcv_transform_warp_4ptserror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mrow, mcol, _ = img.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 0], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 1], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 2], img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1), (0, vrow - 1)])
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
_ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
_ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
# Test with debug = None
pcv.params.debug = None
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
_ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
_ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
# Test with debug = None
pcv.params.debug = None
acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300, label="prefix")
pcv.outputs.clear()
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
_ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug=None, line position that will trigger -y
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug="plot", line position that will trigger -x
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug='plot', line position that will trigger -x, and two channel object
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = "print"
# pcv.params.debug = "print"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
# Test with debug = "plot"
# pcv.params.debug = "plot"
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = None
# pcv.params.debug = None
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='rgb')
assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img_binary, mask=mask, hist_plot_type=None)
def test_plantcv_analyze_color_bad_hist_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=img, mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_nir_16bit():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=np.uint16(img), mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_object():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
pcv.outputs.clear()
assert len(obj_images) != 0
def test_plantcv_analyze_object_grayscale_input():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 1
def test_plantcv_analyze_object_zero_slope():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:11, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
[[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
[[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
[[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
[[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
[[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
[[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
[[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
[[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[0:5, 45:49, 0] = 255
img[0:5, 0:5, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
[[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
[[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
[[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:15, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
[[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
[[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
[[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
[[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
[[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
[[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
[[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
[[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
[[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert obj_images is None
def test_plantcv_analyze_thermal_values():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
# img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")
img = contours_npz['arr_0']
pcv.params.debug = None
thermal_hist = pcv.analyze_thermal_values(thermal_array=img, mask=mask, histplot=True)
assert thermal_hist is not None and pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
hyper_array = pcv.hyperspectral.read_data(filename=spectral_filename)
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img_stacked, mask=img, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
masked_array = pcv.apply_mask(img=hyper_array.array_data, mask=img, mask_color="black")
assert np.mean(masked_array) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="wite")
def test_plantcv_auto_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], color='image')
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
# Test with debug = None
pcv.params.debug = None
cropped = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
x, y, z = np.shape(img1)
x1, y1, z1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
x, y = np.shape(gray_img)
x1, y1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_bad_color_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.canny_edge_detect(img=img, thickness=2)
_ = pcv.canny_edge_detect(img=img)
# Test with debug = None
pcv.params.debug = None
edge_img = pcv.canny_edge_detect(img=img)
    # Assert that the output image has the dimensions of the input image and is binary
    assert all(i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM))
    assert all(i == j for i, j in zip(np.unique(edge_img), [0, 255]))
def test_plantcv_canny_edge_detect_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color="gray")
def test_plantcv_closing():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.closing(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.closing(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.closing(bin_img)
assert np.sum(filtered_img) == 16261860
def test_plantcv_closing_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.closing(rgb_img)
def test_plantcv_cluster_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
                                                            nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_splitimg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file=None, filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=[[0]], contours=[],
hierarchy=np.array([[[1, -1, -1, -1]]]))
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file='multi', filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None, filenames=cluster_names)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None,
filenames=cluster_names_too_many)
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
pcv.params.debug = None
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_color_palette():
# Return a color palette
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_random():
# Return a color palette in random order
pcv.params.color_sequence = "random"
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_saved():
# Return a color palette that was saved
pcv.params.saved_color_scale = [[0, 0, 0], [255, 255, 255]]
colors = pcv.color_palette(num=2, saved=True)
assert colors == [[0, 0, 0], [255, 255, 255]]
def test_plantcv_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50)
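# A minimal cross-check sketch, assuming pcv.crop slices row-major as img[y:y + h, x:x + w]
# for grayscale input; if that assumption holds, the result should equal the plain NumPy slice.
def test_plantcv_crop_matches_numpy_slice():
    pcv.params.debug = None
    img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
    cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
    assert np.array_equal(cropped, img[10:60, 10:60])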
def test_plantcv_crop_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(dilate_img), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def test_plantcv_dilate_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.dilate(img, 1, 1)
def test_plantcv_erode():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(erode_img), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def test_plantcv_erode_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.erode(img, 1, 1)
def test_plantcv_distance_transform():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = None
pcv.params.debug = None
distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask)))
def test_plantcv_fatal_error():
# Verify that the fatal_error function raises a RuntimeError
with pytest.raises(RuntimeError):
pcv.fatal_error("Test error")
def test_plantcv_fill():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill(bin_img=img, size=63632)
# Assert that the output image has the dimensions of the input image
# assert all([i == j] for i, j in zip(np.shape(fill_img), TEST_BINARY_DIM))
assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill(bin_img=img, size=1)
def test_plantcv_fill_holes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill_holes(bin_img=img)
pcv.params.debug = "plot"
_ = pcv.fill_holes(bin_img=img)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill_holes(bin_img=img)
assert np.sum(fill_img) > np.sum(img)
def test_plantcv_fill_holes_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill_holes(bin_img=img)
def test_plantcv_find_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = None
pcv.params.debug = None
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_flip():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.flip(img=img, direction="horizontal")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.flip(img=img, direction="vertical")
_ = pcv.flip(img=img_binary, direction="vertical")
# Test with debug = None
pcv.params.debug = None
flipped_img = pcv.flip(img=img, direction="horizontal")
    assert all(i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM))
def test_plantcv_flip_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.flip(img=img, direction="vert")
def test_plantcv_gaussian_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
_ = pcv.gaussian_blur(img=img_color, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = None
pcv.params.debug = None
gaussian_img = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
imgavg = np.average(img)
gavg = np.average(gaussian_img)
assert gavg != imgavg
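# The get_kernel tests below verify that each supported shape ("cross", "rectangle", "ellipse")
# produces the expected 3x3 structuring element and that invalid sizes or shapes raise errors.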
def test_plantcv_get_kernel_cross():
kernel = pcv.get_kernel(size=(3, 3), shape="cross")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_rectangle():
kernel = pcv.get_kernel(size=(3, 3), shape="rectangle")
assert (kernel == np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])).all()
def test_plantcv_get_kernel_ellipse():
kernel = pcv.get_kernel(size=(3, 3), shape="ellipse")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_bad_input_size():
with pytest.raises(ValueError):
_ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
with pytest.raises(RuntimeError):
_ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR)
assert nirpath == nirpath1
def test_plantcv_get_nir_tv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS_TV)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR_TV)
assert nirpath == nirpath1
def test_plantcv_hist_equalization():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = None
pcv.params.debug = None
hist = pcv.hist_equalization(gray_img=img)
histavg = np.average(hist)
imgavg = np.average(img)
assert histavg != imgavg
def test_plantcv_hist_equalization_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.hist_equalization(gray_img=img)
def test_plantcv_image_add():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = None
pcv.params.debug = None
added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    assert all(i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM))
def test_plantcv_image_subtract():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = 'print'
_ = pcv.image_subtract(img1, img2)
# Test with debug = "plot"
pcv.params.debug = 'plot'
_ = pcv.image_subtract(img1, img2)
# Test with debug = None
pcv.params.debug = None
new_img = pcv.image_subtract(img1, img2)
assert np.array_equal(new_img, np.zeros(np.shape(new_img), np.uint8))
def test_plantcv_image_subtract_fail():
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
# test
with pytest.raises(RuntimeError):
_ = pcv.image_subtract(img1, img2)
def test_plantcv_invert():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.invert(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.invert(gray_img=img)
# Test with debug = None
pcv.params.debug = None
inverted_img = pcv.invert(gray_img=img)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(inverted_img), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def test_plantcv_landmark_reference_pt_dist():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_landmark_reference")
os.mkdir(cache_dir)
points_rescaled = [(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236),
(0.7431, 0.3681), (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889),
(0.4931, 0.5903), (0.3889, 0.5694), (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278),
(0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139), (0.5417, 0.2292), (0.7708, 0.3472),
(0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)]
centroid_rescaled = (0.4685, 0.4945)
bottomline_rescaled = (0.4685, 0.2569)
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=('a', 'b'), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=[(10, 1000)], centroid_r=(10, 10), bline_r=(10, 10))
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=(0, 0), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=points_rescaled, centroid_r=centroid_rescaled,
bline_r=bottomline_rescaled, label="prefix")
assert len(pcv.outputs.observations['prefix'].keys()) == 8
def test_plantcv_laplace_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_laplace_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = None
pcv.params.debug = None
lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(lp_img), TEST_GRAY_DIM))
def test_plantcv_logical_and():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_and")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
and_img = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(and_img), TEST_BINARY_DIM))
def test_plantcv_logical_or():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_or")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
or_img = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(or_img), TEST_BINARY_DIM))
def test_plantcv_logical_xor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_xor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
xor_img = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(xor_img), TEST_BINARY_DIM))
def test_plantcv_median_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = None
pcv.params.debug = None
blur_img = pcv.median_blur(gray_img=img, ksize=5)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(blur_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(blur_img), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def test_plantcv_median_blur_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.median_blur(img, 5.)
def test_plantcv_naive_bayes_classifier():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = None
pcv.params.debug = None
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(mask), TEST_GRAY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(mask), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def test_plantcv_naive_bayes_classifier_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS_BAD))
def test_plantcv_object_composition():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
_ = pcv.object_composition(img=img, contours=[], hierarchy=object_hierarchy)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Test with debug = None
pcv.params.debug = None
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_object_composition_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_within_frame():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask_ib = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_oob = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_OOB), -1)
in_bounds_ib = pcv.within_frame(mask=mask_ib, border_width=1, label="prefix")
in_bounds_oob = pcv.within_frame(mask=mask_oob, border_width=1)
assert (in_bounds_ib is True and in_bounds_oob is False)
def test_plantcv_within_frame_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
grayscale_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
with pytest.raises(RuntimeError):
_ = pcv.within_frame(grayscale_img)
def test_plantcv_opening():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.opening(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.opening(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.opening(bin_img)
assert np.sum(filtered_img) == 16184595
def test_plantcv_opening_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.opening(rgb_img)
def test_plantcv_output_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=None, mask_only=False)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False)
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Remove tmp files in working directory
shutil.rmtree("ori-images")
shutil.rmtree("mask-images")
# Test with debug = None
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png',
outdir=cache_dir, mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_output_mask_true():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
pcv.params.debug_outdir = cache_dir
os.mkdir(cache_dir)
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir,
mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_plot_image_matplotlib_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pimg = pcv.visualize.pseudocolor(gray_img=img, mask=mask, min_value=10, max_value=200)
with pytest.raises(RuntimeError):
pcv.plot_image(pimg)
def test_plantcv_plot_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
try:
pcv.plot_image(img=img)
except RuntimeError:
assert False
# Assert that the image was plotted without error
assert True
def test_plantcv_print_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_bad_type():
with pytest.raises(RuntimeError):
pcv.print_image(img=[], filename="/dev/null")
def test_plantcv_print_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_results(tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
pcv.print_results(filename=outfile)
assert os.path.exists(outfile)
def test_plantcv_readimage_native():
# Test with debug = None
pcv.params.debug = None
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='rgba')
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
# Assert that the image name returned equals the name of the input image
# Assert that the path of the image returned equals the path of the input image
# Assert that the dimensions of the returned image equals the expected dimensions
if img_name == TEST_INPUT_COLOR and path == TEST_DATA:
        if all(i == j for i, j in zip(np.shape(img), TEST_COLOR_DIM)):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_readimage_grayscale():
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="grey")
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="gray")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="rgb")
assert len(np.shape(img)) == 3
def test_plantcv_readimage_rgba_as_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_RGBA), mode="native")
assert np.shape(img)[2] == 3
def test_plantcv_readimage_csv():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_THERMAL_CSV), mode="csv")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_envi():
# Test with debug = None
pcv.params.debug = None
array_data = pcv.readimage(filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA), mode="envi")
if sys.version_info[0] < 3:
assert len(array_data.array_type) == 8
def test_plantcv_readimage_bad_file():
with pytest.raises(RuntimeError):
_ = pcv.readimage(filename=TEST_INPUT_COLOR)
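# The readbayer tests below exercise each demosaicing algorithm (default, edgeaware,
# variablenumbergradients) against all four Bayer patterns (BG, GB, RG, GR), checking that the
# decoded image has the expected three-channel shape, plus one missing-file error case.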
def test_plantcv_readbayer_default_bg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_readbayer_default_bg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
# Test with debug = "plot"
pcv.params.debug = "plot"
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_bad_input():
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, "no-image.png"), bayerpattern="GR", alg="default")
def test_plantcv_rectangle_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rectangle_mask(img=img_color, p1=(0, 0), p2=(2454, 2056), color="gray")
# Test with debug = None
pcv.params.debug = None
masked, hist, contour, heir = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="black")
maskedsum = np.sum(masked)
imgsum = np.sum(img)
assert maskedsum < imgsum
def test_plantcv_rectangle_mask_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="whit")
def test_plantcv_report_size_marker_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_report_size_marker_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
pcv.outputs.clear()
assert len(images) != 0
def test_plantcv_report_size_marker_define():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_grayscale_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# ROI contour
roi_contour = [np.array([[[0, 0]], [[0, 49]], [[49, 49]], [[49, 0]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_bad_marker_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='none',
objcolor='light', thresh_channel='s', thresh=120)
def test_plantcv_report_size_marker_bad_threshold_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel=None, thresh=120)
def test_plantcv_rgb2gray_cmyk():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
c = pcv.rgb2gray_cmyk(rgb_img=img, channel="c")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(c), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_cmyk_bad_channel():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
# Channel S is not in CMYK
_ = pcv.rgb2gray_cmyk(rgb_img=img, channel="s")
def test_plantcv_rgb2gray_hsv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_hsv")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = None
pcv.params.debug = None
s = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(s), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_hsv_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="l")
def test_plantcv_rgb2gray_lab():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_lab")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = None
pcv.params.debug = None
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(b), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_lab_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_lab(rgb_img=img, channel="v")
def test_plantcv_rgb2gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray(rgb_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray(rgb_img=img)
# Test with debug = None
pcv.params.debug = None
gray = pcv.rgb2gray(rgb_img=img)
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(gray), TEST_GRAY_DIM))
def test_plantcv_roi2mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "plot"
_ = pcv.roi.roi2mask(img=img, contour=obj_contour)
pcv.params.debug = "print"
mask = pcv.roi.roi2mask(img=img, contour=obj_contour)
assert np.shape(mask)[0:2] == np.shape(img)[0:2] and np.sum(mask) == 255
def test_plantcv_roi_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="largest")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="partial")
# Test with debug = None and roi_type = cutto
pcv.params.debug = None
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="cutto")
# Test with debug = None
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy, roi_type="partial")
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_roi_objects_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.roi_objects(img=img, roi_type="cut", roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy)
def test_plantcv_roi_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_type="partial", roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy)
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_rotate():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
rotated = pcv.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rotate_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate_gray():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_scale_features():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scale_features")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position='NA')
# Test with debug = None
pcv.params.debug = None
points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(obj=obj_contour, mask=mask,
points=TEST_ACUTE_RESULT,
line_position=50)
assert len(points_rescaled) == 23
def test_plantcv_scale_features_bad_input():
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_scharr_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scharr_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
# Test with debug = "print"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = None
pcv.params.debug = None
scharr_img = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(scharr_img), TEST_GRAY_DIM))
def test_plantcv_shift_img():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_shift_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="bottom")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="right")
# Test with debug = "plot"
_ = pcv.shift_img(img=mask, number=300, side="left")
# Test with debug = None
pcv.params.debug = None
rotated = pcv.shift_img(img=img, number=300, side="top")
imgavg = np.average(img)
shiftavg = np.average(rotated)
assert shiftavg != imgavg
def test_plantcv_shift_img_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=-300, side="top")
def test_plantcv_shift_img_bad_side_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=300, side="starboard")
def test_plantcv_sobel_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = None
pcv.params.debug = None
sobel_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(sobel_img), TEST_GRAY_DIM))
def test_plantcv_stdev_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
pcv.params.debug = "plot"
_ = pcv.stdev_filter(img=img, ksize=11)
pcv.params.debug = "print"
filter_img = pcv.stdev_filter(img=img, ksize=11)
assert (np.shape(filter_img) == np.shape(img))
def test_plantcv_watershed_segmentation():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_watershed_segmentation")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
# Test with debug = None
pcv.params.debug = None
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
assert pcv.outputs.observations['default']['estimated_object_count']['value'] > 9
def test_plantcv_white_balance_gray_16bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_16bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_gray_8bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_8bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_rgb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
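# The remaining white_balance tests confirm that malformed ROI tuples and an unknown mode
# raise RuntimeError rather than silently producing output.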
def test_plantcv_white_balance_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 5, 5, 5))
def test_plantcv_white_balance_bad_mode_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='histogram', roi=(5, 5, 80, 80))
def test_plantcv_white_balance_bad_input_int():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5., 5, 5, 5))
def test_plantcv_x_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_x_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=np.array([[0, 0], [0, 0]]), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=(), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
assert all([all([i == j] for i, j in zip(np.shape(top), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(bottom), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_small_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
assert all([all([i == j] for i, j in zip(np.shape(top), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(bottom), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_bad_input():
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_x_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_y_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
_ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=(), mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[21, 11]], [[159, 155]], [[237, 11]])),
mask=np.array(([[38, 54]], [[144, 169]], [[81, 137]])), img=img)
# Test with debug = None
pcv.params.debug = None
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
assert all([all([i == j] for i, j in zip(np.shape(left), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(right), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_small_obj():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.outputs.clear()
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
assert all([all([i == j] for i, j in zip(np.shape(left), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(right), (20, 1, 2))),
all([i == j] for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
assert all([i == j] for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_y_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.y_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_background_subtraction():
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
big_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Testing if background subtraction is actually still working.
    # This should return an array whose sum is greater than zero
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
fgmask = pcv.background_subtraction(background_image=big_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) > 0)
# The same foreground subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) == 0)
# The same background subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) == 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_debug():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_background_subtraction_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
# Test with debug = "print"
pcv.params.debug = "print"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# Test with debug = "plot"
pcv.params.debug = "plot"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_bad_img_type():
fg_color = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_gray = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND), 0)
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.background_subtraction(background_image=bg_gray, foreground_image=fg_color)
def test_plantcv_background_subtraction_different_sizes():
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
bg_shp = np.shape(bg_img) # type: tuple
bg_img_resized = cv2.resize(bg_img, (int(bg_shp[0] / 2), int(bg_shp[1] / 2)), interpolation=cv2.INTER_AREA)
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img_resized, foreground_image=fg_img)
assert np.sum(fgmask) > 0
def test_plantcv_spatial_clustering_dbscan():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_dbscan")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = "print"
_ = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
pcv.params.debug = "plot"
spmask = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_optics():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_optics")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
spmask = pcv.spatial_clustering(img, algorithm="OPTICS", min_cluster_size=100, max_distance=5000)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_badinput():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
with pytest.raises(NameError):
_ = pcv.spatial_clustering(img, algorithm="Hydra", min_cluster_size=5, max_distance=100)
# ##############################
# Tests for the learn subpackage
# ##############################
def test_plantcv_learn_naive_bayes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
maskdir = os.path.join(cache_dir, "masks")
if not os.path.exists(imgdir):
os.mkdir(imgdir)
if not os.path.exists(maskdir):
os.mkdir(maskdir)
    # Copy an image and mask to the image/mask directories
shutil.copyfile(os.path.join(TEST_DATA, TEST_VIS_SMALL), os.path.join(imgdir, "image.png"))
shutil.copyfile(os.path.join(TEST_DATA, TEST_MASK_SMALL), os.path.join(maskdir, "image.png"))
# Run the naive Bayes training module
outfile = os.path.join(cache_dir, "naive_bayes_pdfs.txt")
plantcv.learn.naive_bayes(imgdir=imgdir, maskdir=maskdir, outfile=outfile, mkplots=True)
assert os.path.exists(outfile)
def test_plantcv_learn_naive_bayes_multiclass():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes_multiclass")
os.mkdir(cache_dir)
# Run the naive Bayes multiclass training module
outfile = os.path.join(cache_dir, "naive_bayes_multiclass_pdfs.txt")
plantcv.learn.naive_bayes_multiclass(samples_file=os.path.join(TEST_DATA, TEST_SAMPLED_RGB_POINTS), outfile=outfile,
mkplots=True)
assert os.path.exists(outfile)
# ####################################
# Tests for the morphology subpackage
# ####################################
def test_plantcv_morphology_segment_curvature():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_curvature")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects, label="prefix")
pcv.params.debug = "plot"
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects)
assert len(pcv.outputs.observations['default']['segment_curvature']['value']) == 22
def test_plantcv_morphology_check_cycles():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "print"
_ = pcv.morphology.check_cycles(mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.check_cycles(mask)
pcv.params.debug = None
_ = pcv.morphology.check_cycles(mask)
assert pcv.outputs.observations['default']['num_cycles']['value'] == 1
def test_plantcv_morphology_find_branch_pts():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton)
pcv.params.debug = None
branches = pcv.morphology.find_branch_pts(skel_img=skeleton)
assert np.sum(branches) == 9435
def test_plantcv_morphology_find_tips():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_tips")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_tips(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_tips(skel_img=skeleton)
pcv.params.debug = None
tips = pcv.morphology.find_tips(skel_img=skeleton)
assert np.sum(tips) == 9435
def test_plantcv_morphology_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.prune(skel_img=skeleton, size=1)
pcv.params.debug = "plot"
_ = pcv.morphology.prune(skel_img=skeleton, size=1, mask=skeleton)
pcv.params.debug = None
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_prune_size0():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=0)
assert np.sum(pruned_img) == np.sum(skeleton)
def test_plantcv_morphology_iterative_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img = pcv.morphology._iterative_prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_segment_skeleton():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_skeleton")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.segment_skeleton(skel_img=skeleton, mask=mask)
pcv.params.debug = "plot"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
assert len(segment_objects) == 73
def test_plantcv_morphology_fill_segments():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_fill_segments")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
pcv.params.debug = "print"
_ = pcv.morphology.fill_segments(mask, obj, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.fill_segments(mask, obj)
tests = [pcv.outputs.observations['default']['segment_area']['value'][42] == 5529,
pcv.outputs.observations['default']['segment_area']['value'][20] == 5057,
pcv.outputs.observations['default']['segment_area']['value'][49] == 3323]
assert all(tests)
def test_plantcv_morphology_fill_segments_with_stem():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_fill_segments")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
stem_obj = obj[0:4]
pcv.params.debug = "print"
_ = pcv.morphology.fill_segments(mask, obj, stem_obj)
num_objects = len(pcv.outputs.observations['default']['leaf_area']['value'])
assert num_objects == 70
def test_plantcv_morphology_segment_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 22
def test_plantcv_morphology_segment_angle_overflow():
# Clear previous outputs
pcv.outputs.clear()
# Don't prune, would usually give overflow error without extra if statement in segment_angle
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 73
def test_plantcv_morphology_segment_euclidean_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_eu_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_eu_length']['value']) == 22
def test_plantcv_morphology_segment_euclidean_length_bad_input():
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skel = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skel)
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
def test_plantcv_morphology_segment_path_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_path_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_path_length']['value']) == 22
def test_plantcv_morphology_skeletonize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_skeletonize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
input_skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = "plot"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
skeleton = pcv.morphology.skeletonize(mask=mask)
arr = np.array(skeleton == input_skeleton)
assert arr.all()
def test_plantcv_morphology_segment_sort():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_sort")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.params.debug = "print"
_ = pcv.morphology.segment_sort(skeleton, seg_objects, mask=skeleton)
pcv.params.debug = "plot"
leaf_obj, stem_obj = pcv.morphology.segment_sort(skeleton, seg_objects)
assert len(leaf_obj) == 36
def test_plantcv_morphology_segment_tangent_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2)
assert len(pcv.outputs.observations['default']['segment_tangent_angle']['value']) == 73
def test_plantcv_morphology_segment_id():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_id(skel, objs)
pcv.params.debug = "plot"
_, labeled_img = pcv.morphology.segment_id(skel, objs, mask=skel)
assert np.sum(labeled_img) > np.sum(skel)
def test_plantcv_morphology_segment_insertion_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 3, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
assert pcv.outputs.observations['default']['segment_insertion_angle']['value'][:6] == ['NA', 'NA', 'NA',
24.956918822001636,
50.7313343343401,
56.427712102130734]
def test_plantcv_morphology_segment_insertion_angle_bad_stem():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
stem_obj = [leaf_obj[0], leaf_obj[10]]
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
def test_plantcv_morphology_segment_combine():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
# Test with list of IDs input
_, new_objects = pcv.morphology.segment_combine([0, 1], seg_objects, skel)
assert len(new_objects) + 1 == len(seg_objects)
def test_plantcv_morphology_segment_combine_lists():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "print"
# Test with list of lists input
_, new_objects = pcv.morphology.segment_combine([[0, 1, 2], [3, 4]], seg_objects, skel)
assert len(new_objects) + 3 == len(seg_objects)
def test_plantcv_morphology_segment_combine_bad_input():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_, new_objects = pcv.morphology.segment_combine([0.5, 1.5], seg_objects, skel)
def test_plantcv_morphology_analyze_stem():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, segmented_img, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == -12.531776428222656
def test_plantcv_morphology_analyze_stem_bad_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
_, _ = pcv.morphology.segment_sort(pruned, seg_objects)
# print([stem_obj[3]])
# stem_obj = [stem_obj[3]]
stem_obj = [[[[1116, 1728]], [[1116, 1]]]]
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == 22877334.0
# ########################################
# Tests for the hyperspectral subpackage
# ########################################
def test_plantcv_hyperspectral_read_data_default():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_read_data_default")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
pcv.params.debug = "print"
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_no_default_bands():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_NO_DEFAULT)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_approx_pseudorgb():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_APPROX_PSEUDO)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_spectral_index_ndvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_gdvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_gdvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_gdvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.gdvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_savi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_savi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_savi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.savi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ci_rededge():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ci_rededge")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ci_rededge_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ci_rededge(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri550():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri550")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri550_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri550(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri700():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri700")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri700_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri700(hsi=index_array, distance=20)
def test_plantcv_spectral_index_egi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_egi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
index_array = pcv.spectral_index.egi(rgb_img=rgb_img)
assert np.shape(index_array.array_data) == (2056, 2454) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_evi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.evi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mcari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mcari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
"""
@file : KNN_brute.py
@author : zgf
@brief : hand-coded (from scratch) implementation of the KNN algorithm
@attention : life is short,I need python
"""
def draw(X_train, y_train, X_new):
    # Initialize the positive and negative instance points
    X_po = np.zeros(X_train.shape[1])
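# A minimal brute-force KNN prediction sketch, shown only to illustrate the
# algorithm named in the docstring above. The helper name and its arguments
# (knn_predict, k, p) are assumptions, not taken from the original script.
def knn_predict(X_train, y_train, x_new, k=3, p=2):
    X = np.asarray(X_train, dtype=float)
    # L_p distances from the query point to every training point (brute force)
    dists = np.linalg.norm(X - np.asarray(x_new, dtype=float), ord=p, axis=1)
    # indices of the k closest training points
    nearest = np.argsort(dists)[:k]
    # majority vote over the labels of the k nearest neighbours
    labels = np.asarray(y_train)[nearest]
    return Counter(labels.tolist()).most_common(1)[0][0]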
# ported from MATLAB/Sandbox/GSpline/getExtraOrdCornerIndexMask.m
import numpy as np
from helper_functions import get_num_edges_meeting
from checkB1B2OrientationReversal import checkB1B2OrientationReversal
from checkB1B2Reversal_opt import checkB1B2Reversal_opt
def getExtraOrdCornerIndexMask(quad_list,AVertexList,B1VertexList,B2VertexList,CVertexList,quad_control_point_indices,quad_index,whichCorner):
# TODO: Understand and change code
mod_index = lambda i, modul: (i)%modul
shifted_indices = lambda ind, modul: mod_index(np.array(range(modul)) + ind,modul)
    reverse_shifted_indices = lambda ind, modul: mod_index(np.arange(modul,0,-1) + ind,modul)
## Here is the code to generate the bounding box from the heatmap
#
# to reproduce the ILSVRC localization result, you need to first generate
# the heatmap for each testing image by merging the heatmap from the
# 10-crops (it is exactly what the demo code is doing), then resize the merged heatmap back to the original size of
# that image. Then use this bbox generator to generate the bbox from the resized heatmap.
#
# The source code of the bbox generator is also released. Probably you need
# to install the correct version of OpenCV to compile it.
#
# Special thanks to <NAME> for helping on this code.
#
# <NAME>, April 19, 2016
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def im2double(im):
return cv2.normalize(im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
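# Illustrative sketch of the bbox-from-heatmap idea described in the header
# comments above. The real generator is the compiled OpenCV program ("dt_box")
# shown in the commented-out os.system call below; this helper only
# approximates the idea by thresholding the resized heatmap and taking the
# bounding rectangles of the hot regions. The threshold semantics here are an
# assumption, not a reproduction of dt_box.
def heatmap_to_bboxes(heatmap, threshold=0.5):
    # collapse a colour heatmap to one channel and rescale it to [0, 1]
    hm = im2double(heatmap)
    if hm.ndim == 3:
        hm = hm.mean(axis=2)
    binary = (hm >= threshold).astype(np.uint8) * 255
    # findContours returns (contours, hierarchy) on OpenCV >= 4 and
    # (image, contours, hierarchy) on OpenCV 3; [-2] works for both
    contours = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    # one (xmin, ymin, xmax, ymax) box per connected hot region
    boxes = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        boxes.append((x, y, x + w, y + h))
    return boxes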
# bbox_threshold = [20, 100, 110] # parameters for the bbox generator
# curParaThreshold = str(bbox_threshold[0])+' '+str(bbox_threshold[1])+' '+str(bbox_threshold[2])+' '
# curHeatMapFile = '/dccstor/alfassy/initial_layers/CAM_Python_master/bboxgenerator/heatmap_6.jpg'
# curImgFile = '/dccstor/alfassy/initial_layers/CAM_Python_master/bboxgenerator/sample_6.jpg'
# curBBoxFile = '/dccstor/alfassy/initial_layers/CAM_Python_master/bboxgenerator/heatmap_6Test.txt'
#
# os.system("/dccstor/alfassy/initial_layers/CAM_Python_master/bboxgenerator/./dt_box "+curHeatMapFile+' '+curParaThreshold+' '+curBBoxFile)
def gen_bbox_img(curImgFile, curBBoxFile, out_path):
with open(curBBoxFile) as f:
for line in f:
items = [int(x) for x in line.strip().split()]
boxData1 = np.array(items[0::4]).T
            boxData2 = np.array(items[1::4])
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calendar
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
# Load temperature data only
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
sim_temperature=sim_weather[cities]
# Convert temperatures to Fahrenheit
sim_temperature= (sim_temperature*(9/5))+32
sim_temperature=sim_temperature.values
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
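# The nested loops above (and the historical version further below) compute
# degree days element by element. An equivalent vectorized form is sketched
# here purely for illustration; the helper is not called anywhere in this
# script and its name is an assumption.
def degree_days(temps_F, base=65.0):
    temps = np.asarray(temps_F, dtype=float)
    # heating degree days: how far each day falls below the base temperature
    hdd = np.maximum(0.0, base - temps)
    # cooling degree days: how far each day rises above the base temperature
    cdd = np.maximum(0.0, temps - base)
    return hdd, cdd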
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_cities = len(cities)
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_sums = np.reshape(np.sum(BPA,axis= 0).values,(1,num_BPA))
CA_sums = np.reshape(np.sum(CA,axis=0).values,(1,num_CA))
WB_sums = np.reshape(np.sum(WB,axis=0).values,(1,num_Will))
HO_sums = np.reshape(np.sum(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = np.column_stack((BPA_sums,CA_sums,WB_sums,HO_sums))
if y_index < 1:
hist_totals = joined
else:
hist_totals = np.vstack((hist_totals,joined))
BPA_headers = np.reshape(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = np.reshape(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = np.reshape(list(Willamette_streamflow.loc[:,'Albany':]),(1,num_Will))
HO_headers = np.reshape(['Hoover'],(1,1))
headers = np.column_stack((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
added_value=abs(np.min((df_hist_totals)))+5
log_hist_total=np.log(df_hist_totals+abs(added_value))
A=df_hist_totals.values
B=np.column_stack((A,annual_HDD,annual_CDD))
x,y=np.shape(B)
#data is the data matrix at all time steps. The dimension would be X*Y
#data2 is required if calculating dissimilarity
#Step 1: Transform the data into empirical CDF
P=np.zeros((x,y))
for i in range(0,y):
ECDF=edis.ECDF(B[:,i])
P[:,i]=ECDF(B[:,i])
Y=2*(P-0.5)
new_cols = ['Name'] + ['type_' + str(i) for i in range(0,141)]
#remove constant zeros columns
need_to_remove=[1,17,22,24,27,32,34,36,37,38,44,107,108,109]
Y2=np.delete(Y,need_to_remove,axis=1)
Y[:,107]=1
mean=np.mean(Y,axis=0)
cov=np.cov(Y,rowvar=0)
runs=int(num_sim_days/365)*5
sim_years=int(num_sim_days/365)
N = np.random.multivariate_normal(mean,cov,runs)
T=(N/2)+0.5
T_all=np.zeros((runs,y))
for i in range(0,y):
for j in range(0,runs):
if T[j,i] <0:
T_all[j,i]=(np.percentile(B[:,i],q=0*100))*(1+T[j,i])
elif T[j,i] <=1 and T[j,i] >=0:
T_all[j,i]=np.percentile(B[:,i],q=T[j,i]*100)
else:
T_all[j,i]=(np.percentile(B[:,i],q=1*100))*T[j,i]
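# The triple branch above maps each correlated uniform T[j, i] back through the
# empirical quantile function of column i, rescaling the sample extremes when a
# value falls outside [0, 1]. For values already inside [0, 1] it is equivalent
# to the helper below, shown for illustration only (the name is assumed and the
# function is not used later in this script).
def empirical_quantile(sample, u):
    # inverse empirical CDF of `sample`, evaluated at probabilities u in [0, 1]
    return np.percentile(np.asarray(sample), np.asarray(u) * 100)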
Sim_total=T_all[:,:112]
Sim_HDD_CDD=T_all[:,112:]
Sim_CDD=Sim_HDD_CDD[:,15:]
Sim_HDD=Sim_HDD_CDD[:,:15]
######################################
#sns.kdeplot(annual_CDD[:,0],label='His')
#sns.kdeplot(annual_CDD_sim[:,0],label='Syn')
#sns.kdeplot(Sim_HDD_CDD[:,15],label='Capula')
#plt.legend()
#
#sns.kdeplot(annual_HDD[:,0],label='His')
#sns.kdeplot(annual_HDD_sim[:,0],label='Syn')
#sns.kdeplot(Sim_HDD_CDD[:,0],label='Capula')
#plt.legend()
#########################################
HDD_CDD=np.column_stack((annual_HDD_sim,annual_CDD_sim))
year_list=np.zeros(int(num_sim_days/365))
Best_RMSE = 9999999999
CHECK=np.zeros((sim_years,runs))
for i in range(0,sim_years):
for j in range(0,runs):
RMSE = (np.sum(np.abs(HDD_CDD[i,:]-Sim_HDD_CDD[j,:])))
CHECK[i,j]=RMSE
if RMSE <= Best_RMSE:
year_list[i] = j
Best_RMSE=RMSE
else:
pass
Best_RMSE = 9999999999
sim_totals=np.zeros((sim_years,num_gages))
for i in range(0,sim_years):
sim_totals[i,:] = Sim_total[int(year_list[i]),:]
###################################################################################
#C_1=np.corrcoef(sim_totals,rowvar=0)
#C_his=np.corrcoef(A,rowvar=0)
#import seaborn as sns; sns.set()
#
#grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
#fig,ax=plt.subplots()
#plt.rcParams["font.weight"] = "bold"
#plt.rcParams["axes.labelweight"] = "bold"
#ax1=plt.subplot(121)
#sns.heatmap(C_1,vmin=0,vmax=1,cbar=False)
#plt.axis('off')
#ax.set_title('Syn')
#
#
#
#ax2=plt.subplot(122)
#cbar_ax = fig.add_axes([.92, .15, .03, .7]) # <-- Create a colorbar axes
#
#fig2=sns.heatmap(C_his,ax=ax2,cbar_ax=cbar_ax,vmin=0,vmax=1)
#cbar=ax2.collections[0].colorbar
#cbar.ax.tick_params(labelsize='large')
#
#fig2.axis('off')
#
#
#
##################################################################################
#plt.figure()
#sns.kdeplot(A[:,0],label='His')
#sns.kdeplot(sim_totals[:,0],label='Syn')
#sns.kdeplot(Sim_total[:,0],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,5],label='His')
#sns.kdeplot(sim_totals[:,5],label='Syn')
#sns.kdeplot(Sim_total[:,5],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,52],label='His')
#sns.kdeplot(sim_totals[:,52],label='Syn')
#sns.kdeplot(Sim_total[:,52],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,55],label='His')
#sns.kdeplot(sim_totals[:,55],label='Syn')
#sns.kdeplot(Sim_total[:,55],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,56],label='His')
#sns.kdeplot(sim_totals[:,56],label='Syn')
#sns.kdeplot(Sim_total[:,56],label='Capula')
#plt.legend()
#
#plt.figure()
#sns.kdeplot(A[:,66],label='His')
#sns.kdeplot(sim_totals[:,66],label='Syn')
#sns.kdeplot(Sim_total[:,66],label='Capula')
#plt.legend()
##################################################################################
# impose logical constraints
mins = np.min(df_hist_totals.loc[:,:'Hoover'],axis=0)
for i in range(0,num_gages):
lower_bound = mins[i]
for j in range(0,sim_years):
if sim_totals[j,i] < lower_bound:
sim_totals[j,i] = lower_bound*np.random.uniform(0,1)
df_sim_totals = pd.DataFrame(sim_totals)
H = list(headers)
df_sim_totals.columns = H
#A1=[]
#A2=[]
#for h in H:
# a1=np.average(df_hist_totals.loc[:,h])
# a2=np.average(df_sim_totals.loc[:,h])
# A1.append(a1)
# A2.append(a2)
#
#plt.plot(A1)
#plt.plot(A2)
#####################################################################################
# This section selects daily fractions which are paired with
# annual totals to arrive at daily streamflows
# 4 cities are nearest to all 109 stream gage sites
Fraction_calculation_cities=['Spokane','Boise','Sacramento','Fresno']
# Each is weighted by average annual flow at nearby gage sites
Temperature_weights=pd.read_excel('Synthetic_streamflows/city_weights.xlsx',header=0)
# historical temperatures for those 4 cities
fraction_hist_temp=df_temp[Fraction_calculation_cities]
fraction_hist_temp_matrix=fraction_hist_temp.values
# calculate daily record of weighted temperatures across 4 cities
weighted_T=np.zeros(len(fraction_hist_temp_matrix))
for i in range(0,len(fraction_hist_temp_matrix)):
weighted_T[i]=fraction_hist_temp_matrix[i,0]*Temperature_weights['Spokane'] + fraction_hist_temp_matrix[i,1] * Temperature_weights['Boise'] + fraction_hist_temp_matrix[i,2] * Temperature_weights['Sacramento'] + fraction_hist_temp_matrix[i,3]*Temperature_weights['Fresno']
# synthetic temperatures for each of the cities
fcc = list(['SPOKANE_T','BOISE_T','SACRAMENTO_T','FRESNO_T'])
fraction_sim=sim_weather[fcc]
fraction_sim_matrix=fraction_sim.values
weighted_T_sim=np.zeros(len(fraction_sim_matrix))
# calculate synthetic weighted temperature (in Fahrenheit)
for i in range(0,len(fraction_sim_matrix)):
weighted_T_sim[i]=fraction_sim_matrix[i,0]*Temperature_weights['Spokane'] + fraction_sim_matrix[i,1] * Temperature_weights['Boise'] + fraction_sim_matrix[i,2] * Temperature_weights['Sacramento'] + fraction_sim_matrix[i,3]*Temperature_weights['Fresno']
weighted_T_sim=(weighted_T_sim * (9/5)) +32
#Sample synthetic fractions, then combine with totals
sim_years=int(len(fraction_sim_matrix)/365)
sim_T=np.zeros((365,sim_years))
hist_years=int(len(fraction_hist_temp)/365)
hist_T=np.zeros((365,hist_years))
# reshape historical and simulated weighted temperatures in new variables
for i in range(0,hist_years):
hist_T[:,i] = weighted_T[i*365:365+(i*365)]
for i in range(0,sim_years):
sim_T[:,i] = weighted_T_sim[i*365:365+(i*365)]
# aggregate weighted temperatures into monthly values
Normal_Starting=datetime(1900,1,1)
datelist=pd.date_range(Normal_Starting,periods=365)
count=0
m=np.zeros(365)
for i in range(0,365):
m[i]=int(datelist[count].month)
count= count +1
if count >364:
count=0
hist_T_monthly=np.column_stack((hist_T,m))
monthly_hist_T=np.zeros((12,hist_years))
for i in range(0,sim_years):
for j in range(1,13):
d1=hist_T_monthly[hist_T_monthly[:,hist_years]==j]
d2=d1[:,:hist_years]
monthly_hist_T[j-1,:]=np.sum(d2,axis=0)
Normal_Starting=datetime(1900,1,1)
datelist=pd.date_range(Normal_Starting,periods=365)
count=0
m=np.zeros(365)
for i in range(0,365):
m[i]=int(datelist[count].month)
count= count +1
if count >364:
count=0
sim_T_monthly=np.column_stack((sim_T,m))
monthly_sim_T=np.zeros((12,sim_years))
for i in range(0,sim_years):
for j in range(1,13):
d1=sim_T_monthly[sim_T_monthly[:,sim_years]==j]
d2=d1[:,:sim_years]
monthly_sim_T[j-1,:]=np.sum(d2,axis=0)
# select historical year with most similar spring and summer temperatures
# to new simulated years
year_list=np.zeros(sim_years)
Best_RMSE = 9999999999
CHECK=np.zeros((sim_years,hist_years))
for i in range(0,sim_years):
for j in range(0,hist_years):
RMSE = (np.sum(np.abs(monthly_sim_T[3:8,i]-monthly_hist_T[3:8,j])))
CHECK[i,j]=RMSE
if RMSE <= Best_RMSE:
year_list[i] = j
Best_RMSE=RMSE
else:
pass
Best_RMSE = 9999999999
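# The pair of loops above is an argmin over summed absolute differences of the
# spring/summer monthly temperatures (the variable is named RMSE but it is an
# L1 distance). A vectorized equivalent is sketched below for illustration; it
# is not called in this script and its name is an assumption.
def closest_hist_years(sim_monthly, hist_monthly, months=slice(3, 8)):
    sim_m = np.asarray(sim_monthly)[months]
    hist_m = np.asarray(hist_monthly)[months]
    # summed absolute difference between every simulated year (columns of
    # sim_monthly) and every historical year (columns of hist_monthly)
    d = np.abs(sim_m[:, :, None] - hist_m[:, None, :]).sum(axis=0)
    # index of the closest historical year for each simulated year
    return np.argmin(d, axis=1)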
################################################################################
#Generate streamflow
TDA=np.zeros((int(365*sim_years),2))
totals_hist=np.zeros((num_gages,hist_years))
fractions_hist=np.zeros((hist_years,365,num_gages))
totals_hist_hoover=np.zeros((1,hist_years))
output_BPA= np.zeros((sim_years*365,num_BPA))
import os
from imutils import paths
import numpy as np
import xml.etree.ElementTree as ET
from scipy import stats
from xml.dom import minidom
# The paramater of the function is a path that contains the predictions of the
def nonMaximumSupression(detections_path):
output_path = detections_path[:detections_path.rfind("/")]
listdirmodels = [ p for p in os.listdir(detections_path) if "detection" in p]
annotationsFiles = list(paths.list_files(os.path.join(listdirmodels[0]), validExts=(".xml")))
for an in annotationsFiles:
boxes = []
classesBoxes = []
fileName = an.split("/")[-1]
# boxes += extractBoxes(an)
for dir in listdirmodels:
if os.path.isdir(dir):
ImageBoxes, classesB = extractBoxes(os.path.join(dir,fileName))
if len(ImageBoxes)!=0:
boxes = boxes + ImageBoxes
classesBoxes = classesBoxes + classesB
# boxes=[extractBoxes(os.path.join(dir,fileName)) for dir in listdirmodels if os.path.isdir(dir)]
boxes = np.array(boxes)
classesBoxes = np.array(classesBoxes)
if(len(boxes)!=0):
boxes, modes = non_max_suppression_fast(boxes,classesBoxes,0.45)
if not os.path.exists(os.path.join(output_path,"detectionEnsemble")):
os.makedirs(os.path.join(output_path,"detectionEnsemble"))
xml =generateXML(an, boxes, modes, "detectionEnsemble")
file = open(os.path.join(".","detectionEnsemble",fileName),'w')
file.write(xml)
def extractBoxes(annotation_path):
boxes = []
classes = []
doc = ET.parse(annotation_path)
doc = doc.getroot()
objects = doc.findall("object")
for o in objects:
box = []
bndBox = o.find('bndbox')
name = o.find('name').text
confidence = o.find('confidence').text
box.append(int(bndBox.find('xmin').text))
box.append(int(bndBox.find('ymin').text))
box.append(int(bndBox.find('xmax').text))
box.append(int(bndBox.find('ymax').text))
classes.append(name)
box.append(float(confidence))
boxes.append(box)
return boxes,classes
# Malisiewicz et al.
def non_max_suppression_fast(boxes,classesBoxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
modes = []
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
        # i is the index of the element that is kept
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have
idxDeleted = np.concatenate(([last], np.where(overlap > overlapThresh)[0]))
auxidxs = np.append(idxDeleted, i)
x = []
for j in auxidxs:
x.append(classesBoxes[j])
mode = stats.mode(x)
        idxs = np.delete(idxs,idxDeleted)
"""
The effect of Sampling
----------------------
This figure shows the effect of sampling on a light curve. We generate data
from a single sinusoid with a sampling rate equivalent to one of the LINEAR
light curves, and show the observed power and the window function power
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import lomb_scargle
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate the data
np.random.seed(42)
t_obs = 100 * np.random.random(40) # 40 observations in 100 days
y_obs1 = np.sin(np.pi * t_obs / 3)
dy1 = 0.1 + 0.1 * np.random.random(y_obs1.shape)
y_obs1 += np.random.normal(0, dy1)
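#------------------------------------------------------------
# Sketch of the remaining computation (assumed, not part of this excerpt):
# evaluate the Lomb-Scargle power of the observed data and of the window
# function (a unit signal at the observed times). The frequency grid below
# is illustrative.
omega = np.linspace(0.01, 10, 1000)
P_obs = lomb_scargle(t_obs, y_obs1, dy1, omega)
P_window = lomb_scargle(t_obs, np.ones_like(y_obs1), 1, omega,
                        generalized=False, subtract_mean=False)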
import cmor
import logging
import netCDF4
import numpy
import os
import cmor_target
import cmor_task
import cmor_utils
from datetime import datetime, timedelta
timeshift = timedelta(0)
# Apply timeshift for instance in case you want manually to add a shift for the piControl:
# timeshift = datetime(2260,1,1) - datetime(1850,1,1)
# Logger object
log = logging.getLogger(__name__)
extra_axes = {"basin": {"ncdim": "3basin",
"ncvals": ["global_ocean", "atlantic_arctic_ocean", "indian_pacific_ocean"]},
"typesi": {"ncdim": "ncatice"},
"iceband": {"ncdim": "ncatice",
"ncunits": "m",
"ncvals": [0.277, 0.7915, 1.635, 2.906, 3.671],
"ncbnds": [0., 0.454, 1.129, 2.141, 3.671, 99.0]}}
# Experiment name
exp_name_ = None
# Reference date
ref_date_ = None
# Table root
table_root_ = None
# Files that are being processed in the current execution loop.
nemo_files_ = []
# Nemo bathymetry file
bathy_file_ = None
# Nemo bathymetry grid
bathy_grid_ = "opa_grid_T_2D"
# Nemo basin file
basin_file_ = None
# Nemo subbasin grid
basin_grid_ = "opa_grid_T_2D"
# Dictionary of NEMO grid type with cmor grid id.
grid_ids_ = {}
# List of depth axis ids with cmor grid id.
depth_axes_ = {}
# Dictionary of output frequencies with cmor time axis id.
time_axes_ = {}
# Dictionary of sea-ice output types, 1 by default.
type_axes_ = {}
# Dictionary of latitude axes ids for meridional variables.
lat_axes_ = {}
# Dictionary of masks
nemo_masks_ = {}
# Initializes the processing loop.
def initialize(path, expname, tableroot, refdate):
global log, nemo_files_, bathy_file_, basin_file_, exp_name_, table_root_, ref_date_
exp_name_ = expname
table_root_ = tableroot
ref_date_ = refdate
nemo_files_ = cmor_utils.find_nemo_output(path, expname)
expdir = os.path.abspath(os.path.join(os.path.realpath(path), "..", "..", ".."))
ofxdir = os.path.abspath(os.path.join(os.path.realpath(path), "..", "ofx-data"))
bathy_file_ = os.path.join(ofxdir, "bathy_meter.nc")
if not os.path.isfile(bathy_file_):
# Look in env or ec-earth run directory
bathy_file_ = os.environ.get("ECE2CMOR3_NEMO_BATHY_METER", os.path.join(expdir, "bathy_meter.nc"))
if not os.path.isfile(bathy_file_):
log.warning("Nemo bathymetry file %s does not exist...variable deptho in Ofx will be dismissed "
"whenever encountered" % bathy_file_)
bathy_file_ = None
basin_file_ = os.path.join(ofxdir, "subbasins.nc")
if not os.path.isfile(basin_file_):
# Look in env or ec-earth run directory
basin_file_ = os.environ.get("ECE2CMOR3_NEMO_SUBBASINS", os.path.join(expdir, "subbasins.nc"))
if not os.path.isfile(basin_file_):
log.warning("Nemo subbasin file %s does not exist...variable basin in Ofx will be dismissed "
"whenever encountered" % basin_file_)
basin_file_ = None
return True
# Resets the module globals.
def finalize():
global nemo_files_, grid_ids_, depth_axes_, time_axes_
nemo_files_ = []
grid_ids_ = {}
depth_axes_ = {}
time_axes_ = {}
# Executes the processing loop.
def execute(tasks):
global log, time_axes_, depth_axes_, table_root_
log.info("Looking up variables in files...")
tasks = lookup_variables(tasks)
log.info("Creating NEMO grids in CMOR...")
create_grids(tasks)
log.info("Creating NEMO masks...")
create_masks(tasks)
log.info("Executing %d NEMO tasks..." % len(tasks))
log.info("Cmorizing NEMO tasks...")
task_groups = cmor_utils.group(tasks, lambda tsk1: getattr(tsk1, cmor_task.output_path_key, None))
for filename, task_group in task_groups.iteritems():
dataset = netCDF4.Dataset(filename, 'r')
task_sub_groups = cmor_utils.group(task_group, lambda tsk2: tsk2.target.table)
for table, task_list in task_sub_groups.iteritems():
log.info("Start cmorization of %s in table %s" % (','.join([t.target.variable for t in task_list]), table))
try:
tab_id = cmor.load_table("_".join([table_root_, table]) + ".json")
cmor.set_table(tab_id)
except Exception as e:
log.error("CMOR failed to load table %s, skipping variables %s. Reason: %s"
% (table, ','.join([tsk3.target.variable for tsk3 in task_list]), e.message))
continue
if table not in time_axes_:
log.info("Creating time axes for table %s from data in %s..." % (table, filename))
create_time_axes(dataset, task_list, table)
if table not in depth_axes_:
log.info("Creating depth axes for table %s from data in %s ..." % (table, filename))
create_depth_axes(dataset, task_list, table)
if table not in type_axes_:
log.info("Creating extra axes for table %s from data in %s ..." % (table, filename))
create_type_axes(dataset, task_list, table)
for task in task_list:
execute_netcdf_task(dataset, task)
dataset.close()
def lookup_variables(tasks):
valid_tasks = []
for task in tasks:
if (task.target.table, task.target.variable) == ("Ofx", "deptho"):
if bathy_file_ is None:
log.error("Could not use bathymetry file for variable deptho in table Ofx: task skipped.")
task.set_failed()
else:
setattr(task, cmor_task.output_path_key, bathy_file_)
valid_tasks.append(task)
continue
if (task.target.table, task.target.variable) == ("Ofx", "basin"):
if basin_file_ is None:
log.error("Could not use subbasin file for variable basin in table Ofx: task skipped.")
task.set_failed()
else:
setattr(task, cmor_task.output_path_key, basin_file_)
valid_tasks.append(task)
continue
file_candidates = select_freq_files(task.target.frequency, task.target.variable)
results = []
for ncfile in file_candidates:
ds = netCDF4.Dataset(ncfile)
if task.source.variable() in ds.variables:
results.append(ncfile)
ds.close()
if len(results) == 0:
log.error('Variable {:20} in table {:10} was not found in the NEMO output files: task skipped.'
.format(task.source.variable(), task.target.table))
task.set_failed()
continue
if len(results) > 1:
log.error("Variable %s needed for %s in table %s was found in multiple NEMO output files %s... "
"dismissing task" % (task.source.variable(), task.target.variable, task.target.table,
','.join(results)))
task.set_failed()
continue
setattr(task, cmor_task.output_path_key, results[0])
valid_tasks.append(task)
return valid_tasks
def create_basins(target, dataset):
meanings = {"atlmsk": "atlantic_ocean", "indmsk": "indian_ocean", "pacmsk": "pacific_ocean"}
flagvals = [int(s) for s in getattr(target, "flag_values", "").split()]
basins = getattr(target, "flag_meanings", "").split()
data = numpy.copy(dataset.variables["glomsk"][...])
missval = int(getattr(target, cmor_target.int_missval_key))
data[data > 0] = missval
for var, basin in meanings.iteritems():
if var in dataset.variables.keys() and basin in basins:
flagval = flagvals[basins.index(basin)]
arr = dataset.variables[var][...]
data[arr > 0] = flagval
return data, dataset.variables["glomsk"].dimensions, missval
# Performs a single task.
def execute_netcdf_task(dataset, task):
global log
task.status = cmor_task.status_cmorizing
grid_axes = [] if not hasattr(task, "grid_id") else [getattr(task, "grid_id")]
z_axes = getattr(task, "z_axes", [])
t_axes = [] if not hasattr(task, "time_axis") else [getattr(task, "time_axis")]
type_axes = [getattr(task, dim + "_axis") for dim in type_axes_.get(task.target.table, {}).keys() if
hasattr(task, dim + "_axis")]
# TODO: Read axes order from netcdf file!
axes = grid_axes + z_axes + type_axes + t_axes
srcvar = task.source.variable()
if task.target.variable == "basin":
ncvar, dimensions, missval = create_basins(task.target, dataset)
else:
ncvar = dataset.variables[srcvar]
dimensions = ncvar.dimensions
missval = getattr(ncvar, "missing_value", getattr(ncvar, "_FillValue", numpy.nan))
varid = create_cmor_variable(task, srcvar, ncvar, axes)
time_dim, index, time_sel = -1, 0, None
for d in dimensions:
if d.startswith("time"):
time_dim = index
break
index += 1
time_sel = None
if len(t_axes) > 0 > time_dim:
for d in dataset.dimensions:
if d.startswith("time"):
time_sel = range(len(d)) # ensure copying of constant fields
break
if len(grid_axes) == 0: # Fix for global averages/sums
vals = numpy.ma.masked_equal(ncvar[...], missval)
ncvar = numpy.mean(vals, axis=(1, 2))
factor, term = get_conversion_constants(getattr(task, cmor_task.conversion_key, None))
log.info('Cmorizing variable {:20} in table {:7} in file {}'
.format(srcvar, task.target.table, getattr(task, cmor_task.output_path_key)))
mask = getattr(task.target, cmor_target.mask_key, None)
if mask is not None:
mask = nemo_masks_.get(mask, None)
cmor_utils.netcdf2cmor(varid, ncvar, time_dim, factor, term,
missval=getattr(task.target, cmor_target.missval_key, missval),
time_selection=time_sel,
mask=mask)
cmor.close(varid, file_name=True)
task.status = cmor_task.status_cmorized
# Returns the constants A,B for unit conversions of type y = A*x + B
def get_conversion_constants(conversion):
global log
if not conversion:
return 1.0, 0.0
if conversion == "tossqfix":
return 1.0, 0.0
if conversion == "frac2percent":
return 100.0, 0.0
if conversion == "percent2frac":
return 0.01, 0.0
if conversion == "K2degC":
return 1.0, -273.15
if conversion == "degC2K":
return 1.0, 273.15
if conversion == "sv2kgps":
return 1.e+9, 0.
log.error("Unknown explicit unit conversion %s will be ignored" % conversion)
return 1.0, 0.0
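# A minimal usage sketch (the helper name is illustrative, not part of the
# original module): conversions are of the form y = A*x + B, so applying one
# to a raw field is a single expression.
def _example_apply_conversion(raw_values):
    factor, term = get_conversion_constants("K2degC")  # A = 1.0, B = -273.15
    return factor * raw_values + term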
# Creates a variable in the cmor package
def create_cmor_variable(task, srcvar, ncvar, axes):
unit = getattr(ncvar, "units", None)
if (not unit) or hasattr(task, cmor_task.conversion_key): # Explicit unit conversion
unit = getattr(task.target, "units")
if hasattr(task.target, "positive") and len(task.target.positive) != 0:
return cmor.variable(table_entry=str(task.target.variable), units=str(unit), axis_ids=axes,
original_name=str(srcvar), positive=getattr(task.target, "positive"))
else:
return cmor.variable(table_entry=str(task.target.variable), units=str(unit), axis_ids=axes,
original_name=str(srcvar))
# Creates all depth axes for the given table from the given files
def create_depth_axes(ds, tasks, table):
global depth_axes_
if table not in depth_axes_:
depth_axes_[table] = {}
log.info("Creating depth axes for table %s using file %s..." % (table, ds.filepath()))
table_depth_axes = depth_axes_[table]
other_nc_axes = ["time_counter", "x", "y"] + [extra_axes[k]["ncdim"] for k in extra_axes.keys()]
for task in tasks:
z_axes = []
if task.source.variable() in ds.variables:
z_axes = [d for d in ds.variables[task.source.variable()].dimensions if d not in other_nc_axes]
z_axis_ids = []
for z_axis in z_axes:
if z_axis not in ds.variables:
log.error("Cannot find variable %s in %s for vertical axis construction" % (z_axis, ds.filepath()))
continue
zvar = ds.variables[z_axis]
axis_type = "half" if cmor_target.get_z_axis(task.target)[0] == "olevhalf" else "full"
key = "-".join([getattr(zvar, "long_name"), axis_type])
if key in table_depth_axes:
z_axis_ids.append(table_depth_axes[key])
else:
                    depth_bounds = ds.variables.get(getattr(zvar, "bounds", None))
if depth_bounds is None:
log.warning("No depth bounds found in file %s, taking midpoints" % (ds.filepath()))
depth_bounds = numpy.zeros((len(zvar[:]), 2), dtype=numpy.float64)
depth_bounds[1:, 0] = 0.5 * (zvar[0:-1] + zvar[1:])
depth_bounds[0:-1, 1] = depth_bounds[1:, 0]
depth_bounds[0, 0] = zvar[0]
depth_bounds[-1, 1] = zvar[-1]
entry = "depth_coord_half" if cmor_target.get_z_axis(task.target)[0] == "olevhalf" else "depth_coord"
units = getattr(zvar, "units", "")
if len(units) == 0:
log.warning("Assigning unit meters to depth coordinate %s without units" % entry)
units = "m"
b = depth_bounds[:, :]
b[b < 0] = 0
z_axis_id = cmor.axis(table_entry=entry, units=units, coord_vals=zvar[:], cell_bounds=b)
z_axis_ids.append(z_axis_id)
table_depth_axes[key] = z_axis_id
setattr(task, "z_axes", z_axis_ids)
def create_time_axes(ds, tasks, table):
global time_axes_
if table == "Ofx":
return
if table not in time_axes_:
time_axes_[table] = {}
log.info("Creating time axis for table %s using file %s..." % (table, ds.filepath()))
table_time_axes = time_axes_[table]
for task in tasks:
tgtdims = getattr(task.target, cmor_target.dims_key)
for time_dim in [d for d in list(set(tgtdims.split())) if d.startswith("time")]:
if time_dim in table_time_axes:
time_operator = getattr(task.target, "time_operator", ["point"])
nc_operator = getattr(ds.variables[task.source.variable()], "online_operation", "instant")
if time_operator[0] in ["point", "instant"] and nc_operator != "instant":
log.warning("Cmorizing variable %s with online operation attribute %s in %s to %s with time "
"operation %s" % (task.source.variable(), nc_operator, ds.filepath(), str(task.target),
time_operator[0]))
if time_operator[0] in ["mean", "average"] and nc_operator != "average":
log.warning("Cmorizing variable %s with online operation attribute %s in %s to %s with time "
"operation %s" % (task.source.variable(), nc_operator, ds.filepath(), str(task.target),
time_operator[0]))
tid = table_time_axes[time_dim]
else:
times, time_bounds, units, calendar = read_times(ds, task)
if times is None:
log.error("Failed to read time axis information from file %s, skipping variable %s in table %s" %
(ds.filepath(), task.target.variable, task.target.table))
task.set_failed()
continue
tstamps, tunits = cmor_utils.num2num(times, ref_date_, units, calendar, timeshift)
if calendar != "proleptic_gregorian":
cmor.set_cur_dataset_attribute("calendar", calendar)
if time_bounds is None:
tid = cmor.axis(table_entry=str(time_dim), units=tunits, coord_vals=tstamps)
else:
tbounds, tbndunits = cmor_utils.num2num(time_bounds, ref_date_, units, calendar, timeshift)
tid = cmor.axis(table_entry=str(time_dim), units=tunits, coord_vals=tstamps,
cell_bounds=tbounds)
table_time_axes[time_dim] = tid
setattr(task, "time_axis", tid)
return table_time_axes
# Creates a time axis for the currently loaded table
def read_times(ds, task):
def get_time_bounds(v):
bnd = getattr(v, "bounds", None)
if bnd in ds.variables:
res = ds.variables[bnd][:, :]
else:
res = numpy.empty([len(v[:]), 2])
res[1:, 0] = 0.5 * (v[0:-1] + v[1:])
res[:-1, 1] = res[1:, 0]
res[0, 0] = 1.5 * v[0] - 0.5 * v[1]
res[-1, 1] = 1.5 * v[-1] - 0.5 * v[-2]
return res
vals, bndvals, units, calendar = None, None, None, None
if cmor_target.is_instantaneous(task.target):
ncvar = ds.variables.get("time_instant", None)
if ncvar is not None:
vals, units, calendar = ncvar[:], getattr(ncvar, "units", None), getattr(ncvar, "calendar", None)
else:
log.warning("Could not find time_instant variable in %s, looking for generic time..." % ds.filepath())
for varname, ncvar in ds.variables.items():
if getattr(ncvar, "standard_name", "").lower() == "time":
log.warning("Found variable %s for instant time variable in file %s" % (varname, ds.filepath()))
vals, units, calendar = ncvar[:], getattr(ncvar, "units", None), getattr(ncvar, "calendar", None)
break
if vals is None:
log.error("Could not find time variable in %s for %s... giving up" % (ds.filepath(), str(task.target)))
else:
ncvar = ds.variables.get("time_centered", None)
if ncvar is not None:
vals, bndvals, units, calendar = ncvar[:], get_time_bounds(ncvar), getattr(ncvar, "units", None), \
getattr(ncvar, "calendar", None)
else:
log.warning("Could not find time_centered variable in %s, looking for generic time..." % ds.filepath())
for varname, ncvar in ds.variables.items():
if getattr(ncvar, "standard_name", "").lower() == "time":
log.warning("Found variable %s for instant time variable in file %s" % (varname, ds.filepath()))
vals, bndvals, units, calendar = ncvar[:], get_time_bounds(ncvar), getattr(ncvar, "units", None), \
getattr(ncvar, "calendar", None)
break
if vals is None:
log.error("Could not find time variable in %s for %s... giving up" % (ds.filepath(), str(task.target)))
# Fix for proleptic gregorian in XIOS output as gregorian
if calendar is None or calendar == "gregorian":
calendar = "proleptic_gregorian"
return vals, bndvals, units, calendar
def create_type_axes(ds, tasks, table):
global type_axes_
if table not in type_axes_:
type_axes_[table] = {}
log.info("Creating extra axes for table %s using file %s..." % (table, ds.filepath()))
table_type_axes = type_axes_[table]
for task in tasks:
tgtdims = set(getattr(task.target, cmor_target.dims_key).split()).intersection(extra_axes.keys())
for dim in tgtdims:
if dim in table_type_axes:
axis_id = table_type_axes[dim]
else:
axisinfo = extra_axes[dim]
nc_dim_name = axisinfo["ncdim"]
if nc_dim_name in ds.dimensions:
ncdim, ncvals = ds.dimensions[nc_dim_name], axisinfo.get("ncvals", [])
if len(ncdim) == len(ncvals):
axis_values, axis_unit = ncvals, axisinfo.get("ncunits", "1")
else:
if any(ncvals):
log.error("Ece2cmor values for extra axis %s, %s, do not match dimension %s length %d found"
" in file %s, taking values found in file" % (dim, str(ncvals), nc_dim_name,
len(ncdim), ds.filepath()))
ncvars = [v for v in ds.variables if list(ds.variables[v].dimensions) == [ncdim]]
axis_values, axis_unit = list(range(len(ncdim))), "1"
if any(ncvars):
if len(ncvars) > 1:
log.warning("Multiple axis variables found for dimension %s in file %s, choosing %s" %
(nc_dim_name, ds.filepath(), ncvars[0]))
axis_values, axis_unit = list(ncvars[0][:]), getattr(ncvars[0], "units", None)
else:
log.error("Dimension %s could not be found in file %s, inserting using length-one dimension "
"instead" % (nc_dim_name, ds.filepath()))
axis_values, axis_unit = [1], "1"
if "ncbnds" in axisinfo:
bndlist = axisinfo["ncbnds"]
if len(bndlist) - 1 != len(axis_values):
log.error("Length of axis bounds %d does not correspond to axis coordinates %s" %
(len(bndlist) - 1, str(axis_values)))
bnds = numpy.zeros((len(axis_values), 2))
bnds[:, 0] = bndlist[:-1]
bnds[:, 1] = bndlist[1:]
axis_id = cmor.axis(table_entry=dim, coord_vals=axis_values, units=axis_unit, cell_bounds=bnds)
else:
axis_id = cmor.axis(table_entry=dim, coord_vals=axis_values, units=axis_unit)
table_type_axes[dim] = axis_id
setattr(task, dim + "_axis", axis_id)
return table_type_axes
# Selects files with data with the given frequency
def select_freq_files(freq, varname):
global exp_name_, nemo_files_
if freq == "fx":
nemo_freq = "1y"
elif freq in ["yr", "yrPt"]:
nemo_freq = "1y"
elif freq == "monPt":
nemo_freq = "1m"
# TODO: Support climatological variables
# elif freq == "monC":
# nemo_freq = "1m" # check
elif freq.endswith("mon"):
n = 1 if freq == "mon" else int(freq[:-3])
nemo_freq = str(n) + "m"
elif freq.endswith("day"):
n = 1 if freq == "day" else int(freq[:-3])
nemo_freq = str(n) + "d"
elif freq.endswith("hr"):
n = 1 if freq == "hr" else int(freq[:-2])
nemo_freq = str(n) + "h"
elif freq.endswith("hrPt"):
n = 1 if freq == "hrPt" else int(freq[:-4])
nemo_freq = str(n) + "h"
else:
log.error('Could not associate cmor frequency {:7} with a '
'nemo output frequency for variable {}'.format(freq, varname))
return []
return [f for f in nemo_files_ if cmor_utils.get_nemo_frequency(f, exp_name_) == nemo_freq]
def create_masks(tasks):
global nemo_masks_
for task in tasks:
mask = getattr(task.target, cmor_target.mask_key, None)
if mask is not None and mask not in nemo_masks_.keys():
for nemo_file in nemo_files_:
ds = netCDF4.Dataset(nemo_file, 'r')
maskvar = ds.variables.get(mask, None)
if maskvar is not None:
dims = maskvar.dimensions
if len(dims) == 2:
nemo_masks_[mask] = numpy.logical_not(numpy.ma.getmask(maskvar[...]))
elif len(dims) == 3:
nemo_masks_[mask] = numpy.logical_not(numpy.ma.getmask(maskvar[0, ...]))
else:
log.error("Could not create mask %s from nc variable with %d dimensions" % (mask, len(dims)))
# Reads all the NEMO grid data from the input files.
def create_grids(tasks):
task_by_file = cmor_utils.group(tasks, lambda tsk: getattr(tsk, cmor_task.output_path_key, None))
def get_nemo_grid(f):
if f == bathy_file_:
return bathy_grid_
if f == basin_file_:
return basin_grid_
return cmor_utils.get_nemo_grid(f)
file_by_grid = cmor_utils.group(task_by_file.keys(), get_nemo_grid)
for grid_name, file_paths in file_by_grid.iteritems():
output_files = set(file_paths) - {bathy_file_, basin_file_, None}
if any(output_files):
filename = list(output_files)[0]
else:
filename = file_paths[0]
log.warning("Using the file %s of EC-Earth to build %s due to lack of other output" % (filename, grid_name))
grid = read_grid(filename)
write_grid(grid, [t for fname in file_paths for t in task_by_file[fname]])
# Reads a particular NEMO grid from the given input file.
def read_grid(ncfile):
ds = None
try:
ds = netCDF4.Dataset(ncfile, 'r')
name = getattr(ds.variables["nav_lon"], "nav_model", cmor_utils.get_nemo_grid(ncfile))
if name == "scalar":
return None
lons = ds.variables["nav_lon"][:, :] if "nav_lon" in ds.variables else []
lats = ds.variables["nav_lat"][:, :] if "nav_lat" in ds.variables else []
if len(lons) == 0 and len(lats) == 0:
return None
return nemo_grid(name, lons, lats)
finally:
if ds is not None:
ds.close()
# Transfers the grid to cmor.
def write_grid(grid, tasks):
global grid_ids_, lat_axes_
nx = grid.lons.shape[0]
ny = grid.lons.shape[1]
if ny == 1:
if nx == 1:
log.error("The grid %s consists of a single point which is not supported, dismissing variables %s" %
(grid.name, ','.join([t.target.variable + " in " + t.target.table for t in tasks])))
return
for task in tasks:
dims = getattr(task.target, "space_dims", "")
if "longitude" in dims:
log.error("Variable %s in %s has longitude dimension, but this is absent in the ocean output file of "
"grid %s" % (task.target.variable, task.target.table, grid.name))
task.set_failed()
continue
latnames = {"latitude", "gridlatitude"}
latvars = list(set(dims).intersection(set(latnames)))
if not any(latvars):
log.error("Variable %s in %s has no (grid-)latitude defined where its output grid %s does, dismissing "
"it" % (task.target.variable, task.target.table, grid.name))
task.set_failed()
continue
if len(latvars) > 1:
log.error("Variable %s in %s with double-latitude dimensions %s is not supported" %
(task.target.variable, task.target.table, str(dims)))
task.set_failed()
continue
key = (task.target.table, grid.name, latvars[0])
if key not in lat_axes_.keys():
cmor.load_table(table_root_ + "_" + task.target.table + ".json")
lat_axis_id = cmor.axis(table_entry=latvars[0], coord_vals=grid.lats[:, 0], units="degrees_north",
cell_bounds=grid.vertex_lats)
lat_axes_[key] = lat_axis_id
else:
lat_axis_id = lat_axes_[key]
setattr(task, "grid_id", lat_axis_id)
else:
if grid.name not in grid_ids_:
cmor.load_table(table_root_ + "_grids.json")
i_index_id = cmor.axis(table_entry="j_index", units="1", coord_vals=numpy.array(range(1, nx + 1)))
j_index_id = cmor.axis(table_entry="i_index", units="1", coord_vals=numpy.array(range(1, ny + 1)))
grid_id = cmor.grid(axis_ids=[i_index_id, j_index_id],
latitude=grid.lats,
longitude=grid.lons,
latitude_vertices=grid.vertex_lats,
longitude_vertices=grid.vertex_lons)
grid_ids_[grid.name] = grid_id
else:
grid_id = grid_ids_[grid.name]
for task in tasks:
dims = getattr(task.target, "space_dims", [])
if "latitude" in dims and "longitude" in dims:
setattr(task, "grid_id", grid_id)
else:
log.error("Variable %s in %s has output on a 2d horizontal grid, but its requested dimensions are %s" %
(task.target.variable, task.target.table, str(dims)))
task.set_failed()
# Class holding a NEMO grid, including bounds arrays
class nemo_grid(object):
def __init__(self, name_, lons_, lats_):
self.name = name_
flon = numpy.vectorize(lambda x: x % 360)
flat = numpy.vectorize(lambda x: (x + 90) % 180 - 90)
self.lons = flon(nemo_grid.smoothen(lons_))
input_lats = lats_
# Dirty hack for lost precision in zonal grids:
if input_lats.shape[1] == 1:
if input_lats.shape[0] > 2 and input_lats[-1, 0] == input_lats[-2, 0]:
input_lats[-1, 0] = input_lats[-1, 0] + (input_lats[-2, 0] - input_lats[-3, 0])
self.lats = flat(input_lats)
self.vertex_lons = nemo_grid.create_vertex_lons(lons_)
self.vertex_lats = nemo_grid.create_vertex_lats(input_lats)
@staticmethod
def create_vertex_lons(a):
ny = a.shape[0]
nx = a.shape[1]
f = numpy.vectorize(lambda x: x % 360)
if nx == 1: # Longitudes were integrated out
if ny == 1:
return f(numpy.array([a[0, 0]]))
return numpy.zeros([ny, 2])
b = numpy.zeros([ny, nx, 4])
b[:, 1:nx, 0] = f(0.5 * (a[:, 0:nx - 1] + a[:, 1:nx]))
b[:, 0, 0] = f(1.5 * a[:, 0] - 0.5 * a[:, 1])
b[:, 0:nx - 1, 1] = b[:, 1:nx, 0]
b[:, nx - 1, 1] = f(1.5 * a[:, nx - 1] - 0.5 * a[:, nx - 2])
b[:, :, 2] = b[:, :, 1]
b[:, :, 3] = b[:, :, 0]
return b
@staticmethod
def create_vertex_lats(a):
ny = a.shape[0]
nx = a.shape[1]
        f = numpy.vectorize(lambda x: (x + 90) % 180 - 90)
import numpy as np
import itertools
from scipy.stats import norm, chi, t
from scipy.special import erf, erfinv
from scipy.stats import beta
from time import time
# Sample Sets
class VectorRV:
def __init__(self, name, scaling=1.0):
self._name = name
self._scaling = scaling
self._value = None
@property
def name(self):
return self._name
def set_value(self, value):
self._value = value
def is_set(self):
return not(self._value is None)
def value(self):
return self._value
    def __eq__(self, other):
        # Name-based equality is an assumption for this sketch.
        return isinstance(other, VectorRV) and self._name == other._name
class SampleSet:
def __init__(self, name, scaling=1.0):
self._name = name
self._scaling = scaling
self._value = None
def set_value(self, value):
self._value = value
def is_set(self):
return not(self._value is None)
def value(self):
return self._value
# def __init__(self, name):
# self._name = name
# super().__init(name)
@property
def name(self):
return self._name
def __getitem__(self, vrange, name=None):
assert isinstance(vrange, IndexSampleSet)
assert self.is_set() and vrange.is_set(), 'No data specified'
name = ('%s[%s]' % (self.name, vrange.name)) if name is None else name
sampleset = self.copy(name)
sampleset.set_value(self._value[vrange._value])
return sampleset
def copy(self, name):
name = (self.name + ' copy') if name is None else name
sampleset = type(self)(name)
sampleset.set_value(self._value)
return sampleset
class BoundedRealSampleSet(SampleSet):
def __init__(self, name, lower=-np.inf, upper=np.inf):
super().__init__(name)
self._lower = lower
self._upper = upper
self._range = (upper-lower) if not(np.isinf(lower) or np.isinf(upper)) else np.inf
def expected_value(self, name=None, mode='trivial', scaling=1.0):
name = 'E[%s]'%self.name if name is None else name
return BoundedRealExpectedValueRV(name, self, mode=mode, scaling=scaling)
def clamp(self, X):
return np.maximum(self._lower, np.minimum(self._upper, X))
def copy(self, name=None):
name = (self.name + ' copy') if name is None else name
        sampleset = BoundedRealSampleSet(name, lower=self._lower, upper=self._upper)
sampleset.set_value(self._value)
return sampleset
class BinomialSampleSet(SampleSet):
def __init__(self, name):
super().__init__(name)
def proportion(self, name=None, mode='trivial', scaling=1.0):
name = 'Pr[%s=1]' % self.name if name is None else name
return BinomialProportionRV(name, self, mode=mode, scaling=scaling)
class IndexSampleSet(SampleSet):
def __init__(self, name):
super().__init__(name)
################################################
# ScalarRVs Interfaces #
################################################
# A ScalarRV is a single quantity that can be estimated and bounded
# A ConstantScalarRV is a ScalarRV that defines a constant
# An ObservedScalarRV is a ScalarRV that is associated with a SampleSet and
# constructs estimates/bounds based on statistics on the SampleSet's contents
# ObservedScalarRVs should not be instantiated directly
# A FunctionScalarRVs is a ScalarRV that represents a function applied to one
# or more other ScalarRVs, and computes estimates/bounds recursively
# based on the estimates/bounds of the consituent ScalarRVs
# FunctionScalarRVs should not be instantiated directly
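# A minimal usage sketch of the classes in this module; the sample values,
# delta, and the helper's name are illustrative and not part of the original.
def _example_bounded_mean_interval():
    rewards = BoundedRealSampleSet('reward', lower=0.0, upper=1.0)
    rewards.set_value(np.array([0.2, 0.9, 0.4, 0.7]))
    mu = rewards.expected_value(mode='hoeffding')
    # Plug-in estimate of E[reward] and a two-sided (1-delta) confidence interval.
    return mu.value(), mu.bound(delta=0.05)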
class ScalarRV:
def __init__(self, name, scaling=1.0):
self._name = name
self._scaling = scaling
@property
def name(self):
return self._name
def upper(self, delta, split_delta=False, n_scale=1.0):
return self.bound(delta, side='upper', split_delta=split_delta, n_scale=n_scale)
def lower(self, delta, split_delta=False, n_scale=1.0):
return self.bound(delta, side='lower', split_delta=split_delta, n_scale=n_scale)
def bound(self, delta, side='both', split_delta=False, n_scale=1):
if split_delta:
n_bounds = len(self.get_observed())
delta = delta / n_bounds
l,u = self._bound(delta, n_scale=n_scale)
# rescale the bound if needed
if not(any(np.isinf([l,u])) or any(np.isnan([l,u]))):
mod = 0.5*(u - l)*(self._scaling-1)
l,u = l-mod, u+mod
if side == 'upper':
return u
elif side == 'lower':
return l
return (l,u)
def value(self):
raise NotImplementedError()
def _bound(self, delta, n_scale=1.0):
raise NotImplementedError()
def __add__(self, other):
return SumRV(self, other)
def __div__(self, other):
return RatioRV(self, other)
def __truediv__(self, other):
return self.__div__(other)
def __mul__(self, other):
return ProductRV(self, other)
def __neg__(self):
return NegativeRV(self)
def __sub__(self, other):
return SumRV(self, -other)
def get_observed(self):
return np.array([])
def _recip_value(self):
return 1.0/self.value()
class ObservedScalarRV(ScalarRV):
''' Parent class to represent a quantity estimated from a SampleSet. '''
def __init__(self, name, samples, scaling=1.0):
super().__init__(name, scaling=scaling)
self._samples = samples
def _get_samples(self):
assert self._samples.is_set(), 'ScalarRV: samples not set.'
return self._samples.value()
def get_observed(self):
return np.array([ self.name ])
class FunctionScalarRV(ScalarRV):
''' Parent class to represent a scalar-valued function of ScalarRVs. '''
def __init__(self, name, rvs, scaling=1.0):
msg = 'can only define compound RVs from other scalar RVs.'
assert all([ isinstance(rv,ScalarRV) for rv in rvs ]), msg
super().__init__(name, scaling=scaling)
self._rvs = rvs
def get_observed(self):
return np.unique(np.concatenate([ rv.get_observed() for rv in self._rvs ]))
################################################
# ObservedScalarRVs #
################################################
class ConstantScalarRV(ScalarRV):
''' Concrete class to represent constants. '''
def __init__(self, value, name=None):
name = str(value) if name is None else name
super().__init__(name)
self._value = value
def value(self):
return self._value
def _bound(self, delta, n_scale=1.0):
return (self._value, self._value)
def _sign(self):
return np.sign(self.value())
def __repr__(self):
return self.name
################################################
# ObservedScalarRVs #
################################################
class BoundedRealExpectedValueRV(ObservedScalarRV):
def __init__(self, name, samples, mode='trivial', scaling=1.0):
assert isinstance(samples, BoundedRealSampleSet), ('Cannot create BoundedRealExpectedValueRV from type \'%s\'' % samples.__class__.__name__)
super().__init__(name, samples, scaling=scaling)
self.set_mode(mode)
def set_mode(self, mode):
self._mode = mode
def value(self):
S = self._get_samples()
return np.mean(S) if len(S) > 0 else np.nan
def _sign(self):
return np.sign(self.value())
def _bound(self, delta, n_scale=1.0):
if self._mode == 'trivial':
return (self._samples._lower, self._samples._upper)
# Get statistics of the samples
S = self._get_samples()
if len(S) == 0 or n_scale == 0:
return (self._samples._lower, self._samples._upper)
n, mean = len(S) * n_scale, np.mean(S)
S_range = self._samples._range
# Compute the bound
if self._mode == 'hoeffding':
            offset = S_range * np.sqrt(0.5*np.log(2/delta)/n)  # two-sided Hoeffding half-width
# Copyright 2017-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (Union, Iterable, Tuple, List)
from pandas import (DataFrame, concat, Series)
import numpy as np
from scipy import optimize
from scipy.stats import norm
from spotify_confidence.analysis.constants import (
INCREASE_PREFFERED, DECREASE_PREFFERED, TWO_SIDED,
NIM_TYPE, NIM_INPUT_COLUMN_NAME, PREFERRED_DIRECTION_INPUT_NAME,
NIM, NULL_HYPOTHESIS, PREFERENCE,
SFX1, SFX2, POINT_ESTIMATE)
def get_all_group_columns(categorical_columns: Iterable,
ordinal_column: str) -> Iterable:
all_columns = categorical_columns + [ordinal_column]
all_columns = [col for col in all_columns if col is not None]
return all_columns
def validate_categorical_columns(
categorical_group_columns: Union[str, Iterable]) -> Iterable:
if isinstance(categorical_group_columns, str):
pass
elif isinstance(categorical_group_columns, Iterable):
pass
else:
raise TypeError("""categorical_group_columns must be string or
iterable (list of columns) and you must
provide at least one""")
def listify(column_s: Union[str, Iterable]) -> List:
if isinstance(column_s, str):
return [column_s]
elif isinstance(column_s, Iterable):
return list(column_s)
elif column_s is None:
return []
def get_remaning_groups(all_groups: Iterable,
some_groups: Iterable) -> Iterable:
if some_groups is None:
remaining_groups = all_groups
else:
remaining_groups = [
group for group in all_groups
if group not in some_groups and group is not None
]
return remaining_groups
def validate_levels(df: DataFrame,
level_columns: Union[str, Iterable],
levels: Iterable):
for level in levels:
try:
df.groupby(level_columns).get_group(level)
except (KeyError, ValueError):
raise ValueError("""
Invalid level: '{}'
Must supply a level within the ungrouped dimensions: {}
Valid levels:
{}
""".format(
level, level_columns,
list(df.groupby(level_columns).groups.keys())))
def add_nim_columns(df: DataFrame, nims: NIM_TYPE) -> DataFrame:
def _nim_2_signed_nim(nim: Tuple[float, str]) -> Tuple[float, float, str]:
nim_value = 0 if nim[0] is None or (type(nim[0]) is float and np.isnan(nim[0])) else nim[0]
if nim[1] is None or (type(nim[1]) is float and np.isnan(nim[1])):
return (nim[0], nim_value, TWO_SIDED)
elif nim[1].lower() == INCREASE_PREFFERED:
return (nim[0], -nim_value, 'larger')
elif nim[1].lower() == DECREASE_PREFFERED:
return (nim[0], nim_value, 'smaller')
else:
raise ValueError(f'{nim[1].lower()} not in '
f'{[INCREASE_PREFFERED, DECREASE_PREFFERED]}')
if nims is None:
return (
df.assign(**{NIM: None})
.assign(**{NULL_HYPOTHESIS: 0})
.assign(**{PREFERENCE: TWO_SIDED})
)
elif type(nims) is tuple:
return (
df.assign(**{NIM: _nim_2_signed_nim((nims[0], nims[1]))[0]})
.assign(**{NULL_HYPOTHESIS: df[POINT_ESTIMATE] * _nim_2_signed_nim((nims[0], nims[1]))[1]})
.assign(**{PREFERENCE: _nim_2_signed_nim((nims[0], nims[1]))[2]})
)
elif type(nims) is dict:
sgnd_nims = {group: _nim_2_signed_nim(nim) for group, nim in nims.items()}
nim_df = (
DataFrame(index=df.index,
columns=[NIM, NULL_HYPOTHESIS, PREFERENCE],
data=list(df.index.to_series().map(sgnd_nims)))
)
return (
df.assign(**{NIM: nim_df[NIM]})
.assign(**{NULL_HYPOTHESIS: df[POINT_ESTIMATE] * nim_df[NULL_HYPOTHESIS]})
.assign(**{PREFERENCE: nim_df[PREFERENCE]})
)
elif type(nims) is bool:
return (
df.assign(**{NIM: lambda df: df[NIM_INPUT_COLUMN_NAME]})
.assign(**{NULL_HYPOTHESIS: lambda df: df.apply(
lambda row: row[POINT_ESTIMATE] * _nim_2_signed_nim((row[NIM], row[PREFERRED_DIRECTION_INPUT_NAME]))[1],
axis=1)})
.assign(**{PREFERENCE: lambda df: df.apply(lambda row: _nim_2_signed_nim(
(row[NIM], row[PREFERRED_DIRECTION_INPUT_NAME]))[2], axis=1)})
)
else:
raise ValueError(f'non_inferiority_margins must be None, tuple, dict,'
f'or DataFrame, but is {type(nims)}.')
def equals_none_or_nan(x, y):
return True if x == y or (x is None and y is None) \
or (type(x) is float and type(y) is float and np.isnan(x) and np.isnan(y)) else False
def validate_and_rename_nims(df: DataFrame) -> DataFrame:
if (df.apply(lambda row: equals_none_or_nan(row[NIM + SFX1], row[NIM + SFX2]), axis=1).all() and
df.apply(lambda row: equals_none_or_nan(row[PREFERENCE + SFX1], row[PREFERENCE + SFX2]), axis=1).all()):
return (
df.rename(columns={NIM + SFX1: NIM,
NULL_HYPOTHESIS + SFX1: NULL_HYPOTHESIS,
PREFERENCE + SFX1: PREFERENCE})
.drop(columns=[NIM + SFX2,
NULL_HYPOTHESIS + SFX2,
PREFERENCE + SFX2])
)
raise ValueError("Non-inferiority margins do not agree across levels")
def validate_and_rename_final_expected_sample_sizes(df: DataFrame, column: str) -> DataFrame:
if column is None:
return df
if df.apply(lambda row: equals_none_or_nan(row[column + SFX1], row[column + SFX2]), axis=1).all():
return (
df.rename(columns={column + SFX1: column})
.drop(columns=[column + SFX2])
)
raise ValueError("Final expected sample sizes do not agree across levels")
def select_levels(df: DataFrame,
level_columns: Union[str, Iterable],
level_1: Union[str, Tuple],
level_2: Union[str, Tuple]) -> DataFrame:
gdf = df.groupby(level_columns)
return concat([gdf.get_group(level_1), gdf.get_group(level_2)])
def level2str(level: Union[str, Tuple]) -> str:
if isinstance(level, str) or not isinstance(level, Iterable):
return str(level)
else:
return ', '.join([str(sub_level) for sub_level in level])
def validate_data(df: DataFrame,
numerator: str,
numerator_sumsq: str,
denominator: str,
group_columns: Iterable,
ordinal_group_column: str):
"""Integrity check input dataframe.
"""
_validate_column(df, numerator)
if numerator_sumsq is not None:
_validate_column(df, numerator_sumsq)
_validate_column(df, denominator)
if not group_columns:
raise ValueError("""At least one of `categorical_group_columns`
or `ordinal_group_column` must be specified."""
)
for col in group_columns:
_validate_column(df, col)
# Ensure there's at most 1 observation per grouping.
max_one_row_per_grouping = all(
df.groupby(group_columns).size() <= 1)
if not max_one_row_per_grouping:
raise ValueError(
"""Each grouping should have at most 1 observation.""")
if ordinal_group_column:
ordinal_column_type = df[
ordinal_group_column].dtype.type
if not np.issubdtype(ordinal_column_type, np.number) \
and not issubclass(ordinal_column_type, np.datetime64):
raise TypeError("""`ordinal_group_column` is type `{}`.
Must be number or datetime type.""".format(ordinal_column_type))
def _validate_column(df: DataFrame, col: str):
if col not in df.columns:
raise ValueError(f"""Column {col} is not in dataframe""")
def _get_finite_bounds(numbers: Series) -> Tuple[float, float]:
finite_numbers = numbers[numbers.abs() != float("inf")]
return finite_numbers.min(), finite_numbers.max()
def axis_format_precision(numbers: Series,
absolute: bool,
extra_zeros: int = 0) -> Tuple[str, float, float]:
min_value, max_value = _get_finite_bounds(numbers)
if max_value == min_value:
return "0.00", min_value, max_value
extra_zeros += 2 if absolute else 0
precision = -int(np.log10(abs(max_value - min_value))) + extra_zeros
zeros = ''.join(['0'] * precision)
return "0.{}{}".format(zeros, '' if absolute else '%'), min_value, max_value
def to_finite(s: Series, limit: float) -> Series:
return s.clip(-100*abs(limit), 100*abs(limit))
def add_color_column(df: DataFrame, cols: Iterable) -> DataFrame:
return df.assign(color=df[cols].agg(level2str, axis='columns'))
def power_calculation(mde: float,
baseline_var: float,
alpha: float,
n1: int,
n2: int) -> float:
z_alpha = norm.ppf(1 - alpha / 2)
a = abs(mde) / np.sqrt(baseline_var)
b = np.sqrt(n1 * n2 / (n1 + n2))
z_stat = a * b
return norm.cdf(z_stat - z_alpha) + norm.cdf(-z_stat - z_alpha)
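# Illustrative sanity check (numbers are assumptions, not from the original
# module): power to detect a 1pp absolute change on a binary metric with
# baseline rate 0.2, alpha = 0.05 and 10,000 units in each group.
def _example_power_calculation():
    baseline_rate = 0.2
    return power_calculation(mde=0.01,
                             baseline_var=baseline_rate * (1 - baseline_rate),
                             alpha=0.05,
                             n1=10000,
                             n2=10000)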
###################################################################################################
#################### current powered effect
def _currently_powered_effect(
control_avg: float,
control_var: float,
metric_type: str,
non_inferiority: bool = False,
power: float = None,
alpha: float = None,
z_power: float = None,
z_alpha: float = None,
kappa: float = None,
proportion_of_total: float = None,
current_number_of_units: float = None,
):
z_alpha = norm.ppf(1 - alpha) if z_alpha is None else z_alpha
z_power = norm.ppf(power) if z_power is None else z_power
if metric_type == BINARY and not non_inferiority:
effect = _search_MDE_binary_local_search(
control_avg=control_avg,
control_var=control_var,
non_inferiority=non_inferiority,
kappa=kappa,
proportion_of_total=proportion_of_total,
current_number_of_units=current_number_of_units,
z_alpha=z_alpha,
z_power=z_power,
)[0]
else:
treatment_var = _get_hypothetical_treatment_var(
metric_type, non_inferiority, control_avg, control_var, hypothetical_effect=0
)
n2_partial = np.power((z_alpha + z_power), 2) * (control_var / kappa + treatment_var)
        effect = np.sqrt((1 / (current_number_of_units * proportion_of_total)) * (n2_partial + kappa * n2_partial))
"""
Script entry point
"""
from src.calrissian.particle333_network import Particle333Network
from src.calrissian.layers.particle333 import Particle333
from src.calrissian.optimizers.particle333_sgd import Particle333SGD
from multiprocessing import Pool
import numpy as np
import time
import pandas as pd
import pickle
def main():
train_X = np.asarray([[0.45, 3.33], [0.0, 2.22], [0.45, -0.54]])
train_Y = np.asarray([[1.0], [0.0], [0.0]])
net = Particle333Network(cost="mse")
net.append(Particle333(2, 5, activation="sigmoid", nr=4, nc=6))
net.append(Particle333(5, 1, activation="sigmoid", nr=4, nc=6))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
print(net.cost_gradient(train_X, train_Y))
def main2():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
train_Y = np.random.normal(0.0, 0.1, (3, 1))
nr = 3
nc = 3
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(2, 2, 3),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(2, 2, 3),
output_shape=(2, 2, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(4, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def main3():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
train_Y = np.random.normal(0.0, 0.1, (3, 1))
nr = 3
nc = 3
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(2, 2, 3),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5),
output_pool_shape=(2, 2, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(2, 2, 3),
output_shape=(2, 2, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(4, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def main4():
train_X = np.random.normal(0.0, 0.1, (1, 4*4))
train_Y = np.random.normal(0.0, 0.1, (1, 1))
nr = 3
nc = 1
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(1, 1, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(1, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def fd():
ts = time.time()
train_X = None
train_Y = None
nc = None
nr = None
net = None
if False:
train_X = np.asarray([[0.2, -0.3], [0.1, -0.9], [0.1, 0.05], [0.2, -0.3], [0.1, -0.9], [0.1, 0.05]])
train_Y = np.asarray(
[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
nc = 4
nr = 3
net = Particle333Network(cost="categorical_cross_entropy")
net.append(Particle333(2, 5, activation="sigmoid", nr=nr, nc=nc))
net.append(Particle333(5, 6, activation="sigmoid", nr=nr, nc=nc))
net.append(Particle333(6, 3, activation="softmax", nr=nr, nc=nc))
else:
train_X = np.random.normal(0.0, 1.0, (3, 4 * 4))
        train_Y = np.random.choice([0.0, 1.0], (3, 1))
import sys
import numpy as np
import scipy.sparse as sp
from ctypes import c_int, byref
from numpy.ctypeslib import ndpointer
import time
import qutip.settings as qset
# Load solver functions from mkl_lib
pardiso = qset.mkl_lib.pardiso
pardiso_delete = qset.mkl_lib.pardiso_handle_delete
if sys.maxsize > 2**32: # Running 64-bit
pardiso_64 = qset.mkl_lib.pardiso_64
pardiso_delete_64 = qset.mkl_lib.pardiso_handle_delete_64
def _pardiso_parameters(hermitian, has_perm,
max_iter_refine,
scaling_vectors,
weighted_matching):
iparm = np.zeros(64, dtype=np.int32)
iparm[0] = 1 # Do not use default values
iparm[1] = 3 # Use openmp nested dissection
if has_perm:
iparm[4] = 1
iparm[7] = max_iter_refine # Max number of iterative refinements
if hermitian:
iparm[9] = 8
else:
iparm[9] = 13
if not hermitian:
iparm[10] = int(scaling_vectors)
iparm[12] = int(weighted_matching) # Non-symmetric weighted matching
iparm[17] = -1
iparm[20] = 1
iparm[23] = 1 # Parallel factorization
iparm[26] = 0 # Check matrix structure
iparm[34] = 1 # Use zero-based indexing
return iparm
# Set error messages
pardiso_error_msgs = {
    '-1': 'Input inconsistent',
'-2': 'Out of memory',
'-3': 'Reordering problem',
'-4':
'Zero pivot, numerical factorization or iterative refinement problem',
'-5': 'Unclassified internal error',
'-6': 'Reordering failed',
'-7': 'Diagonal matrix is singular',
'-8': '32-bit integer overflow',
'-9': 'Not enough memory for OOC',
'-10': 'Error opening OOC files',
'-11': 'Read/write error with OOC files',
'-12': 'Pardiso-64 called from 32-bit library',
}
def _default_solver_args():
return {
'hermitian': False,
'posdef': False,
'max_iter_refine': 10,
'scaling_vectors': True,
'weighted_matching': True,
'return_info': False,
}
class mkl_lu:
"""
Object pointing to LU factorization of a sparse matrix
generated by mkl_splu.
Methods
-------
solve(b, verbose=False)
Solve system of equations using given RHS vector 'b'.
Returns solution ndarray with same shape as input.
info()
Returns the statistics of the factorization and
solution in the lu.info attribute.
delete()
Deletes the allocated solver memory.
"""
def __init__(self, np_pt=None, dim=None, is_complex=None, data=None,
indptr=None, indices=None, iparm=None, np_iparm=None,
mtype=None, perm=None, np_perm=None, factor_time=None):
self._np_pt = np_pt
self._dim = dim
self._is_complex = is_complex
self._data = data
self._indptr = indptr
self._indices = indices
self._iparm = iparm
self._np_iparm = np_iparm
self._mtype = mtype
self._perm = perm
self._np_perm = np_perm
self._factor_time = factor_time
self._solve_time = None
def solve(self, b, verbose=None):
b_shp = b.shape
if b.ndim == 2 and b.shape[1] == 1:
b = b.ravel()
nrhs = 1
elif b.ndim == 2 and b.shape[1] != 1:
nrhs = b.shape[1]
b = b.ravel(order='F')
else:
b = b.ravel()
nrhs = 1
data_type = np.complex128 if self._is_complex else np.float64
if b.dtype != data_type:
            b = b.astype(data_type, copy=False)
# Create solution array (x) and pointers to x and b
x = np.zeros(b.shape, dtype=data_type, order='C')
np_x = x.ctypes.data_as(ndpointer(data_type, ndim=1, flags='C'))
np_b = b.ctypes.data_as(ndpointer(data_type, ndim=1, flags='C'))
error = np.zeros(1, dtype=np.int32)
np_error = error.ctypes.data_as(ndpointer(np.int32, ndim=1, flags='C'))
# Call solver
_solve_start = time.time()
pardiso(
self._np_pt,
byref(c_int(1)),
byref(c_int(1)),
byref(c_int(self._mtype)),
byref(c_int(33)),
byref(c_int(self._dim)),
self._data,
self._indptr,
self._indices,
self._np_perm,
byref(c_int(nrhs)),
self._np_iparm,
byref(c_int(0)),
np_b,
np_x,
np_error,
)
self._solve_time = time.time() - _solve_start
if error[0] != 0:
raise Exception(pardiso_error_msgs[str(error[0])])
if verbose:
print('Solution Stage')
print('--------------')
print('Solution time: ',
round(self._solve_time, 4))
print('Solution memory (Mb): ',
round(self._iparm[16]/1024, 4))
print('Number of iterative refinements:',
self._iparm[6])
print('Total memory (Mb): ',
round(sum(self._iparm[15:17])/1024, 4))
print()
return np.reshape(x, b_shp, order=('C' if nrhs == 1 else 'F'))
def info(self):
info = {'FactorTime': self._factor_time,
'SolveTime': self._solve_time,
'Factormem': round(self._iparm[15]/1024, 4),
'Solvemem': round(self._iparm[16]/1024, 4),
'IterRefine': self._iparm[6]}
return info
def delete(self):
# Delete all data
error = np.zeros(1, dtype=np.int32)
np_error = error.ctypes.data_as(ndpointer(np.int32, ndim=1, flags='C'))
pardiso(
self._np_pt,
byref(c_int(1)),
byref(c_int(1)),
byref(c_int(self._mtype)),
byref(c_int(-1)),
byref(c_int(self._dim)),
self._data,
self._indptr,
self._indices,
self._np_perm,
byref(c_int(1)),
self._np_iparm,
byref(c_int(0)),
byref(c_int(0)),
byref(c_int(0)),
np_error,
)
if error[0] == -10:
raise Exception('Error freeing solver memory')
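# A minimal usage sketch of the factorize/solve/free cycle documented in the
# docstrings of mkl_lu and mkl_splu; the helper name is illustrative and not
# part of the original module.
def _example_mkl_solve(A, b):
    lu = mkl_splu(A)          # A must be a square scipy.sparse CSR matrix
    x = lu.solve(b)           # solve A x = b for the given right-hand side
    lu.delete()               # release the solver's internal memory
    return x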
_MATRIX_TYPE_NAMES = {
4: 'Complex Hermitian positive-definite',
-4: 'Complex Hermitian indefinite',
2: 'Real symmetric positive-definite',
-2: 'Real symmetric indefinite',
11: 'Real non-symmetric',
13: 'Complex non-symmetric',
}
def _mkl_matrix_type(dtype, solver_args):
if not solver_args['hermitian']:
return 13 if dtype == np.complex128 else 11
out = 4 if dtype == np.complex128 else 2
return out if solver_args['posdef'] else -out
def mkl_splu(A, perm=None, verbose=False, **kwargs):
"""
Returns the LU factorization of the sparse matrix A.
Parameters
----------
A : csr_matrix
Sparse input matrix.
perm : ndarray (optional)
User defined matrix factorization permutation.
verbose : bool {False, True}
Report factorization details.
Returns
-------
lu : mkl_lu
Returns object containing LU factorization with a
solve method for solving with a given RHS vector.
"""
if not sp.isspmatrix_csr(A):
raise TypeError('Input matrix must be in sparse CSR format.')
if A.shape[0] != A.shape[1]:
raise Exception('Input matrix must be square')
dim = A.shape[0]
solver_args = _default_solver_args()
if set(kwargs) - set(solver_args):
raise ValueError(
"Unknown keyword arguments pass to mkl_splu: {!r}"
.format(set(kwargs) - set(solver_args))
)
solver_args.update(kwargs)
# If hermitian, then take upper-triangle of matrix only
if solver_args['hermitian']:
B = sp.triu(A, format='csr')
A = B # This gets around making a full copy of A in triu
is_complex = bool(A.dtype == np.complex128)
if not is_complex:
A = sp.csr_matrix(A, dtype=np.float64, copy=False)
data_type = A.dtype
# Create pointer to internal memory
pt = np.zeros(64, dtype=int)
np_pt = pt.ctypes.data_as(ndpointer(int, ndim=1, flags='C'))
# Create pointers to sparse matrix arrays
data = A.data.ctypes.data_as(ndpointer(data_type, ndim=1, flags='C'))
    indptr = A.indptr.ctypes.data_as(ndpointer(np.int32, ndim=1, flags='C'))
import os, inspect, time, math
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
PACK_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+"/.."
def make_dir(path):
try: os.mkdir(path)
except: pass
def gray2rgb(gray):
rgb = np.ones((gray.shape[0], gray.shape[1], 3)).astype(np.float32)
rgb[:, :, 0] = gray[:, :, 0]
rgb[:, :, 1] = gray[:, :, 0]
rgb[:, :, 2] = gray[:, :, 0]
return rgb
def dat2canvas(data):
numd = math.ceil(np.sqrt(data.shape[0]))
[dn, dh, dw, dc] = data.shape
canvas = np.ones((dh*numd, dw*numd, dc)).astype(np.float32)
for y in range(numd):
for x in range(numd):
try: tmp = data[x+(y*numd)]
except: pass
            else: canvas[(y*dh):(y*dh)+dh, (x*dw):(x*dw)+dw, :] = tmp
if(dc == 1):
canvas = gray2rgb(gray=canvas)
return canvas
def save_img(contents, names, ylen, xlen, savename=""):
plt.figure(figsize=(2+(5*xlen), 5*ylen))
for y in range(ylen):
for x in range(xlen):
            plt.subplot(ylen,xlen,(y*xlen)+(x+1))
            plt.title(names[(y*xlen)+x])
            plt.imshow(dat2canvas(data=contents[(y*xlen)+x]))
plt.tight_layout()
plt.savefig(savename)
plt.close()
def boxplot(contents, savename=""):
data, label = [], []
for cidx, content in enumerate(contents):
data.append(content)
label.append("class-%d" %(cidx))
plt.clf()
fig, ax1 = plt.subplots()
bp = ax1.boxplot(data, showfliers=True, whis=3)
ax1.set_xticklabels(label, rotation=45)
plt.tight_layout()
plt.savefig(savename)
plt.close()
def discrete_cmap(N, base_cmap=None):
base = plt.cm.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, N))
'''
Copyright (C) 2020-2021 <NAME> <<EMAIL>>
Released under the Apache-2.0 License.
'''
import os, sys, re
import functools
import torch as th
import collections
from tqdm import tqdm
import pylab as lab
import traceback
import math
import statistics
from scipy import stats
import numpy as np
import random
from .utils import IMmean, IMstd, renorm, denorm, xdnorm, chw2hwc
from termcolor import cprint, colored
def rank_attack(model, attack, loader, *, dconf, device, verbose=False):
'''
generic attack method for embedding/ranking models
'''
# >> pre-process the options
normimg = dconf.get('normimg', False)
if dconf.get('metric', None) is None:
raise ValueError('dconf parameter misses the "metric" key')
candidates = model.compute_embedding(loader, device=device,
l2norm=(True if dconf['metric']=='C' else False))
if dconf.get('TRANSFER', None) is None:
dconf['TRANSFER'] = None
else:
candidates_trans = dconf['TRANSFER']['model'].compute_embedding(
loader, device=dconf['TRANSFER']['device'],
l2norm=(True if 'C' in dconf['TRANSFER']['transfer'] else False))
dconf['TRANSFER']['candidates'] = candidates_trans
ruthless = int(os.getenv('RUTHLESS', -1)) # maxiter for attack
# >> dispatch: attacking
print('>>> Candidate Set:', candidates[0].shape, candidates[1].shape)
correct_orig, correct_adv, total = 0, 0, 0
rankup, embshift, prankgt, prank_trans = [], [], [], []
for N, (images, labels) in tqdm(enumerate(loader), total=len(loader)):
#if N < random.randint(0, 60502//2): continue # picking sample for vis
#if N < 14676//2: continue
if (ruthless > 0) and (N >= ruthless):
break
if verbose: cprint('\n'+'\u2500'*64, 'cyan')
        if re.match(r'^Q.?:PGD-M\d+$', attack) is not None:
            regroup = re.match(r'^Q(.?):PGD-M(\d+)$', attack).groups()
pm = str(regroup[0]) # + / -
assert(pm in ['+', '-'])
M = int(regroup[1]) # m, num of candidates
assert(M > 0)
xr, r, out, loss, count = RankPGD(model, images, labels,
candidates, eps=dconf['epsilon'], verbose=verbose,
device=device, loader=loader, metric=dconf['metric'],
normimg=normimg, atype='QA', M=M, pm=pm,
transfer=dconf['TRANSFER'])
        elif re.match(r'^SPQ.?:PGD-M\d+$', attack) is not None:
            regroup = re.match(r'^SPQ(.?):PGD-M(\d+)$', attack).groups()
pm = str(regroup[0]) # + / -
assert(pm in ['+', '-'])
M = int(regroup[1]) # m, num of candidates
assert(M > 0)
xr, r, out, loss, count = RankPGD(model, images, labels,
candidates, eps=dconf['epsilon'], verbose=verbose,
device=device, loader=loader, metric=dconf['metric'],
normimg=normimg, atype='SPQA', M=M, pm=pm,
transfer=dconf['TRANSFER'])
        elif re.match(r'^F:PGD-M\d+$', attack) is not None:
            regroup = re.match(r'^F:PGD-M(\d+)$', attack).groups()
M = int(regroup[0]) # m, num of candidates
assert(M > 1)
xr, r, out, loss, count = RankPGD(model, images, labels,
candidates, eps=dconf['epsilon'], verbose=verbose,
device=device, loader=loader, metric=dconf['metric'],
normimg=normimg, atype='FOA', M=M, pm=None,
transfer=dconf['TRANSFER'])
        elif re.match(r'^SPO:PGD-M\d+$', attack) is not None:
            regroup = re.match(r'^SPO:PGD-M(\d+)$', attack).groups()
M = int(regroup[0]) # m, num of candidates
xr, r, out, loss, count = RankPGD(model, images, labels,
candidates, eps=dconf['epsilon'], verbose=verbose,
device=device, loader=loader, metric=dconf['metric'],
normimg=normimg, atype='SPFOA', M=M, pm=None,
transfer=dconf['TRANSFER'])
else:
raise ValueError(f"Attack {attack} unsupported.")
correct_orig += count[0][0]
correct_adv += count[1][0]
total += len(labels)
rankup.append(count[1][1])
embshift.append(count[1][2])
prankgt.append(count[1][3])
prank_trans.append(count[1][4])
if N*images.shape[0] > 10000: break # XXX: N=10000 for speed
total = max(1,total)
# >> report overall attacking result on the test dataset
cprint('\u2500'*64, 'cyan')
if int(os.getenv('IAP', 0)) > 0:
cprint(' '.join([f'Summary[{attack} \u03B5={dconf["epsilon"]}]:',
'white-box=', '%.3f'%statistics.mean(rankup), # abuse var
'black-box=', '%.3f'%statistics.mean(embshift), # abuse var
'white-box-orig=', '%.3f'%statistics.mean(prankgt), # abuse var
'black-box-orig=', '%.3f'%statistics.mean(prank_trans), # abuse var
]), 'cyan')
else:
cprint(' '.join([f'Summary[{attack} \u03B5={dconf["epsilon"]}]:',
'baseline=', '%.3f'%(100.*(correct_orig/total)),
'adv=', '%.3f'%(100.*(correct_adv/total)),
'advReduce=', '%.3f'%(100.*(correct_orig - correct_adv) / total),
'rankUp=', '%.3f'%statistics.mean(rankup),
'embShift=', '%.3f'%statistics.mean(embshift),
'prankgt=', '%.3f'%statistics.mean(prankgt),
'prank_trans=', '%.3f'%statistics.mean(prank_trans),
]), 'cyan')
cprint('\u2500'*64, 'cyan')
class LossFactory(object):
'''
Factory of loss functions used in all ranking attacks
'''
@staticmethod
def RankLossEmbShift(repv: th.tensor, repv_orig: th.tensor, *, metric: str):
'''
        Computes the embedding shift; we want to maximize it by gradient descent
'''
if metric == 'C':
distance = 1 - th.mm(repv, repv_orig.t())
loss = -distance.trace() # gradient ascent on trace, i.e. diag.sum
elif metric == 'E':
distance = th.nn.functional.pairwise_distance(repv, repv_orig, p=2)
loss = -distance.sum()
return loss
@staticmethod
def RankLossQueryAttack(qs: th.tensor, Cs: th.tensor, Xs: th.tensor, *, metric: str, pm: str,
dist: th.tensor = None, cidx: th.tensor = None):
'''
Computes the loss function for pure query attack
'''
assert(qs.shape[1] == Cs.shape[2] == Xs.shape[1])
NIter, M, D, NX = qs.shape[0], Cs.shape[1], Cs.shape[2], Xs.shape[0]
DO_RANK = (dist is not None) and (cidx is not None)
losses, ranks = [], []
#refrank = []
for i in range(NIter):
#== compute the pairwise loss
q = qs[i].view(1, D) # [1, output_1]
C = Cs[i, :, :].view(M, D) # [1, output_1]
if metric == 'C':
A = (1 - th.mm(q, C.t())).expand(NX, M)
B = (1 - th.mm(Xs, q.t())).expand(NX, M)
elif metric == 'E':
A = (C - q).norm(2, dim=1).expand(NX, M)
B = (Xs - q).norm(2, dim=1).view(NX, 1).expand(NX, M)
#== loss function
if '+' == pm:
loss = (A-B).clamp(min=0.).mean()
elif '-' == pm:
loss = (-A+B).clamp(min=0.).mean()
losses.append(loss)
#== compute the rank
if DO_RANK:
ranks.append(th.mean(dist[i].flatten().argsort().argsort()
[cidx[i,:].flatten()].float()).item())
#refrank.append( ((A>B).float().mean()).item() )
#print('(debug)', 'rank=', statistics.mean(refrank))
loss = th.stack(losses).mean()
rank = statistics.mean(ranks) if DO_RANK else None
return loss, rank
@staticmethod
def RankLossFullOrderM2Attack(qs: th.tensor, ps: th.tensor, ns: th.tensor, *, metric: str):
'''
Computes the loss function for M=2 full-order attack
'''
assert(qs.shape[0] == ps.shape[0] == ns.shape[0])
assert(qs.shape[1] == ps.shape[1] == ns.shape[1])
Batch, D = qs.shape[0], qs.shape[1]
if metric == 'C':
dist1 = 1 - th.nn.functional.cosine_similarity(qs, ps, dim=1)
dist2 = 1 - th.nn.functional.cosine_similarity(qs, ns, dim=1)
elif metric == 'E':
dist1 = th.nn.functional.pairwise_distance(qs, ps, p=2)
dist2 = th.nn.functional.pairwise_distance(qs, ns, p=2)
else:
raise ValueError(metric)
loss = (dist1 - dist2).clamp(min=0.).mean()
acc = (dist1 <= dist2).sum().item() / Batch
return loss, acc
@staticmethod
    def RankLossFullOrderMXAttack(qs: th.tensor, Cs: th.tensor, *, metric: str):
assert(qs.shape[1] == Cs.shape[2])
NIter, M, D = qs.shape[0], Cs.shape[1], Cs.shape[2]
losses, taus = [], []
for i in range(NIter):
q = qs[i].view(1, D)
C = Cs[i, :, :].view(M, D)
if metric == 'C':
dist = 1 - th.mm(q, C.t())
elif metric == 'E':
dist = (C - q).norm(2, dim=1)
tau = stats.kendalltau(np.arange(M), dist.cpu().detach().numpy())[0]
taus.append(tau)
dist = dist.expand(M, M)
loss = (dist.t() - dist).triu(diagonal=1).clamp(min=0.).mean()
losses.append(loss)
loss = th.stack(losses).mean()
tau = statistics.mean(x for x in taus if not math.isnan(x))
return loss, tau
def __init__(self, request: str):
'''
Initialize various loss functions
'''
self.funcmap = {
'QA': self.RankLossQueryAttack,
'QA+': functools.partial(self.RankLossQueryAttack, pm='+'),
'QA-': functools.partial(self.RankLossQueryAttack, pm='-'),
'FOA2': self.RankLossFullOrderM2Attack,
'FOAX': self.RankLossFullOrderMXAttack,
}
if request not in self.funcmap.keys():
raise KeyError(f'Requested loss function "{request}" not found!')
self.request = request
def __call__(self, *args, **kwargs):
return self.funcmap[self.request](*args, **kwargs)
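# Usage sketch (added for clarity; the tensor shapes below are made up):
# LossFactory is instantiated with a request string and then called like a
# function, dispatching to the matching static loss defined above.
def _demo_loss_factory():
    qs = th.randn(4, 8)      # 4 perturbed query embeddings of dimension 8
    Cs = th.randn(4, 3, 8)   # 3 selected candidates per query
    Xs = th.randn(100, 8)    # a small candidate pool
    loss_fn = LossFactory('QA+')                  # query attack, pulling candidates closer
    loss, rank = loss_fn(qs, Cs, Xs, metric='E')  # rank is None without dist/cidx
    return loss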
## MARK: STAGE0
def RankPGD(model, images, labels, candi, *,
eps=0.3, alpha=1./255., atype=None, M=None, W=None, pm=None,
verbose=False, device='cpu', loader=None, metric=None,
normimg=False, transfer=None):
'''
Perform FGSM/PGD Query/Candidate attack on the given batch of images, L_infty constraint
https://github.com/tensorflow/cleverhans/blob/master/cleverhans/attacks/fast_gradient_method.py
This is the core of the adversarial ranking implementation,
but we don't have enough energy to tidy it up before ICCV submission.
'''
# >> prepare the current batch of images
assert(type(images) == th.Tensor)
images = images.clone().detach().to(device)
images_orig = images.clone().detach()
images.requires_grad = True
labels = labels.to(device).view(-1)
# >> sanity check for normalized images, if any
if normimg:
# normed_img = (image - mean)/std
IMmean = th.tensor([0.485, 0.456, 0.406], device=device)
IMstd = th.tensor([0.229, 0.224, 0.225], device=device)
renorm = lambda im: im.sub(IMmean[:,None,None]).div(IMstd[:,None,None])
denorm = lambda im: im.mul(IMstd[:,None,None]).add(IMmean[:,None,None])
if (not normimg) and ((images > 1.0).sum() + (images < 0.0).sum() > 0):
raise Exception("please toggle 'normimg' as True for sanity")
def tensorStat(t):
return f'Min {t.min().item()} Max {t.max().item()} Mean {t.mean().item()}'
#<<<<<< STAGE1: ORIG SAMPLE EVALUATION <<<<<<
model.eval()
with th.no_grad():
# -- [orig] -- forward the original samples with the original loss
# >> Result[output]: embedding vectors
# >> Result[dist]: distance matrix (current batch x database)
if metric == 'C':
output = model.forward(images, l2norm=True)
dist = 1 - output @ candi[0].t() # [num_output_num, num_candidate]
elif metric == 'E':
output = model.forward(images, l2norm=False)
dist = []
# the memory requirement is insane if we want to do the pairwise distance
# matrix in a single step like faC_c2f2_siamese.py's loss function.
for i in range(output.shape[0]):
xq = output[i].view(1, -1)
xqd = (candi[0] - xq).norm(2, dim=1).squeeze()
dist.append(xqd)
dist = th.stack(dist) # [num_output_num, num_candidate]
else:
raise ValueError(metric)
output_orig = output.clone().detach()
dist_orig = dist.clone().detach()
loss = th.tensor(-1) # we don't track this value anymore
loss_orig = th.tensor(-1) # we don't track this value anymore
#== <transfer> forward the samples with the transfer model
if transfer is not None:
if 'C' in transfer['transfer']:
output_trans = transfer['model'].forward(images, l2norm=True)
dist_trans = 1 - output_trans @ transfer['candidates'][0].t()
elif 'E' in transfer['transfer']:
output_trans = transfer['model'].forward(images, l2norm=False)
dist_trans = []
for i in range(output_trans.shape[0]):
xtrans = output_trans[i].view(1, -1)
xdtrans = (transfer['candidates'][0] - xtrans).norm(2, dim=1).squeeze()
dist_trans.append(xdtrans)
dist_trans = th.stack(dist_trans)
# -- [orig] -- select attack targets and calculate the attacking loss
if (atype in ['FOA', 'SPFOA']) and (M is not None) and (M == 2):
# -- [orig] ranking attack, M=2
#== configuration for FOA:M=2
M_GT = 5 # sensible choice due to SOP dataset property
XI = float(os.getenv('SP', 10.)) # balancing the "SP" and "QA" component
if 'SP' not in atype:
XI = None # override SP weight to None
#== select the M=2 candidates. note, x1 is closer to q than x2
if True:
# local sampling (default)
topmost = int(candi[0].size(0) * 0.01)
topxm = dist.topk(topmost+1, dim=1, largest=False)[1][:,1:] # [output_0, M]
sel = np.vstack([np.random.permutation(topmost) for j in range(topxm.shape[0])])
msample = th.stack([topxm[i][np.sort(sel[i,:M])] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = th.stack([topxm[i][np.sort(sel[i,M:])[:M_GT]] for i in range(topxm.shape[0])])
else:
# global sampling
distsort = dist.sort(dim=1)[1] # [output_0, candi_0]
mpairs = th.randint(candi[0].shape[0], (output.shape[0], M)).sort(dim=1)[0] # [output_0, M]
msample= th.stack([distsort[i, mpairs[i]] for i in range(output.shape[0])]) # [output_0, M]
if 'SP' in atype:
mgtruth = dist.topk(M_GT+1, dim=1, largest=False)[1][:,1:] # [output_0, M_GT]
embpairs = candi[0][msample, :] # [output_0, M, output_1]
if 'SP' in atype:
embgts = candi[0][mgtruth, :] # [output_0, M_GT, output_1]
# >> compute the (ordinary) loss on selected targets
loss, acc = LossFactory('FOA2')(output, embpairs[:,1,:], embpairs[:,0,:], metric=metric)
#== Semantic preserving? (SP)
if 'SP' in atype:
loss_sp, rank_gt = LossFactory('QA+')(output, embgts, candi[0],
metric=metric, dist=dist, cidx=mgtruth)
loss = loss + XI * loss_sp
prankgt_orig = rank_gt #/ candi[0].size(0)
# >> backup and report
correct_orig = acc * output.shape[0]
loss_orig = loss.clone().detach()
if verbose:
print()
if 'SP' not in atype:
print('* Original Sample', 'loss=', loss.item(), 'FOA:Accu=', acc)
else:
print('* Original Sample', 'loss=', loss.item(), 'where loss_sp=', loss_sp.item(),
'FOA:Accu=', acc, 'GT.R@mean=', rank_gt)
# <transfer>
if transfer is not None:
embpairs_trans = transfer['candidates'][0][msample, :]
_, acc_trans = LossFactory('FOA2')(output_trans, embpairs_trans[:,1,:], embpairs_trans[:,0,:],
metric=('C' if 'C' in transfer['transfer'] else 'E'))
if 'SP' not in atype:
print('* <transfer> Original Sample', 'FOA:Accu=', acc_trans)
else:
embgts_trans = transfer['candidates'][0][mgtruth, :]
_, rank_sp_trans = LossFactory('QA')(output_trans, embgts_trans,
transfer['candidates'][0], pm='+',
metric=('C' if 'C' in transfer['transfer'] else 'E'),
dist=dist_trans, cidx=mgtruth)
print('* <transfer> Original Sample', 'FOA:Accu=', acc_trans,
'GT.R@mean=', rank_sp_trans)
elif (atype in ['FOA', 'SPFOA']) and (M is not None) and (M > 2):
# -- [orig] ranking attack, M>2
#== configuration for FOA:M>2
M_GT = 5 # sensible choice due to SOP dataset property
XI = float(os.getenv('SP', 10.)) # balancing the "SP" and "QA" component
if 'SP' not in atype:
XI = None # override SP weight to None
#== select M>2 candidates, in any order
if True:
# Just select the original top-k
topxm = dist.topk(M, dim=1, largest=False)[1]
rpm = np.stack([np.random.permutation(M) for j in range(topxm.shape[0])])
msample = th.stack([topxm[i][rpm[i]] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = msample
elif False:
# local sampling (from the topmost 1% samples)
topmost = int(candi[0].size(0) * 0.01)
topxm = dist.topk(topmost+1, dim=1, largest=False)[1][:,1:] # [output_0, M]
sel = np.vstack([ | np.random.permutation(topmost) | numpy.random.permutation |
import numpy as np
a = np.array([1, 2, 3, 4])
b = np.array([2, 3, 4, 5])
c = np.vstack((a, b))
print(c)
print(c.shape)
d = | np.hstack((a, b)) | numpy.hstack |
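# Added for reference: vstack stacks along a new leading axis, hence the
# (2, 4) shape printed above, while hstack concatenates along the existing
# axis, so d is the flat array [1 2 3 4 2 3 4 5] with shape (8,).
print(d)
print(d.shape)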
"""
Extract slits from a MOSFIRE slitmask
"""
import glob
import os
import traceback
import astropy.io.fits as pyfits
import numpy as np
from grizli import utils
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import drizzlepac
import scipy.ndimage as nd
import peakutils
from skimage.feature import match_template
from skimage.registration import phase_cross_correlation
from tqdm import tqdm
utils.LOGFILE = 'mospipe.log'
utils.set_warnings()
def grating_dlambda(band):
"""
returns the dlambda/dpixel in angstrom for a band
(From MosfireDRP)
"""
orders = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = orders[band]
d = 1e3/110.5 # Groove spacing in micron
pixelsize, focal_length = 18.0, 250e3 # micron
scale = pixelsize/focal_length
dlambda = scale * d / order * 10000
return dlambda
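# Worked example (added; not part of the original module): the dispersion
# scales inversely with the grating order, so with the constants above the
# Y/J/H/K bands give roughly 1.09, 1.30, 1.63 and 2.17 Angstrom per pixel.
def _demo_grating_dlambda():
    for band in ('Y', 'J', 'H', 'K'):
        print(band, round(grating_dlambda(band), 2))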
# grating_summary = {'Y': {'edge':[9612, 11350]},
# 'J': {'edge':[11550, 13623]},
# 'H': {'edge':[14590, 18142]},
# 'K': {'edge':[19118, 24071]}}
grating_summary = {'Y': {'edge':[9612, 11350]},
'J': {'edge':[11450, 13550]},
'H': {'edge':[14590, 18142]},
'K': {'edge':[18900, 24150]}}
for k in grating_summary:
edge = grating_summary[k]['edge']
grating_summary[k]['dlam'] = dlam = grating_dlambda(k)
grating_summary[k]['N'] = int(np.ceil(edge[1]-edge[0])/dlam)
grating_summary[k]['ref_wave'] = (edge[1]+edge[0])/2
def get_grating_loglam(filter):
"""
Get polynomial and WCS coefficients that approximate logarithmic
wavelength spacing
"""
gr = grating_summary[filter]
edge, dlam, N = gr['edge'], gr['dlam'], gr['N']
loglam = np.logspace(np.log10(edge[0]), np.log10(edge[1]), N)
xarr = | np.arange(N) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Multi-lib backend for POT
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
try:
import torch
torch_type = torch.Tensor
except ImportError:
torch = False
torch_type = float
try:
import jax
import jax.numpy as jnp
jax_type = jax.numpy.ndarray
except ImportError:
jax = False
jax_type = float
str_type_error = "All array should be from the same type/backend. Current types are : {}"
def get_backend_list():
""" returns the list of available backends)"""
lst = [NumpyBackend(), ]
if torch:
lst.append(TorchBackend())
if jax:
lst.append(JaxBackend())
return lst
def get_backend(*args):
"""returns the proper backend for a list of input arrays
Also raises TypeError if all arrays are not from the same backend
"""
# check that some arrays given
if not len(args) > 0:
raise ValueError(" The function takes at least one parameter")
# check all same type
if isinstance(args[0], np.ndarray):
if not len(set(type(a) for a in args)) == 1:
raise ValueError(str_type_error.format([type(a) for a in args]))
return NumpyBackend()
elif torch and isinstance(args[0], torch_type):
if not len(set(type(a) for a in args)) == 1:
raise ValueError(str_type_error.format([type(a) for a in args]))
return TorchBackend()
elif isinstance(args[0], jax_type):
return JaxBackend()
else:
raise ValueError("Unknown type of non implemented backend.")
def to_numpy(*args):
"""returns numpy arrays from any compatible backend"""
if len(args) == 1:
return get_backend(args[0]).to_numpy(args[0])
else:
return [get_backend(a).to_numpy(a) for a in args]
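# Usage sketch (added; the arrays are arbitrary): get_backend inspects its
# arguments and returns the matching backend object, so the same code path can
# serve numpy arrays or torch tensors through one API.
def _demo_backend_dispatch():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([0.5, 0.5, 0.5])
    nx = get_backend(a, b)        # NumpyBackend for numpy inputs
    return nx.sum(nx.abs(a - b))  # 4.5, computed through the backend API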
class Backend():
__name__ = None
__type__ = None
def __str__(self):
return self.__name__
# convert to numpy
def to_numpy(self, a):
raise NotImplementedError()
# convert from numpy
def from_numpy(self, a, type_as=None):
raise NotImplementedError()
def set_gradients(self, val, inputs, grads):
""" define the gradients for the value val wrt the inputs """
raise NotImplementedError()
def zeros(self, shape, type_as=None):
raise NotImplementedError()
def ones(self, shape, type_as=None):
raise NotImplementedError()
def arange(self, stop, start=0, step=1, type_as=None):
raise NotImplementedError()
def full(self, shape, fill_value, type_as=None):
raise NotImplementedError()
def eye(self, N, M=None, type_as=None):
raise NotImplementedError()
def sum(self, a, axis=None, keepdims=False):
raise NotImplementedError()
def cumsum(self, a, axis=None):
raise NotImplementedError()
def max(self, a, axis=None, keepdims=False):
raise NotImplementedError()
def min(self, a, axis=None, keepdims=False):
raise NotImplementedError()
def maximum(self, a, b):
raise NotImplementedError()
def minimum(self, a, b):
raise NotImplementedError()
def dot(self, a, b):
raise NotImplementedError()
def abs(self, a):
raise NotImplementedError()
def exp(self, a):
raise NotImplementedError()
def log(self, a):
raise NotImplementedError()
def sqrt(self, a):
raise NotImplementedError()
def norm(self, a):
raise NotImplementedError()
def any(self, a):
raise NotImplementedError()
def isnan(self, a):
raise NotImplementedError()
def isinf(self, a):
raise NotImplementedError()
def einsum(self, subscripts, *operands):
raise NotImplementedError()
def sort(self, a, axis=-1):
raise NotImplementedError()
def argsort(self, a, axis=None):
raise NotImplementedError()
def flip(self, a, axis=None):
raise NotImplementedError()
class NumpyBackend(Backend):
__name__ = 'numpy'
__type__ = np.ndarray
def to_numpy(self, a):
return a
def from_numpy(self, a, type_as=None):
if type_as is None:
return a
elif isinstance(a, float):
return a
else:
return a.astype(type_as.dtype)
def set_gradients(self, val, inputs, grads):
# no gradients for numpy
return val
def zeros(self, shape, type_as=None):
if type_as is None:
return np.zeros(shape)
else:
return np.zeros(shape, dtype=type_as.dtype)
def ones(self, shape, type_as=None):
if type_as is None:
return np.ones(shape)
else:
return np.ones(shape, dtype=type_as.dtype)
def arange(self, stop, start=0, step=1, type_as=None):
return np.arange(start, stop, step)
def full(self, shape, fill_value, type_as=None):
if type_as is None:
return np.full(shape, fill_value)
else:
return np.full(shape, fill_value, dtype=type_as.dtype)
def eye(self, N, M=None, type_as=None):
if type_as is None:
return np.eye(N, M)
else:
return np.eye(N, M, dtype=type_as.dtype)
def sum(self, a, axis=None, keepdims=False):
return np.sum(a, axis, keepdims=keepdims)
def cumsum(self, a, axis=None):
return np.cumsum(a, axis)
def max(self, a, axis=None, keepdims=False):
return np.max(a, axis, keepdims=keepdims)
def min(self, a, axis=None, keepdims=False):
return np.min(a, axis, keepdims=keepdims)
def maximum(self, a, b):
return np.maximum(a, b)
def minimum(self, a, b):
return np.minimum(a, b)
def dot(self, a, b):
return np.dot(a, b)
def abs(self, a):
return np.abs(a)
def exp(self, a):
return np.exp(a)
def log(self, a):
return np.log(a)
def sqrt(self, a):
return np.sqrt(a)
def norm(self, a):
return np.sqrt(np.sum(np.square(a)))
def any(self, a):
return np.any(a)
def isnan(self, a):
return np.isnan(a)
def isinf(self, a):
return np.isinf(a)
def einsum(self, subscripts, *operands):
return np.einsum(subscripts, *operands)
def sort(self, a, axis=-1):
return np.sort(a, axis)
def argsort(self, a, axis=-1):
return np.argsort(a, axis)
def flip(self, a, axis=None):
return | np.flip(a, axis) | numpy.flip |
'''
Set of various image filters used for generating textures for models.
Uses numpy arrays with colors encoded as values in the range 0-1.
'''
# pylint: disable=invalid-name
from __future__ import annotations
from itertools import cycle, accumulate
from typing import Tuple, Iterable, NamedTuple, List, Optional, Sequence
from abc import ABC, abstractmethod
from enum import Enum
import numpy as np
from .bedrock_packs import Vector2d, Vector3d
class UvMaskTypes(Enum):
'''
    UvMaskTypes are used for selecting one of the available mask types in
dropdown lists.
'''
COLOR_PALLETTE_MASK='Color Palette Mask'
GRADIENT_MASK='Gradient Mask'
ELLIPSE_MASK='Ellipse Mask'
RECTANGLE_MASK='Rectangle Mask'
STRIPES_MASK='Stripes Mask'
RANDOM_MASK='Random Mask'
COLOR_MASK='Color Mask'
MIX_MASK='Mix Mask'
def list_mask_types_as_blender_enum(self, context):
'''
Passing list itself to some operators/panels didn't work.
This function is a workaround that uses alternative definition for
EnumProperty.
https://docs.blender.org/api/current/bpy.props.html#bpy.props.EnumProperty
'''
# pylint: disable=unused-argument
return [(i.value, i.value, i.value) for i in UvMaskTypes]
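# Example (added): the callback above is meant to be passed as the `items`
# argument of bpy.props.EnumProperty; called outside Blender it simply returns
# (identifier, name, description) tuples, one per UvMaskTypes member.
def _demo_mask_enum_items():
    items = list_mask_types_as_blender_enum(None, None)
    return items[0]  # ('Color Palette Mask', 'Color Palette Mask', 'Color Palette Mask')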
class MixMaskMode(Enum):
'''MixMaskMode is used to define the behavior of the MixMask'''
mean='mean'
min='min'
max='max'
median='median'
def list_mix_mask_modes_as_blender_enum(self, context):
'''
Returns list of tuples for creating EnumProperties with MixMaskMode enum.
'''
# pylint: disable=unused-argument
return [(i.value, i.value, i.value) for i in MixMaskMode]
class Mask(ABC):
'''Abstract class, parent of all Filters.'''
@abstractmethod
def apply(self, image: np.ndarray):
'''
        Applies the mask to the image.
:param image: The image filtered by the mask.
'''
class Color(NamedTuple):
'''Color palette color.'''
r: float
g: float
b: float
@staticmethod
def create_from_hex(color: str):
'''Creates color object from hex string e.g. "ffffff"'''
if len(color) != 6:
raise Exception(
                'The color should be passed as a 6-digit hex number with '
'format "rrggbb"'
)
return Color(
int(color[:2], 16)/255.0,
int(color[2:4], 16)/255.0,
int(color[4:], 16)/255.0
)
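# Example (added): create_from_hex converts an "rrggbb" string into the 0-1
# float range used throughout this module; the hex value below is arbitrary.
def _demo_color_from_hex():
    return Color.create_from_hex("ff8000")  # Color(r=1.0, g=0.502..., b=0.0)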
class ColorPaletteMask(Mask):
'''
ColorPaletteMask is a mask that maps values (0 to 1) from the image to
colors from the color palette.
'''
def __init__(
self, colors: List[Color], *,
interpolate: bool = False,
normalize: bool = False):
self.colors = colors
self.interpolate = interpolate
self.normalize = normalize
def apply(self, image: np.ndarray):
# xp and fp for np.interp
if self.interpolate:
fp_r = [c.r for c in self.colors]
fp_g = [c.g for c in self.colors]
fp_b = [c.b for c in self.colors]
xp = np.array(list(range(len(self.colors))))
xp = xp/(len(self.colors)-1)
else:
def repeated_list(iterable):
for i in iterable:
yield i
yield i
fp_r = [c.r for c in repeated_list(self.colors)]
fp_g = [c.g for c in repeated_list(self.colors)]
fp_b = [c.b for c in repeated_list(self.colors)]
xp = np.array(list(range(len(self.colors))))
xp = xp/len(self.colors)
unpacked_xp = [0.0]
for xpi in repeated_list(xp[1:]):
unpacked_xp.append(xpi)
unpacked_xp.append(1.0)
xp = np.array(unpacked_xp)
# Input image must be converted to grayscale
gray = np.mean(image, axis=2)
if self.normalize:
gray = np.interp(
gray, [ | np.min(gray) | numpy.min |
#System Stack
import csv
#Science Stack
import numpy as np
from netCDF4 import Dataset
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/Data_Local/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
d = {}
count = 0
with open('/Users/bell/Data_Local/Reanalysis_Files/ncepstorms_2000_2005.txt','r') as tsvin:
tsvin = csv.reader(tsvin, delimiter='\t')
for row in tsvin:
d[count] = row[0].strip().split()
count = count + 1
lat = np.array([ d[k][13] for i,k in enumerate(d.keys())], float)
lon = -1 * np.array([ d[k][14] for i,k in enumerate(d.keys())], float) + 180.
year = np.array([ d[k][2] for i,k in enumerate(d.keys())], float)
boxnumlat = np.array([ d[k][-5] for i,k in enumerate(d.keys())], int)
boxnumlon = np.array([ d[k][-4] for i,k in enumerate(d.keys())], int)
idnum = np.array([ d[k][-1] for i,k in enumerate(d.keys())], int)
color = ['b','g','r','y','c']
freq_array = np.zeros(shape=(70,70))
lat_array = | np.zeros(shape=(70,70)) | numpy.zeros |
# coding: utf-8
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
__all__ = ['Ackley','Sphere','Rosenbrock','Beale','GoldsteinPrice','Booth',
'BukinN6','Matyas','LeviN13','ThreeHumpCamel','Easom','Eggholder',
'McCormick','SchafferN2','SchafferN4','StyblinskiTang','DeJongsF1',
'DeJongsF2','DeJongsF3','DeJongsF4','DeJongsF5','Ellipsoid','KTablet',
'FiveWellPotential','WeightedSphere','HyperEllipsodic',
'SumOfDifferentPower','Griewank','Michalewicz','Perm','Rastrigin',
'Schwefel','SixHumpCamel','Shuberts','XinSheYang','Zakharov']
__oneArgument__ = ['Beale','GoldsteinPrice','Booth','BukinN6','Matyas','LeviN13',
'ThreeHumpCamel','Easom','Eggholder','McCormick','SchafferN2',
'SchafferN4','DeJongsF3','DeJongsF4','DeJongsF5',
'FiveWellPotential','SixHumpCamel','Shuberts']
__twoArgument__ = ['Ackley','Sphere','Rosenbrock','StyblinskiTang','DeJongsF1',
'DeJongsF2','Ellipsoid','KTablet','WeightedSphere',
'HyperEllipsodic','SumOfDifferentPower','Griewank',
'Michalewicz','Rastrigin','Schwefel','XinSheYang','Zakharov']
__threeArgument__ = ['Perm']
##### Basic function #####
class OptimalBasic:
def __init__(self, variable_num):
self.variable_num = variable_num
self.max_search_range = np.array([0]*self.variable_num)
self.min_search_range = np.array([0]*self.variable_num)
self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = ''
        self.save_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'img')
        if not os.path.isdir(self.save_dir):
            os.mkdir(self.save_dir)
def get_global_optimum_solution(self):
return self.global_optimum_solution
def get_optimal_solution(self):
return self.optimal_solution
def get_search_range(self):
return [self.max_search_range, self.min_search_range]
def get_func_val(self, variables):
return -1
def plot(self):
x = np.arange(self.min_search_range[0],self.max_search_range[0], self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1],self.max_search_range[1], self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x,y)
Z = []
for xy_list in zip(X,Y):
z = []
for xy_input in zip(xy_list[0],xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num-2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
plt.show()
def save_fig(self):
x = np.arange(self.min_search_range[0],self.max_search_range[0], self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1],self.max_search_range[1], self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x,y)
Z = []
for xy_list in zip(X,Y):
z = []
for xy_input in zip(xy_list[0],xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num-2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
        plt.savefig(os.path.join(self.save_dir, self.func_name + '.png'))
plt.close()
##### Optimization benchmark function group #####
##### Class Ackley function #####
class Ackley(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([32.768]*self.variable_num)
self.min_search_range = np.array([-32.768]*self.variable_num)
self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.func_name = 'Ackley'
def get_func_val(self, variables):
tmp1 = 20.-20.*np.exp(-0.2*np.sqrt(1./self.variable_num*np.sum(np.square(variables))))
tmp2 = np.e-np.exp(1./self.variable_num*np.sum(np.cos(variables*2.*np.pi)))
return tmp1+tmp2
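##### Example usage (added, illustrative only) #####
# Every benchmark exposes the same interface; e.g. Ackley evaluates to its
# global optimum value of 0 at the all-zero solution recorded above.
def _demo_ackley():
    f = Ackley(variable_num=2)
    return f.get_func_val(np.array([0.0, 0.0]))  # 0.0 up to float rounding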
##### Class Sphere function #####
class Sphere(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([1000]*self.variable_num) # nearly inf
self.min_search_range = np.array([-1000]*self.variable_num) # nearly inf
        self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'Sphere'
def get_func_val(self, variables):
return np.sum(np.square(variables))
##### Class Rosenbrock function #####
class Rosenbrock(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5]*self.variable_num)
self.min_search_range = np.array([-5]*self.variable_num)
self.optimal_solution = np.array([1]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Rosenbrock'
def get_func_val(self, variables):
f = 0
for i in range(self.variable_num-1):
f += 100*np.power(variables[i+1]-np.power(variables[i],2),2)+np.power(variables[i]-1,2)
return f
##### Class Beale function #####
class Beale(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([4.5]*self.variable_num)
self.min_search_range = np.array([-4.5]*self.variable_num)
self.optimal_solution = np.array([3.,0.5])
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Beale'
def get_func_val(self, variables):
tmp1 = np.power(1.5 - variables[0] + variables[0] * variables[1],2)
tmp2 = np.power(2.25 - variables[0] + variables[0] * np.power(variables[1],2),2)
tmp3 = np.power(2.625 - variables[0] + variables[0] * np.power(variables[1],3),2)
return tmp1+tmp2+tmp3
##### Class Goldstein-Price function #####
class GoldsteinPrice(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([2.]*self.variable_num)
self.min_search_range = np.array([-2.]*self.variable_num)
self.optimal_solution = np.array([0.,-1.])
self.global_optimum_solution = 3
self.plot_place = 0.25
self.func_name = 'GoldsteinPrice'
def get_func_val(self, variables):
tmp1 = (1+np.power(variables[0]+variables[1]+1,2)*(19-14*variables[0]+3*np.power(variables[0],2)-14*variables[1]+6*variables[0]*variables[1]+3*np.power(variables[1],2)))
tmp2 = (30+(np.power(2*variables[0]-3*variables[1],2)*(18-32*variables[0]+12*np.power(variables[0],2)+48*variables[1]-36*variables[0]*variables[1]+27*np.power(variables[1],2))))
return tmp1*tmp2
##### Class Booth function #####
class Booth(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.]*self.variable_num)
self.min_search_range = | np.array([-10.]*self.variable_num) | numpy.array |
################################################################################
# Copyright (c) 2017-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for :py:mod:`katdal.chunkstore_s3`.
The tests require `minio`_ to be installed on the :envvar:`PATH`. If not found,
the test will be skipped.
Versions of minio prior to 2018-08-25T01:56:38Z contain a `race condition`_
that can cause it to crash when queried at the wrong point during startup. If
an older version is detected, the test will be skipped.
.. _minio: https://github.com/minio/minio
.. _race condition: https://github.com/minio/minio/issues/6324
"""
import contextlib
import http.server
import io
import os
import pathlib
import re
import shutil
import socket
import tempfile
import threading
import time
import urllib.parse
import warnings
import jwt
import katsdptelstate
import numpy as np
import requests
from katsdptelstate.rdb_writer import RDBWriter
from nose import SkipTest
from nose.tools import (assert_equal, assert_in, assert_not_in, assert_raises,
timed)
from numpy.testing import assert_array_equal
from urllib3.util.retry import Retry
from katdal.chunkstore import ChunkNotFound, StoreUnavailable
from katdal.chunkstore_s3 import (_DEFAULT_SERVER_GLITCHES, InvalidToken,
S3ChunkStore, TruncatedRead, _AWSAuth,
decode_jwt, read_array)
from katdal.datasources import TelstateDataSource
from katdal.test.s3_utils import MissingProgram, S3Server, S3User
from katdal.test.test_chunkstore import ChunkStoreTestBase
from katdal.test.test_datasources import (assert_telstate_data_source_equal,
make_fake_data_source)
# Use a standard bucket for most tests to ensure valid bucket name (regex '^[0-9a-z.-]{3,63}$')
BUCKET = 'katdal-unittest'
# Also authorise this prefix for tests that will make their own buckets
PREFIX = '1234567890'
# Pick quick but different timeouts and retries for unit tests:
# - The effective connect timeout is 5.0 (initial) + 5.0 (1 retry) = 10 seconds
# - The effective read timeout is 0.4 + 0.4 = 0.8 seconds
# - The effective status timeout is 0.1 * (0 + 2 + 4) = 0.6 seconds, or
# 4 * 0.1 + 0.6 = 1.0 second if the suggestions use SUGGESTED_STATUS_DELAY
TIMEOUT = (5.0, 0.4)
RETRY = Retry(connect=1, read=1, status=3, backoff_factor=0.1,
raise_on_status=False, status_forcelist=_DEFAULT_SERVER_GLITCHES)
SUGGESTED_STATUS_DELAY = 0.1
READ_PAUSE = 0.1
@contextlib.contextmanager
def get_free_port(host):
"""Get an unused port number.
This is a context manager that returns a port, while holding open the
socket bound to it. This prevents another ephemeral process from
obtaining the port in the meantime. The target process should bind the
port with SO_REUSEPORT, after which the context should be exited to close
the temporary socket.
"""
with contextlib.closing(socket.socket()) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
yield port
class TestReadArray:
def _test(self, array):
fp = io.BytesIO()
np.save(fp, array)
fp.seek(0)
out = read_array(fp)
np.testing.assert_equal(array, out)
# Check that Fortran order was preserved
assert_equal(array.strides, out.strides)
def testSimple(self):
self._test(np.arange(20))
def testMultiDim(self):
self._test(np.arange(20).reshape(4, 5, 1))
def testFortran(self):
self._test(np.arange(20).reshape(4, 5, 1).T)
def testV2(self):
# Make dtype that needs more than 64K to store, forcing .npy version 2.0
dtype = np.dtype([('a' * 70000, np.float32), ('b', np.float32)])
with warnings.catch_warnings():
# Suppress warning that V2 files can only be read by numpy >= 1.9
warnings.simplefilter('ignore', category=UserWarning)
self._test(np.zeros(100, dtype))
def testBadVersion(self):
data = b'\x93NUMPY\x03\x04' # Version 3.4
fp = io.BytesIO(data)
with assert_raises(ValueError):
read_array(fp)
def testPickled(self):
array = np.array([str, object])
fp = io.BytesIO()
np.save(fp, array)
fp.seek(0)
with assert_raises(ValueError):
read_array(fp)
def _truncate_and_fail_to_read(self, *args):
fp = io.BytesIO()
np.save(fp, | np.arange(20) | numpy.arange |
# Copyright (c) <NAME>, <NAME>, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Offline Bandit Algorithms."""
from collections import OrderedDict
from dataclasses import dataclass
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from scipy.special import softmax
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import clone
from sklearn.base import is_classifier
from sklearn.linear_model import LogisticRegression
from sklearn.utils import check_random_state
from sklearn.utils import check_scalar
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
import torch.optim as optim
from tqdm import tqdm
from obp.ope import RegressionModel
from ..utils import check_array
from ..utils import check_bandit_feedback_inputs
from ..utils import check_tensor
from ..utils import softmax as softmax_axis1
from .base import BaseOfflinePolicyLearner
@dataclass
class IPWLearner(BaseOfflinePolicyLearner):
"""Off-policy learner based on Inverse Probability Weighting and Supervised Classification.
Parameters
-----------
n_actions: int
Number of actions.
len_list: int, default=1
Length of a list of actions in a recommendation/ranking inferface, slate size.
When Open Bandit Dataset is used, 3 should be set.
base_classifier: ClassifierMixin
Machine learning classifier used to train an offline decision making policy.
References
------------
<NAME>, <NAME>, <NAME>, and <NAME>.
"Doubly Robust Policy Evaluation and Optimization.", 2014.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Large-scale Validation of Counterfactual Learning Methods: A Test-Bed.", 2016.
"""
base_classifier: Optional[ClassifierMixin] = None
def __post_init__(self) -> None:
"""Initialize class."""
super().__post_init__()
if self.base_classifier is None:
self.base_classifier = LogisticRegression(random_state=12345)
else:
if not is_classifier(self.base_classifier):
raise ValueError("`base_classifier` must be a classifier")
self.base_classifier_list = [
clone(self.base_classifier) for _ in np.arange(self.len_list)
]
@staticmethod
def _create_train_data_for_opl(
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Create training data for off-policy learning.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
Returns
--------
(X, sample_weight, y): Tuple[np.ndarray, np.ndarray, np.ndarray]
Feature vectors, sample weights, and outcome for training the base machine learning model.
"""
return context, (reward / pscore), action
def fit(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
) -> None:
"""Fits an offline bandit policy on the given logged bandit data.
Note
--------
This `fit` method trains a deterministic policy :math:`\\pi: \\mathcal{X} \\rightarrow \\mathcal{A}`
via a cost-sensitive classification reduction as follows:
.. math::
\\hat{\\pi}
& \\in \\arg \\max_{\\pi \\in \\Pi} \\hat{V}_{\\mathrm{IPW}} (\\pi ; \\mathcal{D}) \\\\
& = \\arg \\max_{\\pi \\in \\Pi} \\mathbb{E}_{n} \\left[\\frac{\\mathbb{I} \\{\\pi (x_{i})=a_{i} \\}}{\\pi_{b}(a_{i} | x_{i})} r_{i} \\right] \\\\
& = \\arg \\min_{\\pi \\in \\Pi} \\mathbb{E}_{n} \\left[\\frac{r_i}{\\pi_{b}(a_{i} | x_{i})} \\mathbb{I} \\{\\pi (x_{i}) \\neq a_{i} \\} \\right],
        where :math:`\\mathbb{E}_{n} [\\cdot]` is the empirical average over observations in :math:`\\mathcal{D}`.
See the reference for the details.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a learner assumes that only a single action is chosen for each data.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=reward,
pscore=pscore,
position=position,
)
if (reward < 0).any():
raise ValueError(
"A negative value is found in `reward`."
"`obp.policy.IPWLearner` cannot handle negative rewards,"
"and please use `obp.policy.NNPolicyLearner` instead."
)
if pscore is None:
n_actions = np.int32(action.max() + 1)
pscore = np.ones_like(action) / n_actions
if self.len_list == 1:
position = np.zeros_like(action, dtype=int)
else:
if position is None:
raise ValueError("When `self.len_list > 1`, `position` must be given.")
for p in np.arange(self.len_list):
X, sample_weight, y = self._create_train_data_for_opl(
context=context[position == p],
action=action[position == p],
reward=reward[position == p],
pscore=pscore[position == p],
)
self.base_classifier_list[p].fit(X=X, y=y, sample_weight=sample_weight)
def predict(self, context: np.ndarray) -> np.ndarray:
"""Predict best actions for new data.
Note
--------
Action set predicted by this `predict` method can contain duplicate items.
If a non-repetitive action set is needed, please use the `sample_action` method.
Parameters
-----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
Returns
-----------
action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Action choices made by a classifier, which can contain duplicate items.
If a non-repetitive action set is needed, please use the `sample_action` method.
"""
check_array(array=context, name="context", expected_dim=2)
n_rounds = context.shape[0]
action_dist = np.zeros((n_rounds, self.n_actions, self.len_list))
for p in np.arange(self.len_list):
predicted_actions_at_position = self.base_classifier_list[p].predict(
context
)
action_dist[
np.arange(n_rounds),
predicted_actions_at_position,
np.ones(n_rounds, dtype=int) * p,
] += 1
return action_dist
def predict_score(self, context: np.ndarray) -> np.ndarray:
"""Predict non-negative scores for all possible pairs of actions and positions.
Parameters
-----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
Returns
-----------
score_predicted: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Scores for all possible pairs of actions and positions predicted by a classifier.
"""
check_array(array=context, name="context", expected_dim=2)
n = context.shape[0]
score_predicted = np.zeros((n, self.n_actions, self.len_list))
for p in np.arange(self.len_list):
score_predicteds_at_position = self.base_classifier_list[p].predict_proba(
context
)
score_predicted[:, :, p] = score_predicteds_at_position
return score_predicted
def sample_action(
self,
context: np.ndarray,
tau: Union[int, float] = 1.0,
random_state: Optional[int] = None,
) -> np.ndarray:
"""Sample a ranking of (non-repetitive) actions from the Plackett-Luce ranking distribution.
Note
--------
This `sample_action` method samples a **non-repetitive** ranking of actions for new data
:math:`x \\in \\mathcal{X}` via the so-called "Gumbel Softmax trick" as follows.
.. math::
            s(x,a) = \\hat{f}(x,a) / \\tau + \\gamma_{x,a}, \\quad \\gamma_{x,a} \\sim \\mathrm{Gumbel}(0,1)
        :math:`\\tau` is a temperature hyperparameter.
        :math:`f: \\mathcal{X} \\times \\mathcal{A} \\times \\mathcal{K} \\rightarrow \\mathbb{R}_{+}`
        is a scoring function which is now implemented in the `predict_score` method.
        When `len_list > 1`, the expected rewards estimated at different positions will be averaged to form :math:`f(x,a)`.
        :math:`\\gamma_{x,a}` is a random variable sampled from the Gumbel distribution.
        By sorting the actions based on :math:`s(x,a)` for each context, we can efficiently sample a ranking from
the Plackett-Luce ranking distribution.
Parameters
----------------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
tau: int or float, default=1.0
A temperature parameter that controls the randomness of the action choice
by scaling the scores before applying softmax.
As :math:`\\tau \\rightarrow \\infty`, the algorithm will select arms uniformly at random.
random_state: int, default=None
Controls the random seed in sampling actions.
Returns
-----------
sampled_ranking: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Ranking of actions sampled via the Gumbel softmax trick.
"""
check_array(array=context, name="context", expected_dim=2)
check_scalar(tau, name="tau", target_type=(int, float), min_val=0)
n = context.shape[0]
random_ = check_random_state(random_state)
sampled_ranking = np.zeros((n, self.n_actions, self.len_list))
scores = self.predict_score(context=context).mean(2) / tau
scores += random_.gumbel(size=scores.shape)
sampled_ranking_full = np.argsort(-scores, axis=1)
for p in np.arange(self.len_list):
sampled_ranking[np.arange(n), sampled_ranking_full[:, p], p] = 1
return sampled_ranking
def predict_proba(
self,
context: np.ndarray,
tau: Union[int, float] = 1.0,
) -> np.ndarray:
"""Obtains action choice probabilities for new data based on scores predicted by a classifier.
Note
--------
This `predict_proba` method obtains action choice probabilities for new data :math:`x \\in \\mathcal{X}`
by applying the softmax function as follows:
.. math::
P (A = a | x) = \\frac{\\mathrm{exp}(f(x,a) / \\tau)}{\\sum_{a^{\\prime} \\in \\mathcal{A}} \\mathrm{exp}(f(x,a^{\\prime}) / \\tau)},
where :math:`A` is a random variable representing an action, and :math:`\\tau` is a temperature hyperparameter.
:math:`f: \\mathcal{X} \\times \\mathcal{A} \\rightarrow \\mathbb{R}_{+}`
is a scoring function which is now implemented in the `predict_score` method.
**Note that this method can be used only when `len_list=1`, please use the `sample_action` method otherwise.**
Parameters
----------------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
tau: int or float, default=1.0
A temperature parameter that controls the randomness of the action choice
by scaling the scores before applying softmax.
As :math:`\\tau \\rightarrow \\infty`, the algorithm will select arms uniformly at random.
Returns
-----------
choice_prob: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Action choice probabilities obtained by a trained classifier.
"""
assert (
self.len_list == 1
), "predict_proba method cannot be used when `len_list != 1`"
check_array(array=context, name="context", expected_dim=2)
check_scalar(tau, name="tau", target_type=(int, float), min_val=0)
score_predicted = self.predict_score(context=context)
choice_prob = softmax(score_predicted / tau, axis=1)
return choice_prob
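# Usage sketch (added; not from the original library): IPWLearner reduces
# off-policy learning to weighted classification, so fitting only needs logged
# (context, action, reward, pscore) arrays. The synthetic data below is made
# up purely to show the expected shapes.
def _demo_ipw_learner(n_rounds: int = 500, n_actions: int = 3, dim_context: int = 5):
    rng = np.random.default_rng(12345)
    context = rng.normal(size=(n_rounds, dim_context))
    action = rng.integers(n_actions, size=n_rounds)
    reward = rng.binomial(1, 0.5, size=n_rounds)
    pscore = np.full(n_rounds, 1.0 / n_actions)  # uniform logging policy
    learner = IPWLearner(n_actions=n_actions, base_classifier=LogisticRegression())
    learner.fit(context=context, action=action, reward=reward, pscore=pscore)
    return learner.predict(context=context)  # shape (n_rounds, n_actions, 1)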
@dataclass
class QLearner(BaseOfflinePolicyLearner):
"""Off-policy learner based on Direct Method.
Parameters
-----------
n_actions: int
Number of actions.
len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
When Open Bandit Dataset is used, 3 should be set.
base_model: BaseEstimator
Machine learning model used to estimate the q function (expected reward function).
fitting_method: str, default='normal'
Method to fit the regression model.
Must be one of ['normal', 'iw'] where 'iw' stands for importance weighting.
"""
base_model: Optional[BaseEstimator] = None
fitting_method: str = "normal"
def __post_init__(self) -> None:
"""Initialize class."""
super().__post_init__()
self.q_estimator = RegressionModel(
n_actions=self.n_actions,
len_list=self.len_list,
base_model=self.base_model,
fitting_method=self.fitting_method,
)
def fit(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
) -> None:
"""Fits an offline bandit policy on the given logged bandit feedback data.
Note
--------
        This `fit` method trains an estimator for the q function :math:`q(x,a) := \\mathbb{E} [r \\mid x, a]` as follows.
        .. math::
            \\hat{q} \\in \\arg \\min_{q \\in \\mathcal{Q}} \\mathbb{E}_{n} [ \\ell ( r_i, q (x_i,a_i) ) ]
where :math:`\\ell` is a loss function in training the q estimator.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a learner assumes that only a single action is chosen for each data.
When `len_list` > 1, position has to be set.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=reward,
pscore=pscore,
position=position,
)
if pscore is None:
n_actions = np.int32(action.max() + 1)
pscore = np.ones_like(action) / n_actions
if self.len_list == 1:
position = np.zeros_like(action, dtype=int)
else:
if position is None:
raise ValueError("When `self.len_list > 1`, `position` must be given.")
unif_action_dist = np.ones((context.shape[0], self.n_actions, self.len_list))
self.q_estimator.fit(
context=context,
action=action,
reward=reward,
position=position,
pscore=pscore,
action_dist=unif_action_dist,
)
def predict(
self,
context: np.ndarray,
tau: Union[int, float] = 1.0,
) -> np.ndarray:
"""Predict best actions for new data deterministically.
Note
--------
This `predict` method predicts the best actions for new data deterministically as follows.
.. math::
\\hat{a}_i \\in \\arg \\max_{a \\in \\mathcal{A}} \\hat{q}(x_i, a)
        where :math:`\\hat{q}(x,a)` is an estimator for the q function :math:`q(x,a) := \\mathbb{E} [r \\mid x, a]`.
Note that action sets predicted by this `predict` method can contain duplicate items.
If a non-repetitive action set is needed, please use the `sample_action` method.
Parameters
-----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
Returns
-----------
action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Deterministic action choices made by the QLearner.
The output can contain duplicated items (when `len_list > 1`).
"""
check_array(array=context, name="context", expected_dim=2)
check_scalar(tau, name="tau", target_type=(int, float), min_val=0)
q_hat = self.predict_score(context=context)
q_hat_argmax = np.argmax(q_hat, axis=1).astype(int)
n = context.shape[0]
action_dist = np.zeros_like(q_hat)
for p in np.arange(self.len_list):
action_dist[np.arange(n), q_hat_argmax[:, p], p] = 1
return action_dist
def predict_score(self, context: np.ndarray) -> np.ndarray:
"""Predict the expected rewards for all possible pairs of actions and positions.
Parameters
-----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
Returns
-----------
q_hat: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Expected rewards for all possible pairs of actions and positions. :math:`\\hat{q}(x,a)`.
"""
check_array(array=context, name="context", expected_dim=2)
q_hat = self.q_estimator.predict(context=context)
return q_hat
def sample_action(
self,
context: np.ndarray,
tau: Union[int, float] = 1.0,
random_state: Optional[int] = None,
) -> np.ndarray:
"""Sample a ranking of (non-repetitive) actions from the Plackett-Luce ranking distribution.
Note
--------
This `sample_action` method samples a ranking of (non-repetitive) actions for new data
based on :math:`\\hat{q}` and the so-called "Gumbel Softmax trick" as follows.
.. math::
            s(x,a) = \\hat{q}(x,a) / \\tau + \\gamma_{x,a}, \\quad \\gamma_{x,a} \\sim \\mathrm{Gumbel}(0,1)
        :math:`\\tau` is a temperature hyperparameter.
        :math:`\\hat{q}: \\mathcal{X} \\times \\mathcal{A} \\times \\mathcal{K} \\rightarrow \\mathbb{R}_{+}`
        is a q function estimator, which is now implemented in the `predict_score` method.
        When `len_list > 1`, the expected rewards estimated at different positions will be averaged to form :math:`\\hat{q}(x,a)`.
        :math:`\\gamma_{x,a}` is a random variable sampled from the Gumbel distribution.
        By sorting the actions based on :math:`s(x,a)` for each context, we can efficiently sample a ranking from
the Plackett-Luce ranking distribution.
Parameters
----------------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
tau: int or float, default=1.0
A temperature parameter that controls the randomness of the action choice
by scaling the scores before applying softmax.
As :math:`\\tau \\rightarrow \\infty`, the algorithm will select arms uniformly at random.
random_state: int, default=None
Controls the random seed in sampling actions.
Returns
-----------
sampled_action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Ranking of actions sampled from the Plackett-Luce ranking distribution via the Gumbel softmax trick.
"""
check_array(array=context, name="context", expected_dim=2)
check_scalar(tau, name="tau", target_type=(int, float), min_val=0)
n = context.shape[0]
random_ = check_random_state(random_state)
sampled_action = np.zeros((n, self.n_actions, self.len_list))
scores = self.predict_score(context=context).mean(2) / tau
scores += random_.gumbel(size=scores.shape)
ranking = np.argsort(-scores, axis=1)
for p in np.arange(self.len_list):
sampled_action[np.arange(n), ranking[:, p], p] = 1
return sampled_action
def predict_proba(
self,
context: np.ndarray,
tau: Union[int, float] = 1.0,
) -> np.ndarray:
"""Obtains action choice probabilities for new data based on the estimated expected rewards.
Note
--------
This `predict_proba` method obtains action choice probabilities for new data based on :math:`\\hat{q}` as follows.
.. math::
\\pi_{l} (a|x) = \\frac{\\mathrm{exp}( \\hat{q}_{l}(x,a) / \\tau)}{\\sum_{a^{\\prime} \\in \\mathcal{A}} \\mathrm{exp}( \\hat{q}_{l}(x,a^{\\prime}) / \\tau)}
where :math:`\\pi_{l} (a|x)` is the resulting action choice probabilities at position :math:`l`.
:math:`\\tau` is a temperature hyperparameter.
:math:`\\hat{q}: \\mathcal{X} \\times \\mathcal{A} \\times \\mathcal{K} \\rightarrow \\mathbb{R}_{+}`
is a q function estimator for position :math:`l`, which is now implemented in the `predict_score` method.
Parameters
----------------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
tau: int or float, default=1.0
A temperature parameter that controls the randomness of the action choice
by scaling the scores before applying softmax.
As :math:`\\tau \\rightarrow \\infty`, the algorithm will select arms uniformly at random.
Returns
-----------
action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
Action choice probabilities obtained from the estimated expected rewards.
"""
check_array(array=context, name="context", expected_dim=2)
check_scalar(tau, name="tau", target_type=(int, float), min_val=0)
q_hat = self.predict_score(context=context)
action_dist = softmax_axis1(q_hat / tau)
return action_dist
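# Illustrative sketch (added): predict_proba above is a temperature-scaled
# softmax over the estimated q values along the action axis. The q_hat array
# below is fabricated to show how a larger tau flattens the distribution.
def _demo_softmax_temperature():
    q_hat = np.array([[[1.0], [2.0], [3.0]]])  # shape (1, n_actions=3, len_list=1)
    sharp = softmax(q_hat / 0.5, axis=1)       # low tau: close to argmax
    flat = softmax(q_hat / 10.0, axis=1)       # high tau: close to uniform
    return sharp, flat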
@dataclass
class NNPolicyLearner(BaseOfflinePolicyLearner):
"""Off-policy learner parameterized by a neural network.
Parameters
-----------
n_actions: int
Number of actions.
len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
When Open Bandit Dataset is used, 3 should be set.
dim_context: int
Number of dimensions of context vectors.
policy_reg_param: float, default=0.0
        A hyperparameter to control the policy regularization. :math:`\\lambda_{pol}`.
    var_reg_param: float, default=0.0
        A hyperparameter to control the variance regularization. :math:`\\lambda_{var}`.
off_policy_objective: str
An OPE estimator to estimate the objective function.
Must be one of `dm`, `ipw`, and `dr`.
They stand for
- Direct Method
- Inverse Probability Weighting
- Doubly Robust
, respectively.
hidden_layer_size: Tuple[int, ...], default = (100,)
The i-th element specifies the size of the i-th layer.
activation: str, default='relu'
Activation function.
        Must be one of the following:
        - 'identity', the identity function, :math:`f(x) = x`.
        - 'logistic', the sigmoid function, :math:`f(x) = \\frac{1}{1 + \\exp(-x)}`.
        - 'tanh', the hyperbolic tangent function, :math:`f(x) = \\frac{\\exp(x) - \\exp(-x)}{\\exp(x) + \\exp(-x)}`.
        - 'relu', the rectified linear unit function, :math:`f(x) = \\max(0, x)`.
        - 'elu', the exponential linear unit function (also accepted by the implementation below).
solver: str, default='adam'
Optimizer of the neural network.
        Must be one of the following:
- 'sgd', Stochastic Gradient Descent.
- 'adam', Adam (Kingma and Ba 2014).
- 'adagrad', Adagrad (Duchi et al. 2011).
alpha: float, default=0.001
L2 penalty.
batch_size: Union[int, str], default="auto"
Batch size for SGD, Adagrad, and Adam.
If "auto", the maximum of 200 and the number of samples is used.
If integer, must be positive.
    learning_rate_init: float, default=0.0001
Initial learning rate for SGD, Adagrad, and Adam.
max_iter: int, default=200
Number of epochs for SGD, Adagrad, and Adam.
shuffle: bool, default=True
Whether to shuffle samples in SGD and Adam.
random_state: Optional[int], default=None
Controls the random seed.
tol: float, default=1e-4
Tolerance for training.
When the training loss is not improved at least `tol' for `n_iter_no_change' consecutive iterations,
training is stopped.
momentum: float, default=0.9
Momentum for SGD.
Must be in the range of [0., 1.].
nesterovs_momentum: bool, default=True
        Whether to use Nesterov's momentum.
early_stopping: bool, default=False
Whether to use early stopping for SGD, Adagrad, and Adam.
If set to true, `validation_fraction' of training data is used as validation data,
and training is stopped when the validation loss is not improved at least `tol' for `n_iter_no_change' consecutive iterations.
validation_fraction: float, default=0.1
Fraction of validation data when early stopping is used.
Must be in the range of (0., 1.].
beta_1: float, default=0.9
Coefficient used for computing running average of gradient for Adam.
Must be in the range of [0., 1.].
beta_2: float, default=0.999
Coefficient used for computing running average of the square of gradient for Adam.
Must be in the range of [0., 1.].
epsilon: float, default=1e-8
Term for numerical stability in Adam.
n_iter_no_change: int, default=10
Maximum number of not improving epochs when early stopping is used.
q_func_estimator_hyperparams: Dict, default=None
A set of hyperparameters to define q function estimator.
References:
------------
<NAME> and <NAME>.
"On the Limited Memory Method for Large Scale Optimization.", 1989
<NAME> and <NAME>.
"Adam: A Method for Stochastic Optimization.", 2014
<NAME>, <NAME>, and <NAME>.
"Adaptive Subgradient Methods for Online Learning and Stochastic Optimization", 2011.
"""
dim_context: Optional[int] = None
off_policy_objective: Optional[str] = None
policy_reg_param: float = 0.0
var_reg_param: float = 0.0
hidden_layer_size: Tuple[int, ...] = (100,)
activation: str = "relu"
solver: str = "adam"
alpha: float = 0.0001
batch_size: Union[int, str] = "auto"
learning_rate_init: float = 0.0001
max_iter: int = 200
shuffle: bool = True
random_state: Optional[int] = None
tol: float = 1e-4
momentum: float = 0.9
nesterovs_momentum: bool = True
early_stopping: bool = False
validation_fraction: float = 0.1
beta_1: float = 0.9
beta_2: float = 0.999
epsilon: float = 1e-8
n_iter_no_change: int = 10
q_func_estimator_hyperparams: Optional[Dict] = None
def __post_init__(self) -> None:
"""Initialize class."""
super().__post_init__()
check_scalar(self.dim_context, "dim_context", int, min_val=1)
if self.off_policy_objective not in ["dm", "ipw", "dr"]:
raise ValueError(
"`off_policy_objective` must be one of 'dm', 'ipw', or 'dr'"
f", but {self.off_policy_objective} is given"
)
check_scalar(
self.policy_reg_param,
"policy_reg_param",
(int, float),
min_val=0.0,
)
check_scalar(
self.var_reg_param,
"var_reg_param",
(int, float),
min_val=0.0,
)
if not isinstance(self.hidden_layer_size, tuple) or any(
[not isinstance(h, int) or h <= 0 for h in self.hidden_layer_size]
):
raise ValueError(
f"`hidden_layer_size` must be a tuple of positive integers, but {self.hidden_layer_size} is given"
)
if self.solver not in ("adagrad", "sgd", "adam"):
raise ValueError(
f"`solver` must be one of 'adam', 'adagrad', or 'sgd', but {self.solver} is given"
)
check_scalar(self.alpha, "alpha", float, min_val=0.0)
if self.batch_size != "auto" and (
not isinstance(self.batch_size, int) or self.batch_size <= 0
):
raise ValueError(
f"`batch_size` must be a positive integer or 'auto', but {self.batch_size} is given"
)
check_scalar(self.learning_rate_init, "learning_rate_init", float)
if self.learning_rate_init <= 0.0:
raise ValueError(
f"`learning_rate_init`= {self.learning_rate_init}, must be > 0.0"
)
check_scalar(self.max_iter, "max_iter", int, min_val=1)
if not isinstance(self.shuffle, bool):
raise ValueError(f"`shuffle` must be a bool, but {self.shuffle} is given")
check_scalar(self.tol, "tol", float)
if self.tol <= 0.0:
raise ValueError(f"`tol`= {self.tol}, must be > 0.0")
check_scalar(self.momentum, "momentum", float, min_val=0.0, max_val=1.0)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError(
f"`nesterovs_momentum` must be a bool, but {self.nesterovs_momentum} is given"
)
if not isinstance(self.early_stopping, bool):
raise ValueError(
f"`early_stopping` must be a bool, but {self.early_stopping} is given"
)
check_scalar(
self.validation_fraction, "validation_fraction", float, max_val=1.0
)
if self.validation_fraction <= 0.0:
raise ValueError(
f"`validation_fraction`= {self.validation_fraction}, must be > 0.0"
)
if self.q_func_estimator_hyperparams is not None:
if not isinstance(self.q_func_estimator_hyperparams, dict):
raise ValueError(
"`q_func_estimator_hyperparams` must be a dict"
f", but {type(self.q_func_estimator_hyperparams)} is given"
)
check_scalar(self.beta_1, "beta_1", float, min_val=0.0, max_val=1.0)
check_scalar(self.beta_2, "beta_2", float, min_val=0.0, max_val=1.0)
check_scalar(self.epsilon, "epsilon", float, min_val=0.0)
check_scalar(self.n_iter_no_change, "n_iter_no_change", int, min_val=1)
if self.random_state is not None:
self.random_ = check_random_state(self.random_state)
torch.manual_seed(self.random_state)
if self.activation == "identity":
activation_layer = nn.Identity
elif self.activation == "logistic":
activation_layer = nn.Sigmoid
elif self.activation == "tanh":
activation_layer = nn.Tanh
elif self.activation == "relu":
activation_layer = nn.ReLU
elif self.activation == "elu":
activation_layer = nn.ELU
else:
raise ValueError(
"`activation` must be one of 'identity', 'logistic', 'tanh', 'relu', or 'elu'"
f", but {self.activation} is given"
)
layer_list = []
input_size = self.dim_context
for i, h in enumerate(self.hidden_layer_size):
layer_list.append(("l{}".format(i), nn.Linear(input_size, h)))
layer_list.append(("a{}".format(i), activation_layer()))
input_size = h
layer_list.append(("output", nn.Linear(input_size, self.n_actions)))
layer_list.append(("softmax", nn.Softmax(dim=1)))
self.nn_model = nn.Sequential(OrderedDict(layer_list))
if self.off_policy_objective != "ipw":
if self.q_func_estimator_hyperparams is not None:
self.q_func_estimator_hyperparams["n_actions"] = self.n_actions
self.q_func_estimator_hyperparams["dim_context"] = self.dim_context
self.q_func_estimator = QFuncEstimator(
**self.q_func_estimator_hyperparams
)
else:
self.q_func_estimator = QFuncEstimator(
n_actions=self.n_actions, dim_context=self.dim_context
)
def _create_train_data_for_opl(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
position: np.ndarray,
**kwargs,
) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.DataLoader]]:
"""Create training data for off-policy learning.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a learner assumes that only a single action is chosen for each data.
Returns
--------
(training_data_loader, validation_data_loader): Tuple[DataLoader, Optional[DataLoader]]
Training and validation data loaders in PyTorch
"""
if self.batch_size == "auto":
batch_size_ = min(200, context.shape[0])
else:
check_scalar(self.batch_size, "batch_size", int, min_val=1)
batch_size_ = self.batch_size
dataset = NNPolicyDataset(
torch.from_numpy(context).float(),
torch.from_numpy(action).long(),
torch.from_numpy(reward).float(),
torch.from_numpy(pscore).float(),
torch.from_numpy(position).float(),
)
if self.early_stopping:
if context.shape[0] <= 1:
raise ValueError(
f"the number of samples is too small ({context.shape[0]}) to create validation data"
)
validation_size = max(int(context.shape[0] * self.validation_fraction), 1)
training_size = context.shape[0] - validation_size
training_dataset, validation_dataset = torch.utils.data.random_split(
dataset, [training_size, validation_size]
)
training_data_loader = torch.utils.data.DataLoader(
training_dataset,
batch_size=batch_size_,
shuffle=self.shuffle,
)
validation_data_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=batch_size_,
shuffle=self.shuffle,
)
return training_data_loader, validation_data_loader
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size_,
shuffle=self.shuffle,
)
return data_loader, None
def fit(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
) -> None:
"""Fits an offline bandit policy on the given logged bandit data.
Note
----------
Given the training data :math:`\\mathcal{D}`, this policy maximizes the following objective function:
.. math::
\\hat{V}(\\pi_\\theta; \\mathcal{D}) - \\alpha \\Omega(\\theta)
where :math:`\\hat{V}` is an OPE estimator and :math:`\\alpha \\Omega(\\theta)` is a regularization term.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a learner assumes that only a single action is chosen for each data.
When `len_list` > 1, position has to be set.
Currently, this feature is not supported.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=reward,
pscore=pscore,
position=position,
)
if context.shape[1] != self.dim_context:
raise ValueError(
"Expected `context.shape[1] == self.dim_context`, but found it False"
)
if pscore is None:
pscore = np.ones_like(action) / self.n_actions
if self.len_list == 1:
position = np.zeros_like(action, dtype=int)
# train q function estimator when it is needed to train NNPolicy
if self.off_policy_objective != "ipw":
self.q_func_estimator.fit(
context=context,
action=action,
reward=reward,
)
if self.solver == "sgd":
optimizer = optim.SGD(
self.nn_model.parameters(),
lr=self.learning_rate_init,
momentum=self.momentum,
weight_decay=self.alpha,
nesterov=self.nesterovs_momentum,
)
elif self.solver == "adagrad":
optimizer = optim.Adagrad(
self.nn_model.parameters(),
lr=self.learning_rate_init,
eps=self.epsilon,
weight_decay=self.alpha,
)
elif self.solver == "adam":
optimizer = optim.Adam(
self.nn_model.parameters(),
lr=self.learning_rate_init,
betas=(self.beta_1, self.beta_2),
eps=self.epsilon,
weight_decay=self.alpha,
)
else:
raise NotImplementedError(
"`solver` must be one of 'adam', 'adagrad', or 'sgd'"
)
training_data_loader, validation_data_loader = self._create_train_data_for_opl(
context, action, reward, pscore, position
)
# start policy training
n_not_improving_training = 0
previous_training_loss = None
n_not_improving_validation = 0
previous_validation_loss = None
for _ in tqdm(np.arange(self.max_iter), desc="policy learning"):
self.nn_model.train()
for x, a, r, p, pos in training_data_loader:
optimizer.zero_grad()
action_dist_by_current_policy = self.nn_model(x).unsqueeze(-1)
policy_value_arr = self._estimate_policy_value(
context=x,
reward=r,
action=a,
pscore=p,
action_dist=action_dist_by_current_policy,
position=pos,
)
policy_constraint = self._estimate_policy_constraint(
action=a,
pscore=p,
action_dist=action_dist_by_current_policy,
)
variance_constraint = torch.var(policy_value_arr)
negative_loss = policy_value_arr.mean()
negative_loss += self.policy_reg_param * policy_constraint
negative_loss -= self.var_reg_param * variance_constraint
loss = -negative_loss
loss.backward()
optimizer.step()
loss_value = loss.item()
if previous_training_loss is not None:
if loss_value - previous_training_loss < self.tol:
n_not_improving_training += 1
else:
n_not_improving_training = 0
if n_not_improving_training >= self.n_iter_no_change:
break
previous_training_loss = loss_value
if self.early_stopping:
self.nn_model.eval()
for x, a, r, p, pos in validation_data_loader:
action_dist_by_current_policy = self.nn_model(x).unsqueeze(-1)
policy_value_arr = self._estimate_policy_value(
context=x,
reward=r,
action=a,
pscore=p,
action_dist=action_dist_by_current_policy,
position=pos,
)
policy_constraint = self._estimate_policy_constraint(
action=a,
pscore=p,
action_dist=action_dist_by_current_policy,
)
variance_constraint = torch.var(policy_value_arr)
negative_loss = policy_value_arr.mean()
negative_loss += self.policy_reg_param * policy_constraint
negative_loss -= self.var_reg_param * variance_constraint
loss = -negative_loss
loss_value = loss.item()
if previous_validation_loss is not None:
if loss_value - previous_validation_loss < self.tol:
n_not_improving_validation += 1
else:
n_not_improving_validation = 0
if n_not_improving_validation > self.n_iter_no_change:
break
previous_validation_loss = loss_value
def _estimate_policy_value(
self,
context: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
pscore: torch.Tensor,
action_dist: torch.Tensor,
position: torch.Tensor,
) -> torch.Tensor:
"""Calculate policy loss used in the policy gradient method.
Parameters
-----------
context: array-like, shape (batch_size, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (batch_size,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (batch_size,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (batch_size,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
action_dist: array-like, shape (batch_size, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
Returns
----------
estimated_policy_grad: array-like, shape (batch_size,)
            Per-sample contribution to the policy gradient objective, estimated by the chosen OPE estimator.
"""
current_pi = action_dist[:, :, 0].detach()
log_prob = torch.log(action_dist[:, :, 0])
idx_tensor = torch.arange(action.shape[0], dtype=torch.long)
if self.off_policy_objective == "dm":
q_hat = self.q_func_estimator.predict(
context=context,
)
estimated_policy_grad = torch.sum(q_hat * current_pi * log_prob, dim=1)
elif self.off_policy_objective == "ipw":
iw = current_pi[idx_tensor, action] / pscore
estimated_policy_grad = iw * reward
estimated_policy_grad *= log_prob[idx_tensor, action]
elif self.off_policy_objective == "dr":
q_hat = self.q_func_estimator.predict(
context=context,
)
q_hat_factual = q_hat[idx_tensor, action]
iw = current_pi[idx_tensor, action] / pscore
estimated_policy_grad = iw * (reward - q_hat_factual)
estimated_policy_grad *= log_prob[idx_tensor, action]
estimated_policy_grad += torch.sum(q_hat * current_pi * log_prob, dim=1)
return estimated_policy_grad
def _estimate_policy_constraint(
self,
action: torch.Tensor,
pscore: torch.Tensor,
action_dist: torch.Tensor,
) -> torch.Tensor:
"""Estimate the policy constraint term.
Parameters
-----------
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
"""
idx_tensor = torch.arange(action.shape[0], dtype=torch.long)
iw = action_dist[idx_tensor, action, 0] / pscore
return torch.log(iw.mean())
def predict(self, context: np.ndarray) -> np.ndarray:
"""Predict best actions for new data.
Note
--------
Action set predicted by this `predict` method can contain duplicate items.
If a non-repetitive action set is needed, please use the `sample_action` method.
Parameters
-----------
context: array-like, shape (n_rounds_of_new_data, dim_context)
Context vectors for new data.
Returns
-----------
action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
            Deterministic action choices made by the learned policy, which can contain duplicate items.
If a non-repetitive action set is needed, please use the `sample_action` method.
"""
check_array(array=context, name="context", expected_dim=2)
if context.shape[1] != self.dim_context:
raise ValueError(
"Expected `context.shape[1] == self.dim_context`, but found it False"
)
self.nn_model.eval()
x = torch.from_numpy(context).float()
y = self.nn_model(x).detach().numpy()
n = context.shape[0]
predicted_actions = np.argmax(y, axis=1)
        action_dist = np.zeros((n, self.n_actions, 1))
        action_dist[np.arange(n), predicted_actions, 0] = 1
        return action_dist
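# A minimal end-to-end sketch of NNPolicyLearner (argument values below are illustrative
# assumptions, not recommended settings):
#
#     learner = NNPolicyLearner(
#         n_actions=10,
#         dim_context=5,
#         off_policy_objective="ipw",
#         hidden_layer_size=(64, 64),
#     )
#     learner.fit(context=context, action=action, reward=reward, pscore=pscore)
#     action_dist = learner.predict(context=context_test)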
"""
{This script calculates spread in velocity dispersion (sigma) from mocks for
red and blue galaxies as well as smf for red and blue galaxies. It then
finds a non-gaussian distribution that best fits the error in spread
distributions in each bin.}
"""
from cosmo_utils.utils import work_paths as cwpaths
from scipy.stats import normaltest as nt
from chainconsumer import ChainConsumer
from multiprocessing import Pool
import matplotlib.pyplot as plt
from scipy.stats import chi2
from matplotlib import rc
from scipy import stats
import pandas as pd
import numpy as np
import emcee
import math
import os
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
>>> mock_pd = reading_catls(filename, format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def std_func(bins, mass_arr, vel_arr):
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
cen_deltav_arr = []
if index1 == last_index:
break
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
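# A vectorized sketch equivalent to std_func above (up to empty-bin handling); it is not
# used below and is included only to illustrate the same binning logic:
def std_func_vectorized(bins, mass_arr, vel_arr):
    mass_arr = np.asarray(mass_arr)
    vel_arr = np.asarray(vel_arr)
    # np.digitize maps each mass into a bin; subtracting 1 makes bin i span [bins[i], bins[i+1])
    idx = np.digitize(mass_arr, bins) - 1
    # the spread is computed about zero, matching the mean = 0 convention in std_func
    return [np.sqrt(np.mean(vel_arr[idx == i] ** 2)) for i in range(len(bins) - 1)]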
def get_deltav_sigma_mocks(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
# Using defintions from Moffett paper
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def lnprob(theta, x_vals, y_vals, err_tot):
"""
Calculates log probability for emcee
Parameters
----------
theta: array
Array of parameter values
x_vals: array
Array of x-axis values
y_vals: array
Array of y-axis values
err_tot: array
Array of error values of mass function
Returns
---------
lnp: float
Log probability given a model
chi2: float
Value of chi-squared given a model
"""
m, b = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0:
try:
model = m * x_vals + b
chi2 = chi_squared(y_vals, model, err_tot)
lnp = -chi2 / 2
if math.isnan(lnp):
raise ValueError
except (ValueError, RuntimeWarning, UserWarning):
lnp = -np.inf
chi2 = -np.inf
else:
chi2 = -np.inf
lnp = -np.inf
return lnp, chi2
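# Note: lnprob returns (log-probability, chi2); emcee stores the extra return value as a
# "blob", which is what sampler.get_blobs() retrieves later in this script.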
def chi_squared(data, model, err_data):
"""
Calculates chi squared
Parameters
----------
data: array
Array of data values
model: array
Array of model values
err_data: float
Error in data values
Returns
---------
chi_squared: float
Value of chi-squared given a model
"""
chi_squared_arr = (data - model)**2 / (err_data**2)
chi_squared = np.sum(chi_squared_arr)
return chi_squared
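# Quick worked example for chi_squared (illustrative numbers only):
#   chi_squared(np.array([1.0, 2.0]), np.array([1.5, 2.5]), np.array([0.5, 0.5]))
#   = (0.5/0.5)**2 + (0.5/0.5)**2 = 2.0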
global model_init
global survey
global path_to_proc
global mf_type
survey = 'resolveb'
machine = 'mac'
mf_type = 'smf'
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_external = dict_of_paths['ext_dir']
path_to_data = dict_of_paths['data_dir']
if machine == 'bender':
halo_catalog = '/home/asadm2/.astropy/cache/halotools/halo_catalogs/'\
'vishnu/rockstar/vishnu_rockstar_test.hdf5'
elif machine == 'mac':
halo_catalog = path_to_raw + 'vishnu_rockstar_test.hdf5'
if survey == 'eco':
catl_file = path_to_raw + "eco/eco_all.csv"
elif survey == 'resolvea' or survey == 'resolveb':
catl_file = path_to_raw + "resolve/RESOLVE_liveJune2018.csv"
if survey == 'eco':
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
elif survey == 'resolvea':
path_to_mocks = path_to_data + 'mocks/m200b/resolvea/'
elif survey == 'resolveb':
path_to_mocks = path_to_data + 'mocks/m200b/resolveb/'
std_red_mocks, std_blue_mocks, centers_red_mocks, \
centers_blue_mocks = get_deltav_sigma_mocks(survey, path_to_mocks)
## Histogram of red and blue sigma in bins of central stellar mass to see if the
## distribution of values to take std of is normal or lognormal
nrows = 2
ncols = 5
if survey == 'eco' or survey == 'resolvea':
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
fig3, axs = plt.subplots(nrows, ncols)
for i in range(0, nrows, 1):
for j in range(0, ncols, 1):
if i == 0: # row 1 for all red bins
axs[i, j].hist(np.log10(std_red_mocks.T[j]), histtype='step', \
color='indianred', linewidth=4, linestyle='-') # first red bin
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
red_stellar_mass_bins[j],2), np.round(
red_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_red_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
else: # row 2 for all blue bins
axs[i, j].hist(np.log10(std_blue_mocks.T[j]), histtype='step', \
color='cornflowerblue', linewidth=4, linestyle='-')
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
blue_stellar_mass_bins[j],2), np.round(
blue_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_blue_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
for ax in axs.flat:
ax.set(xlabel=r'\boldmath$\sigma \left[km/s\right]$')
for ax in axs.flat:
ax.label_outer()
plt.show()
## Measuring fractional error in sigma of red and blue galaxies in all 5 bins
sigma_av_red = []
frac_err_red = []
for idx in range(len(std_red_mocks.T)):
mean = np.mean(std_red_mocks.T[idx][~np.isnan(std_red_mocks.T[idx])])
sigma_av_red.append(mean)
frac_err = (std_red_mocks.T[idx][~np.isnan(std_red_mocks.T[idx])] \
- mean)/mean
frac_err_red.append(frac_err)
frac_err_red = np.array(frac_err_red, dtype=list)
sigma_av_blue = []
frac_err_blue = []
for idx in range(len(std_blue_mocks.T)):
mean = np.mean(std_blue_mocks.T[idx][~np.isnan(std_blue_mocks.T[idx])])
sigma_av_blue.append(mean)
frac_err = (std_blue_mocks.T[idx][~np.isnan(std_blue_mocks.T[idx])] \
- mean)/mean
frac_err_blue.append(frac_err)
frac_err_blue = np.array(frac_err_blue, dtype=list)
## Fit fractional error distributions
nrows = 2
ncols = 5
if survey == 'eco' or survey == 'resolvea':
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
max_red_arr = np.empty(len(frac_err_red))
max_blue_arr = np.empty(len(frac_err_blue))
for idx in range(len(frac_err_red)):
max_red = plt.hist(frac_err_red[idx], density=True)[0].max()
max_blue = plt.hist(frac_err_blue[idx], density=True)[0].max()
max_red_arr[idx] = max_red + 0.05
max_blue_arr[idx] = max_blue + 0.05
print(np.mean(frac_err_red[idx]))
print(np.mean(frac_err_blue[idx]))
plt.clf()
red_a = []
red_loc = []
red_scale = []
blue_a = []
blue_loc = []
blue_scale = []
fig3, axs = plt.subplots(nrows, ncols)
for i in range(0, nrows, 1):
for j in range(0, ncols, 1):
if i == 0: # row 1 for all red bins
frac_err_arr = frac_err_red[j]
axs[i,j].hist(frac_err_arr, density=True, histtype='step',
linewidth=3, color='k')
# find minimum and maximum of xticks, so we know
# where we should compute theoretical distribution
xt = axs[i,j].get_xticks()
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(frac_err_arr))
loc_log, scale_log = stats.logistic.fit(frac_err_arr)
pdf_logistic = stats.logistic.pdf(lnspc, loc_log, scale_log)
axs[i,j].plot(lnspc, pdf_logistic, label="Logistic")
# a_beta, b_beta, loc_beta, scale_beta = stats.beta.fit(frac_err_arr)
# pdf_beta = stats.beta.pdf(lnspc, a_beta, b_beta, loc_beta,
# scale_beta)
# axs[i,j].plot(lnspc, pdf_beta, label="Beta")
loc_norm, scale_norm = stats.norm.fit(frac_err_arr)
pdf_norm = stats.norm.pdf(lnspc, loc_norm, scale_norm)
axs[i,j].plot(lnspc, pdf_norm, label="Normal")
a_sn, loc_sn, scale_sn = stats.skewnorm.fit(frac_err_arr)
pdf_skewnorm = stats.skewnorm.pdf(lnspc, a_sn, loc_sn, scale_sn)
axs[i,j].plot(lnspc, pdf_skewnorm, label="Skew-normal")
red_a.append(a_sn)
red_loc.append(loc_sn)
red_scale.append(scale_sn)
# a_w, loc_w, scale_w = stats.weibull_min.fit(frac_err_arr)
# pdf_weibull = stats.weibull_min.pdf(lnspc, a_w, loc_w, scale_w)
# axs[i,j].plot(lnspc, pdf_weibull, label="Weibull")
# a_g,loc_g,scale_g = stats.gamma.fit(frac_err_arr)
# pdf_gamma = stats.gamma.pdf(lnspc, a_g, loc_g, scale_g)
# axs[i,j].plot(lnspc, pdf_gamma, label="Gamma")
axs[i,j].set_title('[{0}-{1}]'.format(np.round(
red_stellar_mass_bins[j],2), np.round(
red_stellar_mass_bins[j+1],2)),fontsize=20,
color='indianred')
textstr = '\n'.join((
r'$\mu=%.2f$' % (a_sn, ),
r'$loc=%.2f$' % (loc_sn, ),
r'$scale=%.2f$' % (scale_sn, )))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axs[i,j].set_ylim(0, max_red_arr[j])
# axs[i, j].text(0.4, 0.8, textstr, fontsize=12, bbox=props,
# transform=axs[i, j].transAxes)
else: # row 2 for all blue bins
frac_err_arr = frac_err_blue[j]
axs[i,j].hist(frac_err_arr, density=True, histtype='step',
linewidth=3, color='k')
# find minimum and maximum of xticks, so we know
# where we should compute theoretical distribution
xt = axs[i,j].get_xticks()
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(frac_err_arr))
loc_log, scale_log = stats.logistic.fit(frac_err_arr)
pdf_logistic = stats.logistic.pdf(lnspc, loc_log, scale_log)
axs[i,j].plot(lnspc, pdf_logistic, label="Logistic")
# a_beta, b_beta, loc_beta, scale_beta = stats.beta.fit(frac_err_arr)
# pdf_beta = stats.beta.pdf(lnspc, a_beta, b_beta, loc_beta,
# scale_beta)
# axs[i,j].plot(lnspc, pdf_beta, label="Beta")
loc_norm, scale_norm = stats.norm.fit(frac_err_arr)
pdf_norm = stats.norm.pdf(lnspc, loc_norm, scale_norm)
axs[i,j].plot(lnspc, pdf_norm, label="Normal")
a_sn, loc_sn, scale_sn = stats.skewnorm.fit(frac_err_arr)
pdf_skewnorm = stats.skewnorm.pdf(lnspc, a_sn, loc_sn, scale_sn)
axs[i,j].plot(lnspc, pdf_skewnorm, label="Skew-normal")
blue_a.append(a_sn)
blue_loc.append(loc_sn)
blue_scale.append(scale_sn)
# a_w, loc_w, scale_w = stats.weibull_min.fit(frac_err_arr)
# pdf_weibull = stats.weibull_min.pdf(lnspc, a_w, loc_w, scale_w)
# axs[i,j].plot(lnspc, pdf_weibull, label="Weibull")
# a_g, loc_g, scale_g = stats.gamma.fit(frac_err_arr)
# pdf_gamma = stats.gamma.pdf(lnspc, a_g, loc_g,scale_g)
# axs[i,j].plot(lnspc, pdf_gamma, label="Gamma")
axs[i,j].set_title('[{0}-{1}]'.format(np.round(
blue_stellar_mass_bins[j],2), np.round(
blue_stellar_mass_bins[j+1],2)), fontsize=20,
color='cornflowerblue')
textstr = '\n'.join((
r'$\mu=%.2f$' % (a_sn, ),
r'$loc=%.2f$' % (loc_sn, ),
r'$scale=%.2f$' % (scale_sn, )))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axs[i,j].set_ylim(0, max_blue_arr[j])
# axs[i, j].text(0.4, 0.8, textstr, fontsize=12, bbox=props,
# transform=axs[i, j].transAxes)
red_a = np.array(red_a)
red_loc = np.array(red_loc)
red_scale = np.array(red_scale)
blue_a = np.array(blue_a)
blue_loc = np.array(blue_loc)
blue_scale = np.array(blue_scale)
a_arr = (np.array((red_a, blue_a))).flatten()
loc_arr = (np.array((red_loc, blue_loc))).flatten()
scale_arr = (np.array((red_scale, blue_scale))).flatten()
axs[0,0].legend(loc='center right', prop={'size': 8})
axs[1,2].set(xlabel=r'\boldmath$(\sigma - \bar \sigma )/ \bar \sigma$')
plt.show()
## Simulating errors
np.random.seed(30)
m_true_arr = np.round(np.random.uniform(-4.9, 0.4, size=500),2)
b_true_arr = np.round(np.random.uniform(1, 7, size=500),2)
## Keeping data fixed
m_true = m_true_arr[50]
b_true = b_true_arr[50]
N=10
x = np.sort(10*np.random.rand(N))
samples_arr = []
chi2_arr = []
yerr_arr = []
for i in range(500):
print(i)
## Mimicking non-gaussian errors from mocks
# yerr = stats.skewnorm.rvs(a_arr, loc_arr, scale_arr)
## Corresponding gaussian distributions
var_arr = stats.skewnorm.stats(a_arr, loc_arr, scale_arr, moments='mvsk')[1]
# mu_arr = stats.skewnorm.stats(a_arr, loc_arr, scale_arr, moments='mvsk')[0]
std_arr = np.sqrt(var_arr)
## Simulating gaussian errors with mean of 0 and same sigma as corresponding
## non-gaussian fits
yerr = stats.norm.rvs(np.zeros(10), std_arr)
y = m_true * x + b_true
y_new = y + y*yerr
pos = [0,5] + 1e-4 * np.random.randn(64, 2)
nwalkers, ndim = pos.shape
nsteps = 5000
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(x, y_new, std_arr))
sampler.run_mcmc(pos, nsteps, store=True, progress=True)
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
chi2 = sampler.get_blobs(discard=100, thin=15, flat=True)
samples_arr.append(flat_samples)
chi2_arr.append(chi2)
yerr_arr.append(yerr)
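# NOTE: the two groups of assignments below assume the loop above is run twice by hand --
# once with the skew-normal (non-gaussian) yerr line enabled and once with the gaussian
# yerr line -- so that each group of arrays is populated after the corresponding run.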
non_gaussian_samples_arr = np.array(samples_arr)
non_gaussian_chi2_arr = np.array(chi2_arr)
non_gaussian_yerr_arr = np.array(yerr_arr)
gaussian_samples_arr = np.array(samples_arr)
gaussian_chi2_arr = np.array(chi2_arr)
gaussian_yerr_arr = np.array(yerr_arr)
# Get parameter values for each realization that correspond to lowest
# chi-squared value
gaussian_minchi2_arr = []
gaussian_minm_arr = []
gaussian_minb_arr = []
for idx in range(len(gaussian_chi2_arr)):
gaussian_minchi2_arr.append(min(gaussian_chi2_arr[idx]))
gaussian_minm_arr.append(gaussian_samples_arr[idx][:,0][np.argmin(gaussian_chi2_arr[idx])])
gaussian_minb_arr.append(gaussian_samples_arr[idx][:,1][np.argmin(gaussian_chi2_arr[idx])])
samples = np.column_stack((gaussian_minm_arr,gaussian_minb_arr))
non_gaussian_minchi2_arr = []
non_gaussian_minm_arr = []
non_gaussian_minb_arr = []
for idx in range(len(non_gaussian_chi2_arr)):
non_gaussian_minchi2_arr.append(min(non_gaussian_chi2_arr[idx]))
non_gaussian_minm_arr.append(non_gaussian_samples_arr[idx][:,0][np.argmin(non_gaussian_chi2_arr[idx])])
    non_gaussian_minb_arr.append(non_gaussian_samples_arr[idx][:,1][np.argmin(non_gaussian_chi2_arr[idx])])
# standard library imports
import math
import os
import socket
import struct
import time
from pathlib import Path
from threading import Lock, Thread
# third-party imports
import cv2
import numpy as np
# local imports
from tmrl.config.config_constants import LIDAR_BLACK_THRESHOLD
class TM2020OpenPlanetClient:
def __init__(self, host='127.0.0.1', port=9000):
self._host = host
self._port = port
# Threading attributes:
self.__lock = Lock()
self.__data = None
self.__t_client = Thread(target=self.__client_thread, args=(), kwargs={}, daemon=True)
self.__t_client.start()
def __client_thread(self):
"""
Thread of the client.
This listens for incoming data until the object is destroyed
TODO: handle disconnection
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self._host, self._port))
data_raw = b''
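            # the OpenPlanet plugin streams fixed 44-byte frames (11 little-endian floats,
            # matching the '<fffffffffff' unpack in retrieve_data); keep only the most
            # recent complete frame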
while True: # main loop
while len(data_raw) < 44:
data_raw += s.recv(1024)
div = len(data_raw) // 44
data_used = data_raw[(div - 1) * 44:div * 44]
data_raw = data_raw[div * 44:]
self.__lock.acquire()
self.__data = data_used
self.__lock.release()
def retrieve_data(self, sleep_if_empty=0.1):
"""
Retrieves the most recently received data
Use this function to retrieve the most recently received data
        It blocks if nothing has been received so far.
"""
c = True
while c:
self.__lock.acquire()
if self.__data is not None:
data = struct.unpack('<fffffffffff', self.__data)
c = False
self.__lock.release()
if c:
time.sleep(sleep_if_empty)
return data
def load_digits():
p = Path(os.path.dirname(os.path.realpath(__file__))) / 'digits'
zero = cv2.imread(str(p / '0.png'), 0)
    one = cv2.imread(str(p / '1.png'), 0)
    two = cv2.imread(str(p / '2.png'), 0)
    three = cv2.imread(str(p / '3.png'), 0)
    four = cv2.imread(str(p / '4.png'), 0)
    five = cv2.imread(str(p / '5.png'), 0)
    six = cv2.imread(str(p / '6.png'), 0)
    seven = cv2.imread(str(p / '7.png'), 0)
    eight = cv2.imread(str(p / '8.png'), 0)
    nine = cv2.imread(str(p / '9.png'), 0)
    digits = np.array([zero, one, two, three, four, five, six, seven, eight, nine])
    return digits
import numpy as np
class vents:
def __init__(self, segments):
self.segments = segments
(self.maxx, self.maxy) = self._getmaxxy()
self.board = np.zeros((self.maxx+1, self.maxy+1), dtype=int)
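        # each board cell counts how many vent segments cover that (x, y) point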
def _getmaxxy(self):
allxs = [x[0] for x in self.segments]
allxs.extend([x[2] for x in self.segments])
allys = [x[1] for x in self.segments]
allys.extend([x[3] for x in self.segments])
print(f"segments: {self.segments}")
print([x[0] for x in self.segments])
print([x[2] for x in self.segments])
print(f"allxs: {allxs}")
maxx = max(allxs)
maxy = max(allys)
print(f"(maxx, maxy): ({maxx}, {maxy})")
return (int(maxx), int(maxy))
def _draw_vertical(self, s):
print(f"draw vertical: {s}")
x = s[0]
if s[3] < s[1]:
(start, fin) = (s[3], s[1])
else:
(start, fin) = (s[1], s[3])
for y in range(start, fin+1):
self.board[x, y] += 1
print(np.transpose(self.board))
def _draw_horizontal(self, s):
print(f"draw horizontal: {s}")
y = s[1]
if s[2] < s[0]:
(start, fin) = (s[2], s[0])
else:
(start, fin) = (s[0], s[2])
for x in range(start, fin+1):
self.board[x, y] += 1
        print(np.transpose(self.board))
import numpy as np
from scipy import signal
def approximate_polygon(coords, tolerance):
"""Approximate a polygonal chain with the specified tolerance.
It is based on the Douglas-Peucker algorithm.
Note that the approximated polygon is always within the convex hull of the
original polygon.
Parameters
----------
coords : (N, 2) array
Coordinate array.
tolerance : float
Maximum distance from original points of polygon to approximated
polygonal chain. If tolerance is 0, the original coordinate array
is returned.
Returns
-------
coords : (M, 2) array
Approximated polygonal chain where M <= N.
References
----------
.. [1] https://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
if tolerance <= 0:
return coords
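    # `chain` is a boolean mask over the input vertices; True marks a vertex that is kept
    # in the approximation (the first and last vertices are always kept)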
chain = np.zeros(coords.shape[0], 'bool')
# pre-allocate distance array for all points
dists = np.zeros(coords.shape[0])
chain[0] = True
chain[-1] = True
pos_stack = [(0, chain.shape[0] - 1)]
end_of_chain = False
while not end_of_chain:
start, end = pos_stack.pop()
# determine properties of current line segment
r0, c0 = coords[start, :]
r1, c1 = coords[end, :]
dr = r1 - r0
dc = c1 - c0
segment_angle = - np.arctan2(dr, dc)
segment_dist = c0 * np.sin(segment_angle) + r0 * np.cos(segment_angle)
# select points in-between line segment
segment_coords = coords[start + 1:end, :]
segment_dists = dists[start + 1:end]
# check whether to take perpendicular or euclidean distance with
# inner product of vectors
# vectors from points -> start and end
dr0 = segment_coords[:, 0] - r0
dc0 = segment_coords[:, 1] - c0
dr1 = segment_coords[:, 0] - r1
dc1 = segment_coords[:, 1] - c1
# vectors points -> start and end projected on start -> end vector
projected_lengths0 = dr0 * dr + dc0 * dc
projected_lengths1 = - dr1 * dr - dc1 * dc
perp = np.logical_and(projected_lengths0 > 0,
projected_lengths1 > 0)
eucl = np.logical_not(perp)
segment_dists[perp] = np.abs(
            segment_coords[perp, 0] * np.cos(segment_angle)
            + segment_coords[perp, 1] * np.sin(segment_angle)
            - segment_dist)
from unittest import TestCase
from unittest.mock import patch
from numpy import array_equal
from numpy.ma import array
from qilib.data_set import DataSet, DataArray
from qtt.measurements.post_processing import SignalProcessorInterface
from qtt.measurements.post_processing import SignalProcessorRunner
class TestSignalProcessorRunner(TestCase):
@staticmethod
def test_run():
with patch('qtt.measurements.post_processing.interfaces.signal_processor_interface.SignalProcessorInterface',
spec=SignalProcessorInterface) as spi:
class DummySignalProcessor(spi):
def __init__(self):
self._signal_data = None
def run_process(self, signal_data: DataSet) -> DataSet:
self._signal_data = signal_data
return signal_data
signal_processor_runner = SignalProcessorRunner()
signal_processor_runner.add_signal_processor(DummySignalProcessor())
data_set = DataSet()
signal_processor_runner.run(data_set)
spi.run_process.assert_called_once()
spi.run_process.assert_called_with(data_set)
def test_add_signal_processor(self):
class DummySignalProcessor(SignalProcessorInterface):
def __init__(self):
self._signal_data = None
def run_process(self, signal_data: DataSet) -> DataSet:
self._signal_data = signal_data
return signal_data
signal_processor = SignalProcessorRunner()
signal_processor.add_signal_processor(DummySignalProcessor())
self.assertEqual(len(signal_processor._signal_processors), 1)
self.assertIsInstance(signal_processor._signal_processors[0], DummySignalProcessor)
def test_add_signal_processor_with_wrong_type(self):
signal_processor = SignalProcessorRunner()
with self.assertRaises(TypeError):
signal_processor.add_signal_processor(None)
self.assertEqual(len(signal_processor._signal_processors), 0)
def test_run_process_without_signal_processor(self):
data_set = DataSet(data_arrays=DataArray('x', 'x', preset_data=array([1, 2, 3, 4, 5])))
signal_processor_runner = SignalProcessorRunner()
new_data_set = signal_processor_runner.run(data_set)
self.assertIs(data_set.data_arrays['x'], new_data_set.data_arrays['x'])
        self.assertTrue(array_equal(new_data_set.data_arrays['x'], array([1, 2, 3, 4, 5])))
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 11:43:28 2021
@author: <NAME>
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
class Panels:
def __init__(self, n_panels):
self.n_panels = self.get_n_panels(n_panels)
self.x_coords, self.y_coords, self.camber_line = self.get_coords(self.n_panels)
self.control_x_coords, self.control_y_coords = self.get_control_points(self.x_coords, self.y_coords)
self.normal = self.get_normal(self.x_coords, self.y_coords)
self.lengths = self.get_length(self.x_coords, self.y_coords)
self.theta = self.get_angles(self.x_coords, self.y_coords, self.lengths)
# Allows user to set y coords of panels
# @param y coords to be set
def set_y_coords(self, y_coords):
self.y_coords = y_coords
self.camber_line = self.get_camber(self.y_coords)
self.control_x_coords, self.control_y_coords = self.get_control_points(self.x_coords, self.y_coords)
self.normal = self.get_normal(self.x_coords, self.y_coords)
self.lengths = self.get_length(self.x_coords, self.y_coords)
self.theta = self.get_angles(self.x_coords, self.y_coords, self.lengths)
# Calculates the camberline of given panels coordinates
# @param Y cooridinates of panels
def get_camber(self, y_coords):
bot_surface = y_coords[0:len(y_coords)//2+1]
top_surface = np.flip(y_coords[len(y_coords)//2:])
camber_line = (top_surface + bot_surface) / 2.0
return camber_line
# Ensures the passed number of panels is valid
# @param Number of panels to create
def get_n_panels(self, n_panels):
if int(round(n_panels)) % 2 == 0:
return int(round(n_panels))
else:
raise Exception("Invalid number of panels (must be even).")
# Gets the x/c and y/c normalized coordinates of the panels
# @param Number of panels
def get_coords(self, n_panels):
x_coords = self.get_x_coords(n_panels)
y_coords, camber_line = self.get_y_coords(x_coords)
return x_coords, y_coords, camber_line
# Gets the x/c normalized coordinates of the panels
# @param Number of panels
def get_x_coords(self, n_panels):
n = (n_panels//2)
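        # cosine spacing clusters the panel end points near the leading and trailing edges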
j = np.arange(n+1)
top_coords = 0.5 - 0.5*np.cos(j*np.pi/n)
bot_coords = 0.5 + 0.5*np.cos(j*np.pi/n)
x_coords = np.concatenate((bot_coords, top_coords[1:]))
return x_coords
# Gets the y/c normalized coordinates of the panels and camber updated x/c normalized coords of the panels
# @param X cooridinates of panels
def get_y_coords(self, x_coords):
x_on_c = x_coords[0:len(x_coords)//2+1]
yf = 0.15 * np.random.rand() + 0.10
xf = 0.30 * np.random.rand() + 0.10
m0 = (100.0 - 2.0*(yf/xf)) * np.random.rand() + 2.0*(yf/xf)
a = np.sqrt(xf/(m0*(m0*xf-2.0*yf)))*abs(m0*xf-yf)
b = abs((m0*xf-yf)*yf)/(m0*xf-2.0*yf)
h = xf
k = (-yf*yf)/(m0*xf-2.0*yf)
LE_thickness = ((b*np.sqrt(a*a-(x_on_c*(x_on_c<=xf)-h)**2.0)+a*k) / a) * (x_on_c<=xf)
c = -yf/(xf*xf-2.0*xf+1)
d = (2.0*xf*yf)/(xf*xf-2.0*xf+1)
e = (yf*(1-2.0*xf))/(xf*xf-2.0*xf+1)
TE_thickness = (c*x_on_c*x_on_c + d*x_on_c + e) * (x_on_c>xf)
half_thickness = 0.5*LE_thickness + 0.5*TE_thickness
half_thickness[half_thickness<1.0e-4]=0.0
x1 = 0.40 * np.random.rand() + 0.10
y1 = ((0.08 - 0.0001) * np.random.rand() + 0.0001)*np.sign(-np.random.rand()+0.75)
xm = 0.30 * np.random.rand() + 0.65
if xm >= 0.80:
xm = 1.0
x2 = 1.1
y2 = 0.0
else:
x2 = 0.10 * np.random.rand() + 0.85
y2 = -((0.03 - 0.0001) * np.random.rand() + 0.0001)*np.sign(y1)
f1 = (2.0*y1*x_on_c)/x1 - (y1*x_on_c*x_on_c)/(x1*x1)
f2 = (-y1*x_on_c*x_on_c)/(x1*x1-2.0*x1*xm+xm*xm) + (2.0*x1*y1*x_on_c)/(x1*x1-2.0*x1*xm+xm*xm) - (y1*xm*(2.0*x1-xm))/(x1*x1-2.0*x1*xm+xm*xm)
f3 = (-y2*x_on_c*x_on_c)/((x2-xm)*(x2-xm)) + (2.0*x2*y2*x_on_c)/((x2-xm)*(x2-xm)) - (y2*xm*(2.0*x2-xm))/((x2-xm)*(x2-xm))
f4 = (-y2*x_on_c*x_on_c)/(x2*x2-2.0*x2+1.0) + (2.0*x2*y2*x_on_c)/(x2*x2-2.0*x2+1.0) - (y2*(2.0*x2-1.0))/(x2*x2-2.0*x2+1.0)
f1 = f1 * (x_on_c>=0.0) * (x_on_c<x1)
f2 = f2 * (x_on_c>=x1) * (x_on_c<=xm)
f3 = f3 * (x_on_c>xm) * (x_on_c<=x2)
f4 = f4 * (x_on_c>x2) * (x_on_c<=1.0)
camber_line = f1+f2+f3+f4
camber_line[abs(camber_line)<1.0e-4]=0.0
y_upper = camber_line + half_thickness
y_lower = camber_line - half_thickness
y_coords = np.concatenate((y_lower, np.flip(y_upper)[1:]))
y_coords[0] = 0.0
y_coords[-1] = 0.0
return y_coords, camber_line
# Gets the locations of the control points
# @param X coords of panels
# @param Y coords of panels
def get_control_points(self, x_coords, y_coords):
control_x_coords = x_coords[1:]-0.5*np.diff(x_coords)
control_y_coords = y_coords[1:]-0.5*np.diff(y_coords)
return control_x_coords, control_y_coords
# Solve the normal vectors for each panel
# @param X coords of panels
# @param Y coords of panels
def get_normal(self, x_coords, y_coords):
x_dirn = np.diff(x_coords).reshape(len(x_coords)-1,1)
y_dirn = np.diff(y_coords).reshape(len(y_coords)-1,1)
tangent = np.transpose(np.concatenate((x_dirn, y_dirn), axis=1))
rotation = np.array([[0.0, -1.0],[1.0, 0.0]])
normal = np.matmul(rotation, tangent)
normal = normal / np.sqrt(normal[0,:]**2.0 + normal[1,:]**2.0)
return normal
# Solve the length of each panel
# @param X coords of panels
# @param Y coords of panels
def get_length(self, x_coords, y_coords):
lengths = (np.diff(y_coords)**2.0+np.diff(x_coords)**2.0)**0.50
return lengths
# Solves the orientation angle between each panel and the x-axis
# @param X coords of panels
# @param Y coords of panels
# @param Length of each panel
def get_angles(self, x_coords, y_coords, lengths):
theta = np.arctan2(np.diff(y_coords), np.diff(x_coords))
return theta
# Renders and save the panels
# @param save path
# @param name of airfoil
def draw(self, path, airfoil_name=''):
if not os.path.isdir(path):
os.mkdir(path)
num = 0
done = False
while not done:
done = not (os.path.exists(path + "/airfoil_" + str(num) + ".png"))
num = num + 1
num = num - 1
if 'rebuilt' in airfoil_name.lower():
path = path + "/airfoil_" + str(num-1) + "_rebuilt.png"
else:
path = path + "/airfoil_" + str(num) + ".png"
plt.close()
normal_x_coords_start = self.x_coords[1:]-0.5*np.diff(self.x_coords)
normal_y_coords_start = self.y_coords[1:]-0.5*np.diff(self.y_coords)
normal_x_coords_end = normal_x_coords_start + 0.04 * self.normal[0,:]
normal_y_coords_end = normal_y_coords_start + 0.04 * self.normal[1,:]
plt.plot(self.x_coords[0:len(self.x_coords)//2+1], self.camber_line, lw=2.0, ls="--", c='r')
plt.axhline(0.0, lw=2.0, c='r')
plt.plot(self.x_coords, self.y_coords, color='b', lw=2.0)
plt.plot(self.control_x_coords, self.control_y_coords, 'd', color='g', markersize=7)
plt.plot(self.x_coords, self.y_coords, 'o', color='k', markersize=7)
for i in range(len(normal_x_coords_start)):
x_points = np.array([normal_x_coords_start[i],normal_x_coords_end[i]])
y_points = np.array([normal_y_coords_start[i],normal_y_coords_end[i]])
plt.plot(x_points, y_points, color='k', lw=2.0)
y_diff = np.diff(y_points)
x_diff = np.diff(x_points)
tangent = np.arctan(y_diff / x_diff)[0]*180.0/np.pi
if x_diff >= 0.0 and y_diff >= 0.0:
angle = -(90.0 - tangent)
elif x_diff < 0.0 and y_diff > 0.0:
angle = (90.0 - abs(tangent))
elif x_diff < 0.0 and y_diff < 0.0:
angle = -(90.0 - tangent) + 180.0
elif x_diff > 0.0 and y_diff < 0.0:
angle = (90.0 - abs(tangent)) + 180.0
t = mpl.markers.MarkerStyle(marker='^')
t._transform = t.get_transform().rotate_deg(angle)
plt.plot(normal_x_coords_end[i], normal_y_coords_end[i], marker=t, color='k', markersize=8)
plt.xlabel("X/C [-]", fontsize="large")
plt.ylabel("Y/C [-]", fontsize="large")
if airfoil_name == '':
plt.title('Airfoil', fontsize="xx-large")
else:
plt.title(airfoil_name, fontsize="xx-large")
plt.xlim([-0.05, 1.05])
plt.ylim([-0.25, 0.20])
plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0],fontsize='large')
plt.yticks([-0.25, -0.15, -0.05, 0.05, 0.15, 0.25],fontsize='large')
plt.gcf().set_size_inches(8,2.8)
plt.savefig(path, dpi=200)
class Solver:
def __init__(self):
self.panels=0.0
self.alpha=0.0
self.v_panels=0.0
self.cp=0.0
self.Cl=0.0
self.Cdp=0.0
self.Cmc4=0.0
# Solves the total local velocity at each control point based on linear varying vortex panel method
# @param Angle of attack
# @param Panels object that defines airfoil geometry
def get_velocity_vp(self, alpha, panels):
Cn1 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
Cn2 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
Ct1 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
Ct2 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
for i in range(len(panels.control_x_coords)):
xi = panels.control_x_coords[i]
yi = panels.control_y_coords[i]
theta_i = panels.theta[i]
for j in range(len(panels.control_x_coords)):
theta_j = panels.theta[j]
Sj = panels.lengths[j]
Xj = panels.x_coords[j]
Yj = panels.y_coords[j]
if i==j:
Cn2[i,j] = 1.0
Cn1[i,j] = -1.0
Ct2[i,j] = np.pi/2.0
Ct1[i,j] = np.pi/2.0
else:
A = -(xi - Xj)*np.cos(theta_j) - (yi - Yj)*np.sin(theta_j)
B = (xi - Xj)**2.0 + (yi - Yj)**2.0
C = np.sin(theta_i - theta_j)
D = np.cos(theta_i - theta_j)
E = (xi - Xj)*np.sin(theta_j) - (yi - Yj)*np.cos(theta_j)
F = np.log(1.0 + (Sj**2.0 + 2.0*A*Sj)/B)
G = np.arctan2(E*Sj, (B + A*Sj))
P = (xi - Xj)*np.sin(theta_i - 2.0*theta_j) + (yi - Yj)*np.cos(theta_i - 2.0*theta_j)
Q = (xi - Xj)*np.cos(theta_i - 2.0*theta_j) + (yi - Yj)*np.sin(theta_i - 2.0*theta_j)
Cn2[i,j] = D+0.5*Q*F/Sj-(A*C+D*E)*G/Sj
Cn1[i,j] = 0.5*D*F+C*G-Cn2[i,j]
Ct2[i,j] = C+0.5*P*F/Sj+(A*D-C*E)*G/Sj
Ct1[i,j] = 0.5*C*F-D*G-Ct2[i,j]
aerodynamic_matrix = np.zeros((len(panels.x_coords),len(panels.x_coords)))
tangential_matrix = np.zeros((len(panels.x_coords)-1,len(panels.x_coords)))
for i in range(len(panels.x_coords)):
for j in range(len(panels.x_coords)):
if j == 0 and i != panels.n_panels:
aerodynamic_matrix[i,j] = Cn1[i,j]
tangential_matrix[i,j] = Ct1[i,j]
elif j > 0 and j < panels.n_panels and i != panels.n_panels:
aerodynamic_matrix[i,j] = Cn1[i,j] + Cn2[i,j-1]
tangential_matrix[i,j] = Ct1[i,j] + Ct2[i,j-1]
elif j == panels.n_panels and i != panels.n_panels:
aerodynamic_matrix[i,j] = Cn2[i,j-1]
tangential_matrix[i,j] = Ct2[i,j-1]
elif i == panels.n_panels and (j == 0 or j == panels.n_panels):
aerodynamic_matrix[i,j] = 1.0
free_stream_matrix = np.sin(panels.theta - alpha*(np.pi/180.0))
free_stream_matrix = np.append(free_stream_matrix, 0.0)
gamma_prime = np.linalg.solve(aerodynamic_matrix,free_stream_matrix)
self.v_panels = np.matmul(tangential_matrix, gamma_prime) + np.cos(panels.theta - alpha*(np.pi/180.0))
return self.v_panels
# Solves the total local velocity at each control point based on vortex source method
# @param Angle of attack
# @param Panels object that defines airfoil geometry
def get_velocity_spvp(self, alpha, panels):
Iij = np.zeros((panels.n_panels,panels.n_panels))
Jij = np.zeros((panels.n_panels,panels.n_panels))
Kij = np.zeros((panels.n_panels,panels.n_panels))
Lij = np.zeros((panels.n_panels,panels.n_panels))
for i in range(panels.n_panels):
xi = panels.control_x_coords[i]
yi = panels.control_y_coords[i]
theta_i = panels.theta[i]
c_theta_i = np.cos(theta_i)
s_theta_i = np.sin(theta_i)
for j in range(panels.n_panels):
theta_j = panels.theta[j]
c_theta_j = np.cos(theta_j)
s_theta_j = np.sin(theta_j)
Sj = panels.lengths[j]
Xj = panels.x_coords[j]
Yj = panels.y_coords[j]
A = -(xi-Xj)*c_theta_j-(yi-Yj)*s_theta_j
B = (xi-Xj)**2.0+(yi-Yj)**2.0
Ci = np.sin(theta_i-theta_j)
Cj = -np.cos(theta_i-theta_j)
Cl = np.sin(theta_j-theta_i)
Di = -(xi-Xj)*s_theta_i+(yi-Yj)*c_theta_i
Dj = (xi-Xj)*c_theta_i+(yi-Yj)*s_theta_i
Dl = (xi-Xj)*s_theta_i-(yi-Yj)*c_theta_i
if B-A*A >= 0.0:
E = np.sqrt(B-A*A)
else:
E = 0.0
if B == 0.0 or E == 0.0:
Iij[i,j] = 0.0
Jij[i,j] = 0.0
Kij[i,j] = 0.0
Lij[i,j] = 0.0
else:
term1 = np.log((Sj*Sj+2.0*A*Sj+B)/B)/2.0
term2 = (np.arctan2((Sj+A),E)-np.arctan2(A,E))/E
Iij[i,j] = Ci*term1+(Di-A*Ci)*term2
Jij[i,j] = Cj*term1+(Dj-A*Cj)*term2
Kij[i,j] = Jij[i,j]
Lij[i,j] = Cl*term1+(Dl-A*Cl)*term2
aerodynamic_matrix = np.zeros((panels.n_panels+1,panels.n_panels+1))
for i in range(panels.n_panels+1):
for j in range(panels.n_panels+1):
if i == panels.n_panels:
if j == panels.n_panels:
aerodynamic_matrix[i,j] = -(np.sum(Lij[0,:]) + np.sum(Lij[panels.n_panels-1,:])) + 2.0*np.pi
else:
aerodynamic_matrix[i,j] = Jij[0,j] + Jij[panels.n_panels-1,j]
elif j == panels.n_panels:
aerodynamic_matrix[i,j] = -np.sum(Kij[i,:])
elif i == j:
aerodynamic_matrix[i,j] = np.pi
else:
aerodynamic_matrix[i,j] = Iij[i,j]
beta = panels.theta + np.pi/2.0 - alpha*(np.pi/180.0)
beta[beta > 2.0*np.pi] = beta[beta > 2.0*np.pi] - 2.0*np.pi
free_stream_matrix = -2.0*np.pi*np.cos(beta)
free_stream_matrix = np.append(free_stream_matrix, -2.0*np.pi*(np.sin(beta[0]) + np.sin(beta[panels.n_panels-1])))
source_vortex_soln = np.linalg.solve(aerodynamic_matrix,free_stream_matrix)
self.v_panels = np.zeros(panels.n_panels)
for i in range(panels.n_panels):
term1 = np.sin(beta[i])
term2 = 1.0 / (2.0*np.pi) * np.sum(source_vortex_soln[0:-1]*Jij[i,:])
term3 = source_vortex_soln[-1] / 2.0
term4 = -(source_vortex_soln[-1] / (2.0*np.pi))*np.sum(Lij[i,:])
self.v_panels[i] = term1 + term2 + term3 + term4
return self.v_panels
# Solves for the lift, pressure drag, and quarter-chord moment coefficients (and the surface pressure distribution)
# @param Angle of attack
# @param Panels object that defines airfoil geometry
def get_aerodynamics(self, alpha, panels):
self.alpha = alpha
self.panels = panels
v_panels = self.get_velocity_spvp(alpha, panels)
self.cp = 1.0 - v_panels**2.0
Cf = -self.cp * panels.lengths * panels.normal
Cfnet = np.sum(Cf, axis=1)
Ca = Cfnet[0]
Cn = Cfnet[1]
self.Cmc4 = 0.0
for i in range(len(panels.control_x_coords)):
ra = panels.control_x_coords[i] - 0.25
rn = panels.control_y_coords[i]
dca = Cf[0,i]
dcn = Cf[1,i]
self.Cmc4 = self.Cmc4 - (dcn*ra-dca*rn)
self.Cl = Cn*np.cos(alpha*np.pi/180.0) - Ca*np.sin(alpha*np.pi/180.0)
self.Cdp = Cn*np.sin(alpha*np.pi/180.0) + Ca*np.cos(alpha*np.pi/180.0)
return self.Cl, self.Cdp, self.Cmc4, self.cp
# Calculates the lift and moment curves of a set of panels
def get_curves(self, panels, n_points):
alpha_curve = np.linspace(-5, 15, n_points)
A = np.zeros((3,3))
A[0,0] = len(alpha_curve)
A[1,0] = sum((np.array(alpha_curve)*(np.pi/180.0)))
A[2,0] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
A[0,1] = sum((np.array(alpha_curve)*(np.pi/180.0)))
A[1,1] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
A[2,1] = sum((np.array(alpha_curve)*(np.pi/180.0))**3.0)
A[0,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
A[1,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**3.0)
A[2,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**4.0)
lift_curve = []
moment_curve = []
min_upper_cp_loc = []
min_lower_cp_loc = []
for j in range(n_points):
Cl, Cd, Cm_c4, cp = self.get_aerodynamics(alpha_curve[j],panels)
upper_cp = cp[panels.n_panels//2:]
lower_cp = cp[0:panels.n_panels//2]
lift_curve.append(Cl)
moment_curve.append(Cm_c4)
min_upper_cp_loc.append(panels.control_x_coords[np.argmin(upper_cp)+panels.n_panels//2])
min_lower_cp_loc.append(panels.control_x_coords[np.argmin(lower_cp)])
min_upper_cp_loc = np.mean(min_upper_cp_loc)
min_lower_cp_loc = np.mean(min_lower_cp_loc)
a = len(alpha_curve)*sum(np.array(alpha_curve)*(np.pi/180.0)*np.array(lift_curve))
b = sum(np.array(alpha_curve)*(np.pi/180.0))*sum(np.array(lift_curve))
c = len(alpha_curve)*sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
d = sum(np.array(alpha_curve)*(np.pi/180.0))**2.0
lift_slope = (a-b) / (c-d)
e = sum(np.array(lift_curve))
f = lift_slope * sum(np.array(alpha_curve)*(np.pi/180.0))
g = len(alpha_curve)
zero_lift_angle = 180.0*(f-e) / (g*lift_slope*np.pi)
B = np.zeros((3))
B[0] = sum(np.array(moment_curve))
B[1] = sum(np.array(moment_curve) * np.array(alpha_curve) * (np.pi/180.0))
B[2] = sum(np.array(moment_curve) * (np.array(alpha_curve) * (np.pi/180.0))**2.0)
C = np.linalg.solve(A,B)
curve_parameters = np.zeros(7)
curve_parameters[0] = lift_slope
curve_parameters[1] = zero_lift_angle
curve_parameters[2] = C[0]
curve_parameters[3] = C[1]
curve_parameters[4] = C[2]
curve_parameters[5] = min_upper_cp_loc
curve_parameters[6] = min_lower_cp_loc
return curve_parameters, alpha_curve, lift_curve, moment_curve
# Draws the lift and moment curves
def draw_curves(self, path, panels, name='', estimated_performance=[], rebuilt_panels=0.0):
real_performance, alpha_curve, lift_curve, moment_curve = self.get_curves(panels, 50)
plot_rebuilt = False
if isinstance(rebuilt_panels, Panels):
rebuilt_performance, _, rebuilt_lift_curve, rebuilt_moment_curve = self.get_curves(rebuilt_panels, 50)
plot_rebuilt = True
plot_estimated = False
if len(estimated_performance)==7:
estimated_lift_curve = estimated_performance[0] * (alpha_curve*(np.pi/180.0) - estimated_performance[1]*(np.pi/180.0))
estimated_moment_curve = estimated_performance[2] + estimated_performance[3]*(alpha_curve*(np.pi/180.0)) + estimated_performance[4]*(alpha_curve*(np.pi/180.0))**2.0
plot_estimated = True
if not os.path.isdir(path):
os.mkdir(path)
num = 0
done = False
while not done:
if not plot_rebuilt and not plot_estimated:
done = not (os.path.exists(path + "/lift_" + str(num) + ".png"))
elif not plot_rebuilt and plot_estimated:
done = not (os.path.exists(path + "/estimated_lift_" + str(num) + ".png"))
elif plot_rebuilt and not plot_estimated:
done = not (os.path.exists(path + "/rebuilt_lift_" + str(num) + ".png"))
elif plot_rebuilt and plot_estimated:
done = not (os.path.exists(path + "/estimated_lift_" + str(num) + ".png"))
done = done and not (os.path.exists(path + "/rebuilt_lift_" + str(num) + ".png"))
num = num + 1
num = num - 1
if not plot_rebuilt and not plot_estimated:
lift_path = path + "/lift_" + str(num) + ".png"
if plot_estimated:
lift_path_estimated = path + "/estimated_lift_" + str(num) + ".png"
if plot_rebuilt:
lift_path_rebuilt = path + "/rebuilt_lift_" + str(num) + ".png"
if not plot_rebuilt and not plot_estimated:
plt.close()
plt.axhline(0.0, color='k', lw=0.75)
plt.axvline(0.0, color='k', lw=0.75)
plt.plot(alpha_curve, lift_curve, c='b', lw=2.5)
plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
plt.ylabel(r'$C_{l}$'+' [-]', fontsize="x-large")
if name != '':
plt.title("Lift Curve for "+name, fontsize="xx-large")
else:
plt.title("Lift Curve", fontsize="xx-large")
plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*1.0+np.min(lift_curve), r'$x_{p_{min,u}}$'+' = '+str(round(real_performance[5],2)),fontsize='large')
plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.9+np.min(lift_curve), r'$x_{p_{min,l}}$'+' = '+str(round(real_performance[6],2)),fontsize='large')
if np.min(lift_curve) < 0.0:
plt.ylim([1.1*np.min(lift_curve), 1.1*np.max(lift_curve)])
else:
plt.ylim([0.9*np.min(lift_curve), 1.1*np.max(lift_curve)])
plt.xticks(fontsize='x-large')
plt.yticks(fontsize='x-large')
plt.gcf().set_size_inches(8,5.6)
plt.savefig(lift_path, dpi=200)
if plot_estimated:
plt.close()
plt.axhline(0.0, color='k', lw=0.75)
plt.axvline(0.0, color='k', lw=0.75)
plt.plot(alpha_curve, lift_curve, c='b', lw=2.5, label='Original')
plt.plot(alpha_curve, estimated_lift_curve, c='r', lw=2.5, label='Estimated', ls='--')
plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
plt.ylabel(r'$C_{l}$'+' [-]', fontsize="x-large")
if name != '':
plt.title("Lift Curve for "+name, fontsize="xx-large")
else:
plt.title("Lift Curve", fontsize="xx-large")
plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*1.0+np.min(lift_curve), r'$x_{p_{min,u}}$'+' = '+str(round(real_performance[5],2)),fontsize='large')
plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.8+np.min(lift_curve), r'$x_{p_{min,l}}$'+' = '+str(round(real_performance[6],2)),fontsize='large')
plt.text(-5.2, (np.max(lift_curve)
import functools
import gc
import numpy as np
import osmnx as ox
from networkx import write_gpickle, read_gpickle
import bmm
def download_cambridge_graph(save_path):
cambridge_ll_bbox = [52.245, 52.150, 0.220, 0.025]
raw_graph = ox.graph_from_bbox(*cambridge_ll_bbox,
truncate_by_edge=True,
simplify=False,
network_type='drive')
projected_graph = ox.project_graph(raw_graph)
simplified_graph = ox.simplify_graph(projected_graph)
write_gpickle(simplified_graph, save_path)
# Load cam_graph of Cambridge
def load_graph(path):
graph = read_gpickle(path)
return graph
# Clear lru_cache
def clear_cache():
gc.collect()
wrappers = [
a for a in gc.get_objects()
if isinstance(a, functools._lru_cache_wrapper)]
for wrapper in wrappers:
wrapper.cache_clear()
# Function to sample a random point on the cam_graph
def random_positions(graph, n=1):
edges_arr = np.array(graph.edges)
n_edges = len(edges_arr)
edge_selection_indices = np.random.choice(n_edges, n)
edge_selection = edges_arr[edge_selection_indices]
random_alphas = np.random.uniform(size=(n, 1))
positions = np.concatenate((edge_selection, random_alphas), axis=1)
return positions
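# A minimal usage sketch (illustrative only; the pickle path below is a
# hypothetical placeholder): each row returned by random_positions is
# (u, v, key, alpha), i.e. an edge of the graph plus a uniform position
# along that edge.
def _random_positions_sketch(graph_path='cambridge_graph.pickle'):
    graph = load_graph(graph_path)
    positions = random_positions(graph, n=3)
    # positions.shape == (3, 4): u, v, key, alpha
    return positions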
# Function to sample a route (given a start position, route length and time_interval (assumed constant))
def sample_route(graph, model, time_interval, length, start_position=None, cart_route=False, observations=False,
d_refine=1, num_inter_cut_off=None, num_pos_route_cap=np.inf):
route = np.zeros((1, 8))
if start_position is None:
start_position = random_positions(graph, 1)
route[0, 1:5] = start_position
start_geom = bmm.get_geometry(graph, start_position[0, :3])
route[0, 5:7] = bmm.src.tools.edges.edge_interpolate(start_geom, start_position[0, 3])
d_max = model.d_max(time_interval)
if num_inter_cut_off is None:
num_inter_cut_off = max(int(time_interval / 1.5), 10)
for t in range(1, length):
prev_pos = route[-1:].copy()
prev_pos[0, -1] = 0
possible_routes = bmm.get_possible_routes(graph, prev_pos, d_max, all_routes=True,
num_inter_cut_off=num_inter_cut_off)
if len(possible_routes) > num_pos_route_cap:
break
# Get all possible positions on each route
discretised_routes_indices_list = []
discretised_routes_list = []
for i, sub_route in enumerate(possible_routes):
# All possible end positions of route
discretised_edge_matrix = bmm.discretise_edge(graph, sub_route[-1, 1:4], d_refine)
if sub_route.shape[0] == 1:
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= route[-1, 4]]
discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
else:
discretised_edge_matrix[:, -1] += sub_route[-2, -1]
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]
# Track route index and append to list
if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
import numpy as np
import math
#.ply format -- X,Y,Z, normalX,normalY,normalZ
def parse_ply_planes(shape_name, num_of_points=2048):
file = open(shape_name,'r')
lines = file.readlines()
vertices = np.zeros([num_of_points,7], np.float32)
assert lines[9].strip() == "end_header"
for i in range(num_of_points):
line = lines[i+10].split()
vertices[i,0] = float(line[0]) #X
vertices[i,1] = float(line[1]) #Y
vertices[i,2] = float(line[2]) #Z
vertices[i,3] = float(line[3]) #normalX
vertices[i,4] = float(line[4]) #normalY
vertices[i,5] = float(line[5]) #normalZ
tmp = vertices[i,0]*vertices[i,3] + vertices[i,1]*vertices[i,4] + vertices[i,2]*vertices[i,5]
vertices[i,6] = -tmp #d for plane ax+by+cz+d = 0
return vertices
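# A small sanity-check sketch (illustrative; 'chair.ply' is a hypothetical
# file name): the 7th column stored by parse_ply_planes is the plane offset d,
# so every point should satisfy n.p + d = 0 up to floating point error.
def _parse_ply_planes_sketch(in_name='chair.ply'):
    pts = parse_ply_planes(in_name)  # (2048, 7): x, y, z, nx, ny, nz, d
    residual = np.abs(np.sum(pts[:, :3] * pts[:, 3:6], axis=1) + pts[:, 6])
    return residual.max()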
def parse_ply_list_to_planes(ref_txt_name, data_dir, data_txt_name):
#open file & read points
ref_file = open(ref_txt_name, 'r')
ref_names = [line.strip() for line in ref_file]
ref_file.close()
data_file = open(data_txt_name, 'r')
data_names = [line.strip() for line in data_file]
data_file.close()
num_shapes = len(ref_names)
ref_points = np.zeros([num_shapes,2048,7], np.float32)
idx = np.zeros([num_shapes], np.int32)
for i in range(num_shapes):
shape_name = data_dir+"/"+ref_names[i]+".ply"
shape_idx = data_names.index(ref_names[i])
shape_planes = parse_ply_planes(shape_name)
ref_points[i,:,:] = shape_planes
idx[i] = shape_idx
return ref_points, idx, ref_names
def write_ply_point(name, vertices):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
fout.close()
def write_ply_point_normal(name, vertices, normals=None):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("property float nx\n")
fout.write("property float ny\n")
fout.write("property float nz\n")
fout.write("end_header\n")
if normals is None:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(vertices[ii,3])+" "+str(vertices[ii,4])+" "+str(vertices[ii,5])+"\n")
else:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(normals[ii,0])+" "+str(normals[ii,1])+" "+str(normals[ii,2])+"\n")
fout.close()
def write_ply_triangle(name, vertices, triangles):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("element face "+str(len(triangles))+"\n")
fout.write("property list uchar int vertex_index\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("3 "+str(triangles[ii,0])+" "+str(triangles[ii,1])+" "+str(triangles[ii,2])+"\n")
fout.close()
def write_ply_polygon(name, vertices, polygons):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("element face "+str(len(polygons))+"\n")
fout.write("property list uchar int vertex_index\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii][0])+" "+str(vertices[ii][1])+" "+str(vertices[ii][2])+"\n")
for ii in range(len(polygons)):
fout.write(str(len(polygons[ii])))
for jj in range(len(polygons[ii])):
fout.write(" "+str(polygons[ii][jj]))
fout.write("\n")
fout.close()
def write_obj_triangle(name, vertices, triangles):
fout = open(name, 'w')
for ii in range(len(vertices)):
fout.write("v "+str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("f "+str(triangles[ii,0]+1)+" "+str(triangles[ii,1]+1)+" "+str(triangles[ii,2]+1)+"\n")
fout.close()
def write_obj_polygon(name, vertices, polygons):
fout = open(name, 'w')
for ii in range(len(vertices)):
fout.write("v "+str(vertices[ii][0])+" "+str(vertices[ii][1])+" "+str(vertices[ii][2])+"\n")
for ii in range(len(polygons)):
fout.write("f")
for jj in range(len(polygons[ii])):
fout.write(" "+str(polygons[ii][jj]+1))
fout.write("\n")
fout.close()
#designed to take 64^3 voxels!
def sample_points_polygon_vox64(vertices, polygons, voxel_model_64, num_of_points):
#convert polygons to triangles
triangles = []
for ii in range(len(polygons)):
for jj in range(len(polygons[ii])-2):
triangles.append( [polygons[ii][0], polygons[ii][jj+1], polygons[ii][jj+2]] )
triangles = np.array(triangles, np.int32)
vertices = np.array(vertices, np.float32)
small_step = 1.0/64
epsilon = 1e-6
triangle_area_list = np.zeros([len(triangles)],np.float32)
triangle_normal_list = np.zeros([len(triangles),3],np.float32)
for i in range(len(triangles)):
#area = |u x v|/2 = |u||v|sin(uv)/2
a,b,c = vertices[triangles[i,1]]-vertices[triangles[i,0]]
x,y,z = vertices[triangles[i,2]]-vertices[triangles[i,0]]
ti = b*z-c*y
tj = c*x-a*z
tk = a*y-b*x
area2 = math.sqrt(ti*ti+tj*tj+tk*tk)
if area2<epsilon:
triangle_area_list[i] = 0
triangle_normal_list[i,0] = 0
triangle_normal_list[i,1] = 0
triangle_normal_list[i,2] = 0
else:
triangle_area_list[i] = area2
triangle_normal_list[i,0] = ti/area2
triangle_normal_list[i,1] = tj/area2
triangle_normal_list[i,2] = tk/area2
triangle_area_sum = np.sum(triangle_area_list)
sample_prob_list = (num_of_points/triangle_area_sum)*triangle_area_list
triangle_index_list = np.arange(len(triangles))
point_normal_list = np.zeros([num_of_points,6],np.float32)
import pandas as pd
from scipy.io import wavfile
import numpy as np
import argparse
def stock_to_wav(filename):
prices = pd.read_csv(f"{filename}.csv").Close.values
prices = np.diff(prices)
import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
err = np.seterr(invalid="ignore", divide="ignore")
try:
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
finally:
np.seterr(**err)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x=np.array([1,2,3], np.int16)
assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
"""Film Mode Matching Mode Solver
Implementation of the Film Mode Matching (FMM) algorithm, as described in:
- Sudbo, "Film mode matching a versatile numerical method for vector mode field calculations in dielectric waveguides", Pure App. Optics, 2 (1993), 211-233
- Sudbo, "Improved formulation of the film mode matching method for mode field calculations in dielectric waveguides", Pure App. Optics, 3 (1994), 381-388
Examples
========
See L{FMM1d} and L{FMM2d}.
"""
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from functools import reduce
__author__ = '<NAME> & <NAME>'
import numpy
import scipy
import scipy.optimize
import copy
import EMpy.utils
from EMpy.modesolvers.interface import *
import pylab
class Message(object):
def __init__(self, msg, verbosity=0):
self.msg = msg
self.verbosity = verbosity
def show(self, verbosity=0):
if self.verbosity <= verbosity:
print((self.verbosity - 1) * '\t' + self.msg)
class Struct(object):
"""Empty class to fill with whatever I want. Maybe a dictionary would do?"""
pass
class Boundary(object):
"""Boundary conditions.
Electric and Magnetic boundary conditions are translated to Symmetric
and Antisymmetric for each field.
@ivar xleft: Left bc on x.
@ivar xright: Right bc on x.
@ivar yleft: Left bc on y.
@ivar yright: Right bc on y.
"""
def __init__(self, xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall'):
"""Set the boundary conditions, validate and translate."""
self.xleft = xleft
self.yleft = yleft
self.xright = xright
self.yright = yright
self.validate()
self.translate()
def validate(self):
"""Validate the input.
@raise ValueError: Unknown boundary.
"""
if not reduce(lambda x, y: x & y,
[(x == 'Electric Wall') | (x == 'Magnetic Wall') for x in [self.xleft, self.yleft, self.xright, self.yright]]):
raise ValueError('Unknown boundary.')
def translate(self):
"""Translate for each field.
@raise ValueError: Unknown boundary.
"""
self.xh = ''
self.xe = ''
self.yh = ''
self.ye = ''
if self.xleft == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xleft == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.xright == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xright == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yleft == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yleft == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yright == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yright == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
def __str__(self):
return 'xleft = %s, xright = %s, yleft = %s, yright = %s' % (self.xleft, self.xright, self.yleft, self.yright)
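# A minimal sketch of how the boundary translation above behaves: an
# 'Electric Wall' maps to antisymmetric H / symmetric E, a 'Magnetic Wall'
# to the opposite. The wall choices below are arbitrary illustration values.
def _boundary_translation_sketch():
    bc = Boundary(xleft='Electric Wall', xright='Magnetic Wall',
                  yleft='Magnetic Wall', yright='Electric Wall')
    assert bc.xh == 'AS' and bc.xe == 'SA'
    assert bc.yh == 'SA' and bc.ye == 'AS'
    return bc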
class Slice(object):
"""One dimensional arrangement of layers and 1d modes.
A slice is made of a stack of layers, i.e. refractive indeces with a thickness,
with given boundary conditions.
It holds 1d modes, both TE and TM.
@ivar x1: start point of the slice in x.
@ivar x2: end point of the slice in x.
@ivar Uy: array of points delimiting the layers.
@ivar boundary: boundary conditions.
@ivar modie: E modes.
@ivar modih: H modes.
@ivar Ux: array of points delimiting the slices in x (internally set).
@ivar refractiveindex: refractive index of all the slices (internally set).
@ivar epsilon: epsilon of all the slices (internally set).
@ivar wl: vacuum wavelength.
"""
def __init__(self, x1, x2, Uy, boundary, modie, modih):
self.x1 = x1
self.x2 = x2
self.Uy = Uy
self.boundary = boundary
self.modie = modie
self.modih = modih
def __str__(self):
return 'x1 = %g, x2 = %g\nUy = %s\nboundary = %s' % (self.x1, self.x2, self.Uy, self.boundary)
class FMMMode1d(Mode):
"""One dimensional mode.
Note
====
Virtual class.
"""
pass
class FMMMode1dx(FMMMode1d):
"""Matching coefficients in the x-direction.
L{FMMMode1dy}s are weighted by these coefficients to assure continuity.
"""
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.U.__str__())
class FMMMode1dy(FMMMode1d):
"""One dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
Note
====
The mode is supposed to be one dimensional, in the y direction.
@ivar sl: array of values of the mode at the lhs of each slice.
@ivar sr: array of values of the mode at the rhs of each slice.
@ivar al: array of values of the derivative of the mode at the lhs of each slice.
@ivar ar: array of values of the derivative of the mode at the rhs of each slice.
@ivar k: wavevector inside each layer.
@ivar keff: effective wavevector.
@ivar zero: residual of the mode-matching condition; it must be as close to zero as possible.
@ivar Uy: array of points delimiting the layers.
"""
def eval(self, y_):
"""Evaluate the mode at y."""
y = numpy.atleast_1d(y_)
ny = len(y)
f = numpy.zeros(ny, dtype=complex)
for iU in range(len(self.U) - 1):
k = self.k[iU]
sl = self.sl[iU]
al = self.al[iU]
Ul = self.U[iU]
Ur = self.U[iU+1]
idx = numpy.where((Ul <= y) & (y <= Ur))
yy = y[idx] - Ul
f[idx] = sl * numpy.cos(k * yy) + al * sinxsux(k * yy) * yy
return f
def plot(self, y):
f = self.eval(y)
pylab.plot(y, numpy.real(f), y, numpy.imag(f))
pylab.legend(('real', 'imag'))
pylab.xlabel('y')
pylab.ylabel('mode1d')
pylab.show()
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nkeff = %s\nzero = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.keff.__str__(),
self.zero.__str__(),
self.U.__str__())
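# A minimal sketch of the piecewise representation used by FMMMode1dy.eval:
# with a single layer on U = [0, 1], k = pi, sl = 1 and al = 0, the evaluated
# field is simply cos(pi*y). All values below are illustrative, not a solved mode.
def _mode1dy_eval_sketch():
    m = FMMMode1dy()
    m.U = numpy.array([0.0, 1.0])
    m.k = numpy.array([numpy.pi])
    m.sl = numpy.array([1.0 + 0j])
    m.al = numpy.array([0.0 + 0j])
    m.sr = numpy.array([-1.0 + 0j])
    m.ar = numpy.array([0.0 + 0j])
    y = numpy.linspace(0.0, 1.0, 5)
    return m.eval(y)  # approximately cos(pi * y)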
class FMMMode2d(Mode):
"""Two dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
"""
def get_x(self, n=100):
return numpy.linspace(self.slicesx[0].Ux[0], self.slicesx[0].Ux[-1], n)
def get_y(self, n=100):
return numpy.linspace(self.slicesx[0].Uy[0], self.slicesx[0].Uy[-1], n)
def eval(self, x_=None, y_=None):
"""Evaluate the mode at x,y."""
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
nmodi = len(self.modie)
lenx = len(x)
leny = len(y)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
uh = numpy.zeros((nmodi, lenx), dtype=complex)
ue = numpy.zeros_like(uh)
udoth = numpy.zeros_like(uh)
udote = numpy.zeros_like(uh)
Exsh = numpy.zeros((leny, nmodi), dtype=complex)
Exah = numpy.zeros_like(Exsh)
Exse = numpy.zeros_like(Exsh)
Exae = numpy.zeros_like(Exsh)
Eysh = numpy.zeros_like(Exsh)
Eyah = numpy.zeros_like(Exsh)
Eyse = numpy.zeros_like(Exsh)
Eyae = numpy.zeros_like(Exsh)
Ezsh = numpy.zeros_like(Exsh)
Ezah = numpy.zeros_like(Exsh)
Ezse = numpy.zeros_like(Exsh)
Ezae = numpy.zeros_like(Exsh)
cBxsh = numpy.zeros_like(Exsh)
cBxah = numpy.zeros_like(Exsh)
cBxse = numpy.zeros_like(Exsh)
cBxae = numpy.zeros_like(Exsh)
cBysh = numpy.zeros_like(Exsh)
cByah = numpy.zeros_like(Exsh)
cByse = numpy.zeros_like(Exsh)
cByae = numpy.zeros_like(Exsh)
cBzsh = numpy.zeros_like(Exsh)
cBzah = numpy.zeros_like(Exsh)
cBzse = numpy.zeros_like(Exsh)
cBzae = numpy.zeros_like(Exsh)
ExTE = numpy.zeros((leny,lenx), dtype=complex)
EyTE = numpy.zeros_like(ExTE)
EzTE = numpy.zeros_like(ExTE)
ExTM = numpy.zeros_like(ExTE)
EyTM = numpy.zeros_like(ExTE)
EzTM = numpy.zeros_like(ExTE)
cBxTE = numpy.zeros_like(ExTE)
cByTE = numpy.zeros_like(ExTE)
cBzTE = numpy.zeros_like(ExTE)
cBxTM = numpy.zeros_like(ExTE)
cByTM = numpy.zeros_like(ExTE)
cBzTM = numpy.zeros_like(ExTE)
for mx, slice in enumerate(self.slicesx):
idx = numpy.where((slice.x1 <= x) & (x < slice.x2))
x2 = x[idx] - slice.x1
x1 = slice.x2 - x[idx]
dx = slice.x2 - slice.x1
for n in range(nmodi):
fi = slice.modih[n].eval(y)
fidot = dot(slice.modih[n]).eval(y)
psi = slice.modie[n].eval(y)
psisueps = sueps(slice.modie[n]).eval(y)
psidotsueps = sueps(dot(slice.modie[n])).eval(y)
kfh = self.modih[n].k[mx]
kxh = scipy.sqrt(kfh**2 - kz**2)
sl = self.modih[n].sl[mx] * (k0/kfh)**2
al = self.modih[n].al[mx]
sr = self.modih[n].sr[mx] * (k0/kfh)**2
ar = self.modih[n].ar[mx]
uh[n,idx] = (numpy.sin(kxh * x1) * sl + numpy.sin(kxh * x2) * sr) / numpy.sin(kxh * dx)
udoth[n,idx] = (numpy.sin(kxh * x1) * al + numpy.sin(kxh * x2) * ar) / numpy.sin(kxh * dx)
kfe = self.modie[n].k[mx]
kxe = scipy.sqrt(kfe**2 - kz**2)
sl = self.modie[n].sl[mx] * (k0/kfe)**2
al = self.modie[n].al[mx]
sr = self.modie[n].sr[mx] * (k0/kfe)**2
ar = self.modie[n].ar[mx]
ue[n,idx] = (numpy.sin(kxe * x1) * sl + numpy.sin(kxe * x2) * sr) / numpy.sin(kxe * dx)
udote[n,idx] = (numpy.sin(kxe * x1) * al + numpy.sin(kxe * x2) * ar) / numpy.sin(kxe * dx)
Exsh[:,n] = (kz/k0) * fi
Exah[:,n] = 0
Exse[:,n] = 0
Exae[:,n] = -psidotsueps / k0**2
Eysh[:,n] = 0
Eyah[:,n] = 0
Eyse[:,n] = -(kfe/k0)**2 * psisueps
Eyae[:,n] = 0
Ezsh[:,n] = 0
Ezah[:,n] = -1j * fi / k0
Ezse[:,n] = 1j * kz / k0**2 * psidotsueps
Ezae[:,n] = 0
cBxsh[:,n] = 0
cBxah[:,n] = fidot / k0**2
cBxse[:,n] = kz / k0 * psi
cBxae[:,n] = 0
cBysh[:,n] = (kfh/k0)**2 * fi
cByah[:,n] = 0
cByse[:,n] = 0
cByae[:,n] = 0
cBzsh[:,n] = -1j * kz / k0**2 * fidot
cBzah[:,n] = 0
cBzse[:,n] = 0
cBzae[:,n] = -1j * psi / k0
ExTE[:,idx] = numpy.tensordot(Exsh, uh[:,idx], axes=1) + numpy.tensordot(Exah, udoth[:,idx], axes=1)
ExTM[:,idx] = numpy.tensordot(Exse, ue[:,idx], axes=1) + numpy.tensordot(Exae, udote[:,idx], axes=1)
EyTE[:,idx] = numpy.tensordot(Eysh, uh[:,idx], axes=1) + numpy.tensordot(Eyah, udoth[:,idx], axes=1)
EyTM[:,idx] = numpy.tensordot(Eyse, ue[:,idx], axes=1) + numpy.tensordot(Eyae, udote[:,idx], axes=1)
EzTE[:,idx] = numpy.tensordot(Ezsh, uh[:,idx], axes=1) + numpy.tensordot(Ezah, udoth[:,idx], axes=1)
EzTM[:,idx] = numpy.tensordot(Ezse, ue[:,idx], axes=1) + numpy.tensordot(Ezae, udote[:,idx], axes=1)
cBxTE[:,idx] = numpy.tensordot(cBxsh, uh[:,idx], axes=1) + numpy.tensordot(cBxah, udoth[:,idx], axes=1)
cBxTM[:,idx] = numpy.tensordot(cBxse, ue[:,idx], axes=1) + numpy.tensordot(cBxae, udote[:,idx], axes=1)
cByTE[:,idx] = numpy.tensordot(cBysh, uh[:,idx], axes=1) + numpy.tensordot(cByah, udoth[:,idx], axes=1)
cByTM[:,idx] = numpy.tensordot(cByse, ue[:,idx], axes=1) + numpy.tensordot(cByae, udote[:,idx], axes=1)
cBzTE[:,idx] = numpy.tensordot(cBzsh, uh[:,idx], axes=1) + numpy.tensordot(cBzah, udoth[:,idx], axes=1)
cBzTM[:,idx] = numpy.tensordot(cBzse, ue[:,idx], axes=1) + numpy.tensordot(cBzae, udote[:,idx], axes=1)
return (ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM)
def fields(self, x=None, y=None):
ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM = self.eval(x, y)
Ex = ExTE + ExTM
Ey = EyTE + EyTM
Ez = EzTE + EzTM
cBx = cBxTE + cBxTM
cBy = cByTE + cByTM
cBz = cBzTE + cBzTM
return (Ex, Ey, Ez, cBx, cBy, cBz)
def intensity(self, x=None, y=None):
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = .5 * (Ex * numpy.conj(cBy) - Ey * numpy.conj(cBx))
return cSz
def TEfrac_old(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSTE = .5 * EMpy.utils.trapz2(Ex * numpy.conj(cBy), y, x)
cSTM = .5 * EMpy.utils.trapz2(-Ey * numpy.conj(cBx), y, x)
return numpy.abs(cSTE) / (numpy.abs(cSTE) + numpy.abs(cSTM))
def TEfrac(self):
Sx, Sy = self.__overlap(self)
return Sx / (Sx - Sy)
def overlap_old(self, m, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = self.intensity(x, y)
norm = scipy.sqrt(EMpy.utils.trapz2(cSz, y, x))
Ex1, Ey1, Ez1, cBx1, cBy1, cBz1 = m.fields(x, y)
cSz1 = m.intensity(x, y)
norm1 = scipy.sqrt(EMpy.utils.trapz2(cSz1, y, x))
return .5 * EMpy.utils.trapz2(Ex/norm * numpy.conj(cBy1/norm1) - Ey/norm * numpy.conj(cBx1/norm1), y, x)
def __overlap_old(self, mode):
nmodi = len(self.modie)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
Sx = 0j
Sy = 0j
for mx, slice in enumerate(self.slicesx):
for n1 in range(nmodi):
phi_n1 = slice.modih[n1]
phidot_n1 = dot(phi_n1)
psi_n1 = slice.modie[n1]
psisueps_n1 = sueps(psi_n1)
psidotsueps_n1 = sueps(dot(psi_n1))
uh_n1 = copy.deepcopy(self.modih[n1])
# reduce to a single slice
kfh_n1 = uh_n1.k[mx]
uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
uhdot_n1 = dot(uh_n1)
ue_n1 = copy.deepcopy(self.modie[n1])
# reduce to a single slice
kfe_n1 = ue_n1.k[mx]
ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
uedot_n1 = dot(ue_n1)
for n2 in range(nmodi):
phi_n2 = mode.slicesx[mx].modih[n2]
phidot_n2 = dot(phi_n2)
psi_n2 = mode.slicesx[mx].modie[n2]
psisueps_n2 = sueps(psi_n2)
psidotsueps_n2 = sueps(dot(psi_n2))
uh_n2 = copy.deepcopy(mode.modih[n2])
# reduce to a single slice
kfh_n2 = uh_n2.k[mx]
uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
uhdot_n2 = dot(uh_n2)
ue_n2 = copy.deepcopy(mode.modie[n2])
# reduce to a single slice
kfe_n2 = ue_n2.k[mx]
ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
uedot_n2 = dot(ue_n2)
Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
- kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
+ kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
return (Sx, Sy)
def __overlap(self, mode):
nmodi = len(self.modie)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
Sx = 0j
Sy = 0j
for mx, slice in enumerate(self.slicesx):
phi_n1s = []
phidot_n1s = []
psi_n1s = []
psisueps_n1s = []
psidotsueps_n1s = []
uh_n1s = []
uhdot_n1s = []
ue_n1s = []
uedot_n1s = []
kfe_n1s = []
kfh_n1s = []
phi_n2s = []
phidot_n2s = []
psi_n2s = []
psisueps_n2s = []
psidotsueps_n2s = []
uh_n2s = []
uhdot_n2s = []
ue_n2s = []
uedot_n2s = []
kfe_n2s = []
kfh_n2s = []
for n1 in range(nmodi):
phi_n1 = slice.modih[n1]
phi_n1s.append(phi_n1)
phidot_n1s.append(dot(phi_n1))
psi_n1 = slice.modie[n1]
psi_n1s.append(psi_n1)
psisueps_n1s.append(sueps(psi_n1))
psidotsueps_n1s.append(sueps(dot(psi_n1)))
uh_n1 = copy.deepcopy(self.modih[n1])
# reduce to a single slice
kfh_n1 = uh_n1.k[mx]
kfh_n1s.append(kfh_n1)
uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
uh_n1s.append(uh_n1)
uhdot_n1s.append(dot(uh_n1))
ue_n1 = copy.deepcopy(self.modie[n1])
# reduce to a single slice
kfe_n1 = ue_n1.k[mx]
kfe_n1s.append(kfe_n1)
ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
ue_n1s.append(ue_n1)
uedot_n1s.append(dot(ue_n1))
phi_n2 = mode.slicesx[mx].modih[n1]
phi_n2s.append(phi_n2)
phidot_n2s.append(dot(phi_n2))
psi_n2 = mode.slicesx[mx].modie[n1]
psi_n2s.append(psi_n2)
psisueps_n2s.append(sueps(psi_n2))
psidotsueps_n2s.append(sueps(dot(psi_n2)))
uh_n2 = copy.deepcopy(mode.modih[n1])
# reduce to a single slice
kfh_n2 = uh_n2.k[mx]
kfh_n2s.append(kfh_n2)
uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
uh_n2s.append(uh_n2)
uhdot_n2s.append(dot(uh_n2))
ue_n2 = copy.deepcopy(mode.modie[n1])
# reduce to a single slice
kfe_n2 = ue_n2.k[mx]
kfe_n2s.append(kfe_n2)
ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
ue_n2s.append(ue_n2)
uedot_n2s.append(dot(ue_n2))
for n1 in range(nmodi):
uh_n1 = uh_n1s[n1]
ue_n1 = ue_n1s[n1]
uedot_n1 = uedot_n1s[n1]
phi_n1 = phi_n1s[n1]
psi_n1 = psi_n1s[n1]
psidotsueps_n1 = psidotsueps_n1s[n1]
psisueps_n1 = psisueps_n1s[n1]
kfe_n1 = kfe_n1s[n1]
for n2 in range(nmodi):
uh_n2 = uh_n2s[n2]
uhdot_n2 = uhdot_n2s[n2]
ue_n2 = ue_n2s[n2]
phi_n2 = phi_n2s[n2]
phidot_n2 = phidot_n2s[n2]
psi_n2 = psi_n2s[n2]
kfh_n2 = kfh_n2s[n2]
Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
- kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
+ kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
return (Sx, Sy)
def overlap(self, mode):
Sx, Sy = self.__overlap(mode)
return Sx - Sy
def norm(self):
return scipy.sqrt(self.overlap(self))
def normalize(self):
n = self.norm()
for ue, uh in zip(self.modie, self.modih):
ue.sl /= n
ue.al /= n
ue.sr /= n
ue.ar /= n
uh.sl /= n
uh.al /= n
uh.sr /= n
uh.ar /= n
def get_fields_for_FDTD(self, x, y):
"""Get mode's field on a staggered grid.
Note: ignores some fields on the boudaries.
"""
x0 = self.get_x()
y0 = self.get_y()
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x0, y0)
# Ex: ignores y = 0, max
x_Ex_FDTD = EMpy.utils.centered1d(x)
y_Ex_FDTD = y[1:-1]
Ex_FDTD = EMpy.utils.interp2(x_Ex_FDTD, y_Ex_FDTD, x0, y0, Ex)
# Ey: ignores x = 0, max
x_Ey_FDTD = x[1:-1]
y_Ey_FDTD = EMpy.utils.centered1d(y)
Ey_FDTD = EMpy.utils.interp2(x_Ey_FDTD, y_Ey_FDTD, x0, y0, Ey)
# Ez: ignores x, y = 0, max
x_Ez_FDTD = x[1:-1]
y_Ez_FDTD = y[1:-1]
Ez_FDTD = EMpy.utils.interp2(x_Ez_FDTD, y_Ez_FDTD, x0, y0, Ez)
# Hx: ignores x = 0, max, /120pi, reverse direction
x_Hx_FDTD = x[1:-1]
y_Hx_FDTD = EMpy.utils.centered1d(y)
Hx_FDTD = EMpy.utils.interp2(x_Hx_FDTD, y_Hx_FDTD, x0, y0, cBx) / (-120. * numpy.pi) # watch out!
# Hy: ignores y = 0, max, /120pi, reverse direction
x_Hy_FDTD = EMpy.utils.centered1d(x)
y_Hy_FDTD = y[1:-1]
Hy_FDTD = EMpy.utils.interp2(x_Hy_FDTD, y_Hy_FDTD, x0, y0, cBy) / (-120. * numpy.pi)
# Hz: /120pi, reverse direction
x_Hz_FDTD = EMpy.utils.centered1d(x)
y_Hz_FDTD = EMpy.utils.centered1d(y)
Hz_FDTD = EMpy.utils.interp2(x_Hz_FDTD, y_Hz_FDTD, x0, y0, cBz) / (-120. * numpy.pi)
return (Ex_FDTD, Ey_FDTD, Ez_FDTD, Hx_FDTD, Hy_FDTD, Hz_FDTD)
def plot(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
f = self.fields(x, y)
# fields
pylab.figure()
titles = ['Ex', 'Ey', 'Ez', 'cBx', 'cBy', 'cBz']
for i in range(6):
subplot_id = 231 + i
pylab.subplot(subplot_id)
pylab.contour(x, y, numpy.abs(f[i]))
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title(titles[i])
pylab.axis('image')
pylab.show()
# power
pylab.figure()
pylab.contour(x, y, numpy.abs(f[-1]))
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title('cSz')
pylab.axis('image')
pylab.show()
def __str__(self):
return 'neff = %s' % (self.keff / (2 * numpy.pi / self.slicesx[0].wl))
class FMM(ModeSolver):
pass
class FMM1d(FMM):
"""Driver to simulate 1d structures.
Examples
========
Find the first 3 TE modes of two slabs of refractive indeces 1 and 3,
of thickness 1um each, for wl = 1, with symmetric boundary conditions:
>>> import numpy
>>> import FMM
>>> Uy = numpy.array([0., 1., 2.])
>>> ny = numpy.array([1., 3.])
>>> wl = 1.
>>> nmodi = 3
>>> simul = FMM.FMM1d(Uy, ny, 'SS').solve(wl, nmodi, 'TE')
>>> keff_0_expected = 18.790809413149393
>>> keff_1_expected = 18.314611633384185
>>> keff_2_expected = 17.326387847565034
>>> assert(numpy.allclose(simul.modes[0].keff, keff_0_expected))
>>> assert(numpy.allclose(simul.modes[1].keff, keff_1_expected))
>>> assert(numpy.allclose(simul.modes[2].keff, keff_2_expected))
"""
def __init__(self, Uy, ny, boundary):
"""Set coordinates of regions, refractive indices and boundary conditions."""
self.Uy = Uy
self.ny = ny
self.boundary = boundary
def solve(self, wl, nmodes, polarization, verbosity=0):
"""Find nmodes modes at a given wavelength and polarization."""
Message('Solving 1d modes.', 1).show(verbosity)
self.wl = wl
self.nmodes = nmodes
self.polarization = polarization
self.modes = FMM1d_y(self.Uy, self.ny, self.wl, self.nmodes, self.boundary, self.polarization, verbosity)
return self
class FMM2d(FMM):
"""Drive to simulate 2d structures.
Examples
========
Find the first 2 modes of a lossy Si channel waveguide in SiO2, using
only 3 1d modes and with electric and magnetic bc on x and y, respectively:
>>> import numpy
>>> import FMM
>>> wl = 1.55
>>> nmodislices = 3
>>> nmodi2d = 2
>>> Ux = numpy.array([0, 2, 2.4, 4.4])
>>> Uy = numpy.array([0, 2, 2.22, 4.22])
>>> boundary = Boundary(xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall')
>>> n2 = 1.446
>>> n1 = 3.4757 - 1e-4j
>>> refindex = numpy.array([[n2, n2, n2],
[n2, n1, n2],
[n2, n2, n2]])
>>> simul = FMM.FMM2d(Ux, Uy, refindex, boundary).solve(wl, nmodislices, nmodi2d)
>>> keff0_expected = 9.666663697969399e+000 -4.028846755836984e-004j
>>> keff1_expected = 7.210476803133368e+000 -2.605078086535284e-004j
>>> assert(numpy.allclose(simul.modes[0].keff, keff0_expected))
>>> assert(numpy.allclose(simul.modes[1].keff, keff1_expected))
"""
def __init__(self, Ux, Uy, rix, boundary):
"""Set coordinates of regions, refractive indices and boundary conditions."""
self.Ux = Ux
self.Uy = Uy
self.rix = rix
self.boundary = boundary
def solve(self, wl, n1dmodes, nmodes, verbosity=0):
"""Find nmodes modes at a given wavelength using n1dmodes 1d modes in each slice."""
Message('Solving 2d modes', 1).show(verbosity)
self.wl = wl
self.n1dmodes = n1dmodes
self.nmodes = nmodes
self.slices = script1d(self.Ux, self.Uy, self.rix, self.wl, self.boundary, self.n1dmodes, verbosity)
self.modes = FMM1d_x_component(self.slices, nmodes, verbosity)
return self
def analyticalsolution(nmodi, TETM, FMMpars):
betay = FMMpars['beta']
epsilon = FMMpars['epsilon']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
Nregions = len(epsilon)
sl = numpy.zeros((nmodi,Nregions), dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
# interval
D = Uy[-1] - Uy[0]
if TETM == 'TE':
N = numpy.sqrt(2. / D)
else:
N = numpy.sqrt(2. / D * epsilon[0])
# boundary condition
if by == 'AA':
kn = (numpy.pi * numpy.arange(1, nmodi + 1) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
sr[:, -1] = 0.
sl[:, 0] = 0.
elif by == 'AS':
kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
ar[:, -1] = 0.
sl[:, 0] = 0.
elif by == 'SA':
kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
sr[:, -1] = 0.
al[:, 0] = 0.
elif by == 'SS':
kn = (numpy.pi * numpy.arange(0, nmodi) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
ar[:, -1] = 0.
al[:, 0] = 0.
# normalization
sl *= N
sr *= N
for n in range(0, nmodi):
al[n,:] *= N * kn[n]
ar[n,:] *= N * kn[n]
# special case: if k=0 the eigenfunction is constant and the normalization
# is different. This only happens with the SS boundary and for the first mode.
if by == 'SS':
sqrt2 = numpy.sqrt(2.)
sl[0,:] /= sqrt2
sr[0,:] /= sqrt2
al[0,:] /= sqrt2
ar[0,:] /= sqrt2
modi = []
for mk in range(0, nmodi):
modo = FMMMode1dy()
modo.sl = sl[mk,:].astype(complex)
modo.sr = sr[mk,:].astype(complex)
modo.al = al[mk,:].astype(complex)
modo.ar = ar[mk,:].astype(complex)
modo.k = kn[mk] * numpy.ones(Nregions)
modo.U = Uy
modo.keff = scipy.sqrt(betay[0]**2 - kn[mk]**2)
modo.zero = 0.
modo.pars = FMMpars
modi.append(modo)
return modi
def sinxsux(x):
return numpy.sinc(x / numpy.pi)
def FMMshootingTM(kz_, FMMpars):
betay = FMMpars['beta']
eps = FMMpars['epsilon']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
kz = numpy.atleast_1d(kz_)
Nregions = len(betay)
d = numpy.diff(Uy)
Delta = numpy.zeros_like(kz)
sl = numpy.zeros(Nregions, dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
kd = k_[:,numpy.newaxis] * d
sinkdsuk_ = sinxsux(kd) * d
coskd_ = numpy.cos(kd)
sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
# left boundary condition
if by[0] == 'A':
al[0] = 1
elif by[0] == 'S':
sl[0] = 1
else:
raise ValueError('unrecognized left boundary condition')
# right boundary condition
if by[1] == 'A':
ar[-1] = 1
elif by[1] == 'S':
sr[-1] = 1
else:
raise ValueError('unrecognized right boundary condition')
# loop over the layers
maxbetay = numpy.max(numpy.real(betay))
n1 = numpy.argmax(numpy.real(betay)) + 1
if n1 == Nregions:
n1 = Nregions - 1
n2 = n1 + 1
modo = FMMMode1dy()
for m in range(0, len(kz)):
k = k_[m,:]
sinkdsuk = sinkdsuk_[m,:][0]
coskd = coskd_[m,:][0]
sinkdk = sinkdk_[m,:][0]
for idx in range(0, n1):
sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
#******************* requirement of continuity
if idx < n1 - 1:
sl[idx+1] = sr[idx];
al[idx+1] = ar[idx] / eps[idx] * eps[idx + 1];
#*******************
for idx1 in range(Nregions - 1, n2 - 2, -1):
sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
#******************* requirement of continuity
if idx1 > n2:
sr[idx1 - 1] = sl[idx1]
ar[idx1 - 1] = al[idx1] / eps[idx1] * eps[idx1 - 1]
#*******************
Delta[m] = (eps[n1-1] * sr[n1-1] * al[n2-1] - eps[n2-1] * ar[n1-1] * sl[n2-1])
if len(kz) < 2:
# normalize and save only if len(kz) == 1
# otherwise, modo is ignored and only Delta is useful
# normalize the left and right propagation
alfa = sr[n1-1] / sl[n2-1]
sl[n2-1:] *= alfa
sr[n2-1:] *= alfa
al[n2-1:] *= alfa
ar[n2-1:] *= alfa
modo.sl = sl
modo.sr = sr
modo.al = al
modo.ar = ar
modo.k = k
modo.U = Uy
modo.keff = kz
modo.zero = Delta
modo.pars = FMMpars
return (Delta, modo)
def FMMshooting(kz_, FMMpars):
betay = FMMpars['beta']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
kz = numpy.atleast_1d(kz_)
Nregions = len(betay)
d = numpy.diff(Uy)
Delta = numpy.zeros_like(kz)
sl = numpy.zeros(Nregions, dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
kd = k_[:,numpy.newaxis] * d
sinkdsuk_ = sinxsux(kd) * d
coskd_ = numpy.cos(kd)
sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
# left boundary condition
if by[0] == 'A':
al[0] = 1
elif by[0] == 'S':
sl[0] = 1
else:
raise ValueError('unrecognized left boundary condition')
# right boundary condition
if by[1] == 'A':
ar[-1] = 1
elif by[1] == 'S':
sr[-1] = 1
else:
raise ValueError('unrecognized right boundary condition')
# loop over the layers
maxbetay = numpy.max(numpy.real(betay))
n1 = numpy.argmax(numpy.real(betay)) + 1
if n1 == Nregions:
n1 = Nregions - 1
n2 = n1 + 1
modo = FMMMode1dy()
for m in range(0, len(kz)):
k = k_[m,:]
sinkdsuk = sinkdsuk_[m,:][0]
coskd = coskd_[m,:][0]
sinkdk = sinkdk_[m,:][0]
for idx in range(0, n1):
sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
#******************* requirement of continuity
if idx < n1 - 1:
sl[idx + 1] = sr[idx];
al[idx + 1] = ar[idx];
#*******************
for idx1 in range(Nregions - 1, n2 - 2, -1):
sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
#******************* requirement of continuity
if idx1 > n2:
sr[idx1 - 1] = sl[idx1]
ar[idx1 - 1] = al[idx1]
#*******************
Delta[m] = (sr[n1-1] * al[n2-1] - ar[n1-1] * sl[n2-1])
## len_kz = len(kz)
## k = k_[0,:]
## sinkdsuk = sinkdsuk_[0,:][0]
## coskd = coskd_[0,:][0]
## sinkdk = sinkdk_[0,:][0]
## code = """
## for (int m = 0; m < len_kz; ++m) {
## //k = k_(m,:);
## //sinkdsuk = sinkdsuk_(0,:);
## //coskd = coskd_(0,:);
## //sinkdk = sinkdk_(0,:);
## int nn1 = int(n1);
## for (int idx = 0; idx < nn1; ++idx) {
## sr(idx) = sl(idx) * coskd(idx) + al(idx) * sinkdsuk(idx);
## ar(idx) = al(idx) * coskd(idx) - sl(idx) * sinkdk(idx);
## if (idx < nn1 - 1) {
## sl(idx + 1) = sr(idx);
## al(idx + 1) = ar(idx);
## }
## }
## int nn2 = int(n2);
## for (int idx1 = Nregions - 1; idx1 > nn2 - 2; --idx1) {
## sl(idx1) = sr(idx1) * coskd(idx1) - ar(idx1) * sinkdsuk(idx1);
## al(idx1) = ar(idx1) * coskd(idx1) + sr(idx1) * sinkdk(idx1);
## if (idx1 > nn2) {
## sr(idx1 - 1) = sl(idx1);
## ar(idx1 - 1) = al(idx1);
## }
## }
## //Delta(m) = std::complex<double>(1) * (sr(nn1-1) * al(nn2-1) - ar(nn1-1) * sl(nn2-1));
## }
## """
##
## from scipy import weave
## from scipy.weave import converters
## weave.inline(code,
## ['n1', 'n2', 'Nregions', 'sl', 'sr', 'al', 'ar', 'len_kz', 'Delta',
## 'k', 'sinkdsuk', 'sinkdk', 'coskd',
## 'k_', 'sinkdsuk_', 'sinkdk_', 'coskd_'],
## type_converters = converters.blitz,
## compiler = 'gcc')
if len(kz) < 2:
# normalize and save only if len(kz) == 1
# otherwise, modo is ignored and only Delta is useful
# normalize the left and right propagation
alfa = sr[n1-1] / sl[n2-1]
sl[n2-1:] *= alfa
sr[n2-1:] *= alfa
al[n2-1:] *= alfa
ar[n2-1:] *= alfa
modo.sl = sl
modo.sr = sr
modo.al = al
modo.ar = ar
modo.k = k
modo.U = Uy
modo.keff = kz
modo.zero = Delta
modo.pars = FMMpars
return (Delta, modo)
def remove_consecutives(x, y):
b = numpy.r_[numpy.diff(x) == 1, 0].astype(int)
ic = 0
flag = 0
l = []
for ib in range(len(b)):
if flag == 0:
c = [x[ib]]
ic += 1
if b[ib] == 1:
flag = 1
else:
l.append(c)
else:
c.append(x[ib])
if b[ib] != 1:
flag = 0
l.append(c)
index = []
for il, ll in enumerate(l):
newi = ll
itmp = numpy.argmax(y[newi])
index.append(newi[0] + itmp)
return index
def findzerosnew(x, y, searchinterval):
minsi = 2 * numpy.abs(x[1] - x[0])
if searchinterval < minsi:
searchinterval = minsi
dy = numpy.r_[0, numpy.diff(numpy.diff(scipy.log(y))), 0]
idy = numpy.where(dy > 0.005)[0]
if len(idy) == 0:
zeri = numpy.array([])
z1 = numpy.array([])
z2 = numpy.array([])
else:
ind = remove_consecutives(idy, dy)
zeri = x[ind]
z1 = numpy.zeros_like(zeri)
z2 = numpy.zeros_like(zeri)
dz = numpy.abs(numpy.diff(zeri))
if len(dz) == 0:
z1[0] = zeri - searchinterval/2
z2[0] = zeri + searchinterval/2
else:
delta = numpy.min([dz[0], searchinterval])
z1[0] = zeri[0] - delta/2
z2[0] = zeri[0] + delta/2
for idx in range(1, len(zeri) - 1):
delta = numpy.min([dz[idx - 1], dz[idx], searchinterval])
z1[idx] = zeri[idx] - delta/2
z2[idx] = zeri[idx] + delta/2
delta = numpy.min([dz[-1], searchinterval])
z1[-1] = zeri[-1] - delta/2
z2[-1] = zeri[-1] + delta/2
return (zeri, z1, z2)
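# Illustrative usage sketch: findzerosnew locates sharp dips of a sampled
# |f(x)| curve and returns approximate zeros plus bracketing intervals for
# later refinement.  The test curve below is an assumption for demonstration
# only; the call relies on the legacy scipy.log alias used above (older SciPy).
def _demo_findzerosnew():
    x = numpy.linspace(0.0, 10.0, 2001)
    y = numpy.abs((x - 2.0) * (x - 5.0) * (x - 8.0)) + 1e-6  # near-zeros at 2, 5, 8
    zeri, z1, z2 = findzerosnew(x, y, searchinterval=0.5)
    print('approximate zeros:', zeri)
    print('bracketing intervals:', list(zip(z1, z2)))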
def absfzzero2(t, f, xmin, xmax, ymin, ymax):
xmean = numpy.mean([xmin, xmax])
ymean = numpy.mean([ymin, ymax])
xwidth = xmax - xmin
ywidth = ymax - ymin
x = xmean + xwidth * t[0] / (1. + numpy.abs(t[0])) / 2.
y = ymean + ywidth * t[1] / (1. + numpy.abs(t[1])) / 2.
z = x + 1j * y
fv = f(z)
return numpy.abs(fv)**2
def fzzeroabs2(f, zmin, zmax):
xmin = numpy.real(zmin)
ymin = numpy.imag(zmin)
xmax = numpy.real(zmax)
ymax = numpy.imag(zmax)
tx0 = 0.
ty0 = 0.
t0 = scipy.optimize.fmin(lambda t: absfzzero2(t, f, xmin, xmax, ymin, ymax), [tx0, ty0],
maxiter=100000, maxfun=100000, xtol=1e-15, ftol=1e-15, disp=0)
xmean = numpy.mean([xmin, xmax])
ymean = numpy.mean([ymin, ymax])
xwidth = xmax - xmin
ywidth = ymax - ymin
x0 = xmean + xwidth * t0[0] / (1 + numpy.abs(t0[0])) / 2
y0 = ymean + ywidth * t0[1] / (1 + numpy.abs(t0[1])) / 2
z0 = x0 + 1j * y0
valf = f(z0)
return (z0, valf)
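# Illustrative usage sketch: fzzeroabs2 refines a complex zero of an analytic
# function inside the rectangle [zmin, zmax].  The simple polynomial below is
# a stand-in (an assumption for the demo) for the modal dispersion function.
def _demo_fzzeroabs2():
    f = lambda z: (z - (1.2 + 0.3j)) * (z + 2.0)
    z0, fz0 = fzzeroabs2(f, 0.5 + 0.0j, 1.5 + 1.0j)
    print('refined zero:', z0, '|f(z0)| =', abs(fz0))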
def scalarprod(modo1, modo2):
d = numpy.diff(modo1.U)
ky1 = modo1.k
al1 = modo1.al
sl1 = modo1.sl
ar1 = modo1.ar
sr1 = modo1.sr
ky2 = modo2.k
al2 = modo2.al
sl2 = modo2.sl
ar2 = modo2.ar
sr2 = modo2.sr
Nlayers = len(modo1.sl)
scprod = numpy.zeros_like(modo1.sl)
for idy in range(Nlayers):
if numpy.allclose(ky1[idy], ky2[idy]):
if numpy.linalg.norm(ky1) < 1e-10:
scprod[idy] = sl1[idy] * sl2[idy] * (modo1.U[idy+1] - modo1.U[idy])
else:
scprod[idy] = (sl1[idy] * al2[idy] - sr1[idy] * ar2[idy]) / ky1[idy] / ky2[idy] / 2. + \
d[idy]/2. * (sl1[idy] * sl2[idy] + al1[idy] * al2[idy] / ky1[idy] / ky2[idy])
else:
if numpy.linalg.norm(ky1) < 1e-10:
scprod[idy] = sl1[idy] * (al2[idy] - ar2[idy]) / ky2[idy]**2
elif numpy.linalg.norm(ky2) < 1e-10:
scprod[idy] = sl2[idy] * (al1[idy] - ar1[idy]) / ky1[idy]**2
else:
scprod[idy] = (sr1[idy] * ar2[idy] - ar1[idy] * sr2[idy] -
sl1[idy] * al2[idy] + al1[idy] * sl2[idy]) / (ky1[idy]**2 - ky2[idy]**2)
return numpy.sum(scprod)
def sueps(modo):
eps = modo.pars['epsilon']
modosueps = copy.deepcopy(modo)
modosueps.sl /= eps
modosueps.sr /= eps
modosueps.al /= eps
modosueps.ar /= eps
return modosueps
def FMM1d_y(Uy, ny, wl, nmodi, boundaryRL, TETM, verbosity=0):
k0 = 2 * numpy.pi / wl
betay = ny * k0
Nstepskz = 1543
searchinterval = max(50. / Nstepskz, numpy.abs(numpy.min(numpy.imag(2. * betay))))
imsearchinterval = 10 * k0
ypointsperregion = 5000
FMMpars = {'epsilon': ny**2, 'beta': betay, 'boundary': boundaryRL, 'Uy': Uy}
# analytical solution
if numpy.allclose(ny, ny[0]):
Message('Uniform slice found: using analytical solution.', 2).show(verbosity)
return analyticalsolution(nmodi, TETM, FMMpars)
##rekz = numpy.linspace(numpy.max(numpy.real(betay)) + searchinterval, 0., Nstepskz)
rekz2 = numpy.linspace((numpy.max(numpy.real(betay))+searchinterval)**2, 0., Nstepskz)
rekz = scipy.sqrt(rekz2)
if TETM == 'TM':
Message('Shooting TM.', 3).show(verbosity)
matchingre, modotmp = FMMshootingTM(rekz, FMMpars)
else:
Message('Shooting TE.', 3).show(verbosity)
matchingre, modotmp = FMMshooting(rekz, FMMpars)
nre = rekz / k0
nre2 = rekz2 / k0**2
##zerire, z1, z2 = findzerosnew(nre, numpy.abs(matchingre), searchinterval / k0)
zerire2, z12, z22 = findzerosnew(nre2, numpy.abs(matchingre), (searchinterval / k0)**2)
zerire = scipy.sqrt(zerire2)
kz1 = zerire * k0 - searchinterval / 2. + 1j * imsearchinterval
kz2 = zerire * k0 + searchinterval / 2. - 1j * imsearchinterval
Message('Found %d real zeros.' % len(zerire), 2).show(verbosity)
if len(zerire) < nmodi:
Message('Number of real zeros not enough: scan imaginary axis.', 2).show(verbosity)
imkza = -numpy.max(numpy.real(betay))
imkzb = 0.
while len(kz1) < nmodi:
imkza = imkza + numpy.max(numpy.real(betay))
imkzb = imkzb + numpy.max(numpy.real(betay))
##imkz = numpy.linspace(imkza, imkzb, Nstepskz)
imkz2 = numpy.linspace(imkza**2, imkzb**2, Nstepskz)
imkz = scipy.sqrt(imkz2)
if TETM == 'TM':
matchingim, modotmp = FMMshootingTM(1j * imkz, FMMpars)
else:
matchingim, modotmp = FMMshooting(1j * imkz, FMMpars)
nim = imkz * wl / 2. / numpy.pi
nim2 = imkz2 * (wl / 2. / numpy.pi)**2
##zeriim, z1, z2 = findzerosnew(nim, numpy.abs(matchingim), searchinterval / k0)
zeriim2, z12, z22 = findzerosnew(nim2, numpy.abs(matchingim), (searchinterval / k0)**2)
zeriim = scipy.sqrt(zeriim2)
Message('Found %d imag zeros.' % len(zeriim), 2).show(verbosity)
kz1 = numpy.r_[kz1, 1j * (zeriim * k0 - imsearchinterval / 2. + 1j * searchinterval / 2.)]
kz2 = numpy.r_[kz2, 1j * (zeriim * k0 + imsearchinterval / 2. - 1j * searchinterval / 2.)]
mk = 0
modi = []
    # start the loop over the bracketing intervals
Message('Refine zeros.', 2).show(verbosity)
for m in range(0, len(kz1)):
if mk == nmodi:
break
if TETM == 'TM':
z0, valf = fzzeroabs2(lambda kz: FMMshootingTM(kz, FMMpars)[0], kz1[m], kz2[m])
z0 = numpy.atleast_1d(z0)
else:
z0, valf = fzzeroabs2(lambda kz: FMMshooting(kz, FMMpars)[0], kz1[m], kz2[m])
z0 = numpy.atleast_1d(z0)
if len(z0) > 0:
if TETM == 'TM':
zero, modo = FMMshootingTM(z0, FMMpars)
else:
zero, modo = FMMshooting(z0, FMMpars)
if TETM == 'TM':
normalizzazione = 1. / numpy.sqrt(scalarprod(modo, sueps(modo)))
else:
normalizzazione = 1. / numpy.sqrt(scalarprod(modo, modo))
modo.sl *= normalizzazione
modo.al *= normalizzazione
modo.sr *= normalizzazione
modo.ar *= normalizzazione
mk += 1
modi.append(modo)
return modi
def script1d(Ux, Uy, refindex, wl, boundary, nmodislices, verbosity=0):
nx = refindex.shape[0]
slices = []
for m in range(nx):
Message('Finding 1dmodes TE.', 1).show(verbosity)
ymodih = FMM1d_y(Uy, refindex[m,:], wl, nmodislices, boundary.yh, 'TE', verbosity)
Message('Finding 1dmodes TM.', 1).show(verbosity)
ymodie = FMM1d_y(Uy, refindex[m,:], wl, nmodislices, boundary.ye, 'TM', verbosity)
slice = Slice(x1=Ux[m], x2=Ux[m+1], Uy=Uy, boundary=boundary, modie=ymodie, modih=ymodih)
        # NB: do I really need them?
slice.Ux = Ux
slice.refractiveindex = refindex
slice.epsilon = refindex**2
slice.wl = wl
slices.append(slice)
return slices
def dot(modo):
k = modo.k
mododot = copy.deepcopy(modo)
mododot.sl = modo.al
mododot.sr = modo.ar
mododot.al = -k**2 * modo.sl
mododot.ar = -k**2 * modo.sr
return mododot
def genera_rotazione(slices):
nmodi = len(slices[0].modih)
k0 = 2 * numpy.pi / slices[0].wl
    Nslices = len(slices)
R = Struct()
# alloc R
R.Ree = numpy.zeros((nmodi, nmodi, Nslices-1), dtype=complex)
R.Reem = numpy.zeros_like(R.Ree)
R.Rhh = numpy.zeros_like(R.Ree)
R.Rhhm = numpy.zeros_like(R.Ree)
R.Rhe = numpy.zeros_like(R.Ree)
R.Rhem = numpy.zeros_like(R.Ree)
for idx in range(len(slices) - 1):
slice = slices[idx]
slicep1 = slices[idx + 1]
for n in range(nmodi):
Fhn = slice.modih[n]
Fp1hn = slicep1.modih[n]
Fen = slice.modie[n]
Fp1en = slicep1.modie[n]
Fhndot = dot(Fhn)
Fp1hndot = dot(Fp1hn)
khidx = slice.modih[n].keff
khidxp1 = slicep1.modih[n].keff
for m in range(nmodi):
Fem = slice.modie[m]
Fhm = slice.modih[m]
Fp1em = slicep1.modie[m]
Fp1hm = slicep1.modih[m]
Femsueps = sueps(Fem)
Femdotsueps = dot(Femsueps)
Fp1emsueps = sueps(Fp1em)
Fp1emdotsueps = dot(Fp1emsueps)
keidx = slice.modie[m].keff
keidxp1 = slicep1.modie[m].keff
R.Ree[n, m, idx] = scalarprod(Fen, Fp1emsueps)
R.Reem[n, m, idx] = scalarprod(Fp1en, Femsueps)
R.Rhh[n, m, idx] = scalarprod(Fhn, Fp1hm)
R.Rhhm[n, m, idx] = scalarprod(Fp1hn, Fhm)
s1 = k0 * scalarprod(Fhndot, Fp1emsueps) / khidx**2
s2 = k0 * scalarprod(Fhn, Fp1emdotsueps) / keidxp1**2
R.Rhe[n, m, idx] = (s1 + s2).item()
s1 = k0 * scalarprod(Fp1hndot, Femsueps) / khidxp1**2
s2 = k0 * scalarprod(Fp1hn, Femdotsueps) / keidx**2
R.Rhem[n, m, idx] = (s1 + s2).item()
return R
def ortonormalita(slices):
nmodi = len(slices[0].modih)
k0 = 2 * numpy.pi / slices[0].wl
    Nslices = len(slices)
neesueps = numpy.zeros(Nslices, dtype=complex)
nhh = numpy.zeros_like(neesueps)
nRhe = numpy.zeros_like(neesueps)
nRee = numpy.zeros_like(neesueps)
nRhh = numpy.zeros_like(neesueps)
nAC = numpy.zeros_like(neesueps)
M = Struct()
M.ee = numpy.zeros((nmodi, nmodi, Nslices), dtype=complex)
M.eesueps = numpy.zeros_like(M.ee)
M.hh = numpy.zeros_like(M.ee)
M.Rhe = numpy.zeros_like(M.ee)
for idx, slice in enumerate(slices):
for n in range(nmodi):
Fhn = slice.modih[n]
Fen = slice.modie[n]
khidx = slice.modih[n].keff
for m in range(nmodi):
Fem = slice.modie[m]
Fhm = slice.modih[m]
keidxp1 = slice.modie[m].keff
M.ee[n, m, idx] = scalarprod(Fen, Fem)
M.eesueps[n, m, idx] = scalarprod(Fen, sueps(Fem))
M.hh[n, m, idx] = scalarprod(Fhn, Fhm)
Fp1em = slice.modie[m]
s1 = k0 * scalarprod(dot(Fhn), sueps(Fp1em)) / khidx**2
s2 = k0 * scalarprod(Fhn, sueps(dot(Fp1em))) / keidxp1**2
M.Rhe[n, m, idx] = (s1 + s2).item()
R = genera_rotazione(slices)
Ident = numpy.eye(nmodi)
for idx in range(Nslices):
neesueps[idx] = numpy.linalg.norm(M.eesueps[:,:,idx] - Ident)
nhh[idx] = numpy.linalg.norm(M.hh[:,:,idx] - Ident)
nRhe[idx] = numpy.linalg.norm(M.Rhe[:,:,idx])
for idx in range(Nslices-1):
nRee[idx] = numpy.linalg.norm(numpy.dot(R.Ree[:,:,idx], R.Reem[:,:,idx]) - Ident)
nRhh[idx] = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,idx], R.Rhhm[:,:,idx]) - Ident)
nAC[idx] = numpy.linalg.norm(numpy.dot(R.Rhe[:,:,idx], R.Reem[:,:,idx]) +
numpy.dot(R.Rhh[:,:,idx], R.Rhem[:,:,idx]))
ns1 = numpy.linalg.norm(numpy.r_[neesueps, nhh, nRhe])
ns2 = numpy.linalg.norm(numpy.r_[nRee, nRhh, nAC])
errore = numpy.linalg.norm(numpy.r_[ns1, ns2]) / scipy.sqrt(8 * nmodi)
return errore
def method_of_component(kz_, slices, Rot, uscelto=None, icomp=None):
kz = numpy.atleast_1d(kz_)
abscomp = numpy.zeros(len(kz))
    ## tmp = 500  # NB: why 500?
    tmp = 100 * len(slices[0].modie) * (len(slices) - 1)  # NB: dimension of Mvec * 50 -- is that enough?
normu = numpy.zeros(tmp, dtype=complex)
for m in range(len(kz)):
M = Mvec(kz[m], slices, Rot)
urn = numpy.zeros((M.shape[0], tmp), dtype=complex)
if (uscelto is None) and (icomp is None):
for k in range(tmp):
numpy.random.seed()
ur = numpy.random.rand(M.shape[0])
urn[:,k] = ur / numpy.linalg.norm(ur)
normu[k] = numpy.linalg.norm(numpy.dot(M, urn[:,k]))
iurn = numpy.argmin(normu)
uscelto = urn[:, iurn]
icomp = numpy.argmax(uscelto)
Mmeno1u = numpy.linalg.solve(M, uscelto)
abscomp[m] = 1. / numpy.linalg.norm(Mmeno1u)
return (abscomp, uscelto, icomp)
def creaTeThSeSh(kz, slices):
Nslices = len(slices)
nmodi = len(slices[0].modie)
d = numpy.array([s.x2 - s.x1 for s in slices])
k0 = 2. * numpy.pi / slices[0].wl
Th = numpy.zeros((nmodi, Nslices), dtype=complex)
Sh = numpy.zeros_like(Th)
Te = numpy.zeros_like(Th)
Se = numpy.zeros_like(Th)
Thleft = numpy.zeros_like(Th)
Teleft = numpy.zeros_like(Th)
Thright = numpy.zeros_like(Th)
Teright = numpy.zeros_like(Th)
for idx in range(Nslices):
ke = numpy.array([m.keff.item() for m in slices[idx].modie])
kh = numpy.array([m.keff.item() for m in slices[idx].modih])
kxh = scipy.sqrt(kh**2 - kz**2)
kxe = scipy.sqrt(ke**2 - kz**2)
Th[:,idx] = (k0/kh)**2 * kxh / numpy.tan(kxh * d[idx])
Sh[:,idx] = (k0/kh)**2 * kxh / numpy.sin(kxh * d[idx])
Te[:,idx] = (k0/ke)**2 * kxe / numpy.tan(kxe * d[idx])
Se[:,idx] = (k0/ke)**2 * kxe / numpy.sin(kxe * d[idx])
ke = numpy.array([m.keff.item() for m in slices[0].modie])
kh = numpy.array([m.keff.item() for m in slices[0].modih])
kxh = scipy.sqrt(kh**2 - kz**2)
kxe = scipy.sqrt(ke**2 - kz**2)
if slices[0].boundary.xleft == 'Electric Wall':
# ah = 0
Thleft = -(k0/kh)**2 * kxh * numpy.tan(kxh * d[0])
Teleft = Te[:,0]
else:
# ae = 0
Teleft = -(k0/ke)**2 * kxe * numpy.tan(kxe * d[0])
Thleft = Th[:,0]
ke = numpy.array([m.keff.item() for m in slices[-1].modie])
kh = numpy.array([m.keff.item() for m in slices[-1].modih])
kxh = scipy.sqrt(kh**2 - kz**2)
kxe = scipy.sqrt(ke**2 - kz**2)
if slices[-1].boundary.xleft == 'Electric Wall':
# ah = 0
Thright = -(k0/kh)**2 * kxh * numpy.tan(kxh * d[-1])
Teright = Te[:,-1]
else:
# ae = 0
Teright = -(k0/ke)**2 * kxe * numpy.tan(kxe * d[-1])
Thright = Th[:,-1]
return (Te, Th, Se, Sh, Teleft, Teright, Thleft, Thright)
def Mvec(kz, slices, R):
Nslices = len(slices)
Rhh = R.Rhh
Ree = R.Ree
Rhe = R.Rhe
Rhem = R.Rhem
Rhhm = R.Rhhm
Te, Th, Se, Sh, Teleft, Teright, Thleft, Thright = creaTeThSeSh(kz,slices)
Te[:,0] = Teleft
Te[:,-1] = Teright
Th[:,0] = Thleft
Th[:,-1] = Thright
# case Nslices=2
if Nslices==2:
raise NotImplementedError('2 slices not implemented yet.')
dim1 = Th.shape[0]
M = numpy.zeros((2 * dim1 * (Nslices-1), 2 * dim1 * (Nslices-1)), dtype=complex)
Dim1 = numpy.arange(dim1)
for idx in range(3, Nslices):
idxeJA = (2 * idx - 4) * dim1
idxeJB = (2 * idx - 6) * dim1
idxeJC = (2 * idx - 2) * dim1
idxeJD = (2 * idx - 3) * dim1
idxeIA = (2 * idx - 4) * dim1
idxeIB = (2 * idx - 4) * dim1
idxeIC = (2 * idx - 4) * dim1
idxeID = (2 * idx - 4) * dim1
idxhJA = (2 * idx - 3) * dim1
idxhJB = (2 * idx - 5) * dim1
idxhJC = (2 * idx - 1) * dim1
idxhJD = (2 * idx - 4) * dim1
idxhIA = (2 * idx - 3) * dim1
idxhIB = (2 * idx - 3) * dim1
idxhIC = (2 * idx - 3) * dim1
idxhID = (2 * idx - 3) * dim1
IAe = Dim1 + idxeIA
JAe = Dim1 + idxeJA
IBe = Dim1 + idxeIB
JBe = Dim1 + idxeJB
ICe = Dim1 + idxeIC
JCe = Dim1 + idxeJC
IDe = Dim1 + idxeID
JDe = Dim1 + idxeJD
IAh = Dim1 + idxhIA
JAh = Dim1 + idxhJA
IBh = Dim1 + idxhIB
JBh = Dim1 + idxhJB
ICh = Dim1 + idxhIC
JCh = Dim1 + idxhJC
IDh = Dim1 + idxhID
JDh = Dim1 + idxhJD
M[numpy.ix_(IAe, JAe)] = numpy.dot(Ree[:,:,idx-1].T * Te[:,idx-1], Ree[:,:,idx-1]) + numpy.diag(Te[:,idx])
M[numpy.ix_(IBe, JBe)] = -Ree[:,:,idx-1].T * Se[:,idx-1]
M[numpy.ix_(ICe, JCe)] = -Se[:,numpy.newaxis,idx] * Ree[:,:,idx]
M[numpy.ix_(IDe, JDe)] = -kz * numpy.dot(Ree[:,:,idx-1].T, Rhem[:,:,idx-1].T)
M[numpy.ix_(IAh, JAh)] = numpy.dot(Rhhm[:,:,idx-1] * Th[:,idx-1], Rhh[:,:,idx-1]) + numpy.diag(Th[:,idx])
M[numpy.ix_(IBh, JBh)] = -Rhhm[:,:,idx-1] * Sh[:,idx-1]
M[numpy.ix_(ICh, JCh)] = -Sh[:,numpy.newaxis,idx] * Rhh[:,:,idx]
M[numpy.ix_(IDh, JDh)] = kz * numpy.dot(Rhhm[:,:,idx-1], Rhe[:,:,idx-1])
idx = 2
idxeJA = (2 * idx - 4) * dim1
idxeJC = (2 * idx - 2) * dim1
idxeJD = (2 * idx - 3) * dim1
idxeIA = (2 * idx - 4) * dim1
idxeIC = (2 * idx - 4) * dim1
idxeID = (2 * idx - 4) * dim1
idxhJA = (2 * idx - 3) * dim1
idxhJC = (2 * idx - 1) * dim1
idxhJD = (2 * idx - 4) * dim1
idxhIA = (2 * idx - 3) * dim1
idxhIC = (2 * idx - 3) * dim1
idxhID = (2 * idx - 3) * dim1
IAe = Dim1 + idxeIA
JAe = Dim1 + idxeJA
ICe = Dim1 + idxeIC
JCe = Dim1 + idxeJC
IDe = Dim1 + idxeID
JDe = Dim1 + idxeJD
IAh = Dim1 + idxhIA
JAh = Dim1 + idxhJA
ICh = Dim1 + idxhIC
JCh = Dim1 + idxhJC
IDh = Dim1 + idxhID
JDh = Dim1 + idxhJD
idx -= 1
M[numpy.ix_(IAe, JAe)] = numpy.dot(Ree[:,:,idx-1].T * Te[:,idx-1], Ree[:,:,idx-1]) + numpy.diag(Te[:,idx])
M[numpy.ix_(ICe, JCe)] = -Se[:,numpy.newaxis,idx] * Ree[:,:,idx]
M[numpy.ix_(IDe, JDe)] = -kz * numpy.dot(Ree[:,:,idx-1].T, Rhem[:,:,idx-1].T)
M[numpy.ix_(IAh, JAh)] = numpy.dot(Rhhm[:,:,idx-1] * Th[:,idx-1], Rhh[:,:,idx-1]) + numpy.diag(Th[:,idx])
M[numpy.ix_(ICh, JCh)] = -Sh[:,numpy.newaxis,idx] * Rhh[:,:,idx]
M[numpy.ix_(IDh, JDh)] = kz * numpy.dot(Rhhm[:,:,idx-1], Rhe[:,:,idx-1])
idx = Nslices
idxeJA = (2 * idx - 4) * dim1
idxeJB = (2 * idx - 6) * dim1
idxeJD = (2 * idx - 3) * dim1
idxeIA = (2 * idx - 4) * dim1
idxeIB = (2 * idx - 4) * dim1
idxeID = (2 * idx - 4) * dim1
idxhJA = (2 * idx - 3) * dim1
idxhJB = (2 * idx - 5) * dim1
idxhJD = (2 * idx - 4) * dim1
idxhIA = (2 * idx - 3) * dim1
idxhIB = (2 * idx - 3) * dim1
idxhID = (2 * idx - 3) * dim1
IAe = Dim1 + idxeIA
JAe = Dim1 + idxeJA
IBe = Dim1 + idxeIB
JBe = Dim1 + idxeJB
IDe = Dim1 + idxeID
JDe = Dim1 + idxeJD
IAh = Dim1 + idxhIA
JAh = Dim1 + idxhJA
IBh = Dim1 + idxhIB
JBh = Dim1 + idxhJB
IDh = Dim1 + idxhID
JDh = Dim1 + idxhJD
idx -= 1
M[numpy.ix_(IAe, JAe)] = numpy.dot(Ree[:,:,idx-1].T * Te[:,idx-1], Ree[:,:,idx-1]) + numpy.diag(Te[:,idx])
M[numpy.ix_(IBe, JBe)] = -Ree[:,:,idx-1].T * Se[:,idx-1]
M[numpy.ix_(IDe, JDe)] = -kz * numpy.dot(Ree[:,:,idx-1].T, Rhem[:,:,idx-1].T)
M[numpy.ix_(IAh, JAh)] = numpy.dot(Rhhm[:,:,idx-1] * Th[:,idx-1], Rhh[:,:,idx-1]) + numpy.diag(Th[:,idx])
M[numpy.ix_(IBh, JBh)] = -Rhhm[:,:,idx-1] * Sh[:,idx-1]
M[numpy.ix_(IDh, JDh)] = kz * numpy.dot(Rhhm[:,:,idx-1], Rhe[:,:,idx-1])
return M
def check_matching(kz, slices, modo, R):
Te, Th, Se, Sh, Teleft, Teright, Thleft, Thright = creaTeThSeSh(kz, slices)
Shr = numpy.array([m.sr for m in modo.modih])
Shl = numpy.array([m.sl for m in modo.modih])
Ser = numpy.array([m.sr for m in modo.modie])
Sel = numpy.array([m.sl for m in modo.modie])
Ahr = numpy.array([m.ar for m in modo.modih])
Ahl = numpy.array([m.al for m in modo.modih])
Aer = numpy.array([m.ar for m in modo.modie])
Ael = numpy.array([m.al for m in modo.modie])
kz = modo.keff
n1 = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,1], Shl[:,2]) - Shr[:,1])
n2 = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,0], Shl[:,1]) - Shr[:,0])
n3 = numpy.linalg.norm(numpy.dot(R.Ree[:,:,1], Sel[:,2]) - Ser[:,1])
n4 = numpy.linalg.norm(numpy.dot(R.Ree[:,:,0], Sel[:,1]) - Ser[:,0])
n5 = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,1], Ahl[:,2]) - kz * numpy.dot(R.Rhe[:,:,1], Sel[:,2]) - Ahr[:,1])
n6 = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,0], Ahl[:,1]) - kz * numpy.dot(R.Rhe[:,:,0], Sel[:,1]) - Ahr[:,0])
n7 = numpy.linalg.norm(numpy.dot(R.Reem[:,:,1].T, Ael[:,2]) + kz * numpy.dot(R.Rhem[:,:,1].T, Shl[:,2]) - Aer[:,1])
n8 = numpy.linalg.norm(numpy.dot(R.Reem[:,:,0].T, Ael[:,1]) + kz * numpy.dot(R.Rhem[:,:,0].T, Shl[:,1]) - Aer[:,0])
n9 = numpy.linalg.norm(-Te[:,0] * Sel[:,0] + Se[:,0] * Ser[:,0] - Ael[:,0])
n10 = numpy.linalg.norm(-Te[:,1] * Sel[:,1] + Se[:,1] * Ser[:,1] - Ael[:,1])
n11 = numpy.linalg.norm(-Te[:,2] * Sel[:,2] + Se[:,2] * Ser[:,2] - Ael[:,2])
n12 = numpy.linalg.norm(-Th[:,0] * Shl[:,0] + Sh[:,0] * Shr[:,0] - Ahl[:,0])
n13 = numpy.linalg.norm(-Th[:,1] * Shl[:,1] + Sh[:,1] * Shr[:,1] - Ahl[:,1])
n14 = numpy.linalg.norm(-Th[:,2] * Shl[:,2] + Sh[:,2] * Shr[:,2] - Ahl[:,2])
n15 = numpy.linalg.norm(-Sh[:,0] * Shl[:,0] + Th[:,0] * Shr[:,0] - Ahr[:,0])
n16 = numpy.linalg.norm(-Sh[:,1] * Shl[:,1] + Th[:,1] * Shr[:,1] - Ahr[:,1])
n17 = numpy.linalg.norm(-Sh[:,2] * Shl[:,2] + Th[:,2] * Shr[:,2] - Ahr[:,2])
n18 = numpy.linalg.norm(-Se[:,0] * Sel[:,0] + Te[:,0] * Ser[:,0] - Aer[:,0])
n19 = numpy.linalg.norm(-Se[:,1] * Sel[:,1] + Te[:,1] * Ser[:,1] - Aer[:,1])
n20 = numpy.linalg.norm(-Se[:,2] * Sel[:,2] + Te[:,2] * Ser[:,2] - Aer[:,2])
Nv = numpy.array([n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15, n16, n17, n18, n19, n20])
    N = numpy.linalg.norm(Nv)
return N
def creacoeffx3(kz, solution, slices, R):
Nslices = len(slices)
xl = slices[0].boundary.xleft
xr = slices[0].boundary.xright
nmodi = len(slices[0].modih)
Rhh = R.Rhh
Ree = R.Ree
Rhe = R.Rhe
Rhem = R.Rhem
Reem = R.Reem
Te, Th, Se, Sh, Teleft, Teright, Thleft, Thright = creaTeThSeSh(kz, slices)
##sl2end = numpy.reshape(solution, (nmodi, 2 * (Nslices - 1)))
sl2end = numpy.reshape(solution, (2 * (Nslices - 1), nmodi)).T
idxslices = 2 * numpy.arange((Nslices-1))
sle2end = sl2end[:, idxslices]
slh2end = sl2end[:, idxslices + 1]
    ale = numpy.zeros((nmodi, Nslices), dtype=complex)
sre = numpy.zeros_like(ale)
are = numpy.zeros_like(ale)
alh = numpy.zeros_like(ale)
srh = numpy.zeros_like(ale)
arh = numpy.zeros_like(ale)
if (xl == 'Electric Wall') & (xr == 'Electric Wall'):
sle = numpy.c_[numpy.zeros((nmodi,1)), sle2end]
sre[:,-1] = numpy.zeros(nmodi)
are[:,-1] = -Se[:,-1] * sle[:,-1]
ale[:,-1] = -Te[:,-1] * sle[:,-1]
slh = numpy.c_[numpy.zeros((nmodi,1)), slh2end]
arh[:,-1] = numpy.zeros(nmodi)
srh[:,-1] = Sh[:,-1] / Th[:,-1] * slh[:,-1]
alh[:,-1] = -Th[:,-1] * slh[:,-1] + Sh[:,-1] * srh[:,-1]
for idx in range(1, Nslices):
sre[:,idx-1] = numpy.dot(Ree[:,:,idx-1], sle[:,idx])
srh[:,idx-1] = numpy.dot(Rhh[:,:,idx-1], slh[:,idx])
slh[:,0] = Sh[:,0] / Th[:,0] * srh[:,0]
for idx in range(Nslices - 1, 0, -1):
are[:,idx-1] = numpy.dot(Reem[:,:,idx-1].T, ale[:,idx]) + kz * numpy.dot(Rhem[:,:,idx-1].T, slh[:,idx])
arh[:,idx-1] = numpy.dot(Rhh[:,:,idx-1], alh[:,idx]) - kz * numpy.dot(Rhe[:,:,idx-1], sle[:,idx])
ale[:,idx-1] = -Te[:,idx-1] * sle[:,idx-1] + Se[:,idx-1] * sre[:,idx-1]
alh[:,idx-1] = -Th[:,idx-1] * slh[:,idx-1] + Sh[:,idx-1] * srh[:,idx-1]
elif (xl == 'Electric Wall') & (xr == 'Magnetic Wall'):
sle = numpy.c_[numpy.zeros((nmodi,1)), sle2end]
are[:,-1] = numpy.zeros(nmodi)
sre[:,-1] = Se[:,-1] / Te[:,-1] * sle[:,-1]
ale[:,-1] = -Te[:,-1] * sle[:,-1] + Se[:,-1] * sre[:,-1]
slh = numpy.c_[numpy.zeros((nmodi,1)), slh2end]
srh[:,-1] = numpy.zeros(nmodi)
arh[:,-1] = -Sh[:,-1] * slh[:,-1]
alh[:,-1] = -Th[:,-1] * slh[:,-1]
for idx in range(1, Nslices):
sre[:,idx-1] = numpy.dot(Ree[:,:,idx-1], sle[:,idx])
srh[:,idx-1] = numpy.dot(Rhh[:,:,idx-1], slh[:,idx])
slh[:,0] = Sh[:,0] / Th[:,0] * srh[:,0]
for idx in range(Nslices - 1, 0, -1):
are[:,idx-1] = numpy.dot(Reem[:,:,idx-1].T, ale[:,idx]) + kz * numpy.dot(Rhem[:,:,idx-1].T, slh[:,idx])
arh[:,idx-1] = numpy.dot(Rhh[:,:,idx-1], alh[:,idx])- kz * numpy.dot(Rhe[:,:,idx-1], sle[:,idx])
ale[:,idx-1] = -Te[:,idx-1] * sle[:,idx-1] + Se[:,idx-1] * sre[:,idx-1]
alh[:,idx-1] = -Th[:,idx-1] * slh[:,idx-1] + Sh[:,idx-1] * srh[:,idx-1]
elif (xl == 'Magnetic Wall') & (xr == 'Electric Wall'):
sle = numpy.c_[numpy.zeros((nmodi,1)), sle2end]
slh = numpy.c_[numpy.zeros((nmodi,1)), slh2end]
        sre[:,-1] = numpy.zeros(nmodi)
        arh[:,-1] = numpy.zeros(nmodi)
srh[:,-1] = Sh[:,-1] / Th[:,-1] * slh[:,-1]
are[:,-1] = -Se[:,-1] * sle[:,-1]
ale[:,-1] = -Te[:,-1] * sle[:,-1]
alh[:,-1] = -Th[:,-1] * slh[:,-1] + Sh[:,-1] * srh[:,-1]
for idx in range(1, Nslices):
            sre[:,idx-1] = numpy.dot(Ree[:,:,idx-1], sle[:,idx])
from __future__ import absolute_import
from __future__ import print_function
import logging
import numpy as np
from numpy.lib.recfunctions import append_fields
from sklearn.cluster import DBSCAN
from lmatools.coordinateSystems import GeographicSystem
from lmatools.flashsort.flash_stats import calculate_flash_stats, Flash
def gen_stream(vec, IDs): #<1>
for v, vi in zip(vec, IDs):
yield (v, vi)
def reset_buffer():
buf = []
return buf, buf.append
def gen_chunks(stream, start_time, max_duration, t_idx=-1):
""" Generator function that consumes a stream of points, one at a
time, and their unique index. These points are bundled together
        into chunks of length max_duration along the time coordinate.
For each point vector v, the time coordinate is given by v[t_idx]
"""
next_time = start_time + max_duration
v_buffer, append = reset_buffer() # slight optimization since attr lookup is avoided
i_buffer, append_idx = reset_buffer()
for v, vi in stream:
append(v)
append_idx(vi)
t = v[t_idx]
if t >= next_time:
yield (np.asarray(v_buffer), np.asarray(i_buffer))
v_buffer, append = reset_buffer()
i_buffer, append_idx = reset_buffer()
next_time = t+max_duration
yield (np.asarray(v_buffer), np.asarray(i_buffer))
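# Illustrative usage sketch: stream a few synthetic points whose last column
# is time and split them into 60-second chunks.  The point values below are
# assumptions for demonstration only.
def _demo_gen_chunks():
    pts = np.array([[0.0, 1.0], [0.0, 10.0], [0.0, 70.0], [0.0, 130.0]])
    ids = np.arange(pts.shape[0])
    stream = gen_stream(pts, ids)
    for chunk, chunk_ids in gen_chunks(stream, start_time=0.0, max_duration=60.0):
        print(chunk.shape, chunk_ids)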
class ChunkedFlashSorter(object):
"""
Sort LMA data from points to flashes using many small chunks
of points. Allows for algorithms that do not scale efficiently with
large numbers of points.
The __init__ and geo_to_cartesian
methods are more generally useful, and could be factored out into a
generic flash sorting class.
The actual clustering algorithm must be implemented in identify_clusters.
A prototype method is provided below which indicates the necessary call
signature.
"""
def __init__(self, params, min_points=1, **kwargs):
"""
params: dictionary of parameters used to perform data QC and clustering
min_points: the minimum number of points allowed in a cluster
"""
self.logger = logging.getLogger('FlashAutorunLogger')
self.logger.info('%s', params)
self.params = params
self.min_points = min_points
self.ctr_lat, self.ctr_lon, self.ctr_alt = (
params['ctr_lat'], params['ctr_lon'], 0.0)
def geo_to_cartesisan(self, lon, lat, alt):
""" Convert lat, lon in degrees and altitude in meters to
Earth-centered, Earth-fixed cartesian coordinates. Translate
to coordinate center location. Returns X,Y,Z in meters.
"""
geoCS = GeographicSystem()
X,Y,Z = geoCS.toECEF(lon, lat, alt)
Xc, Yc, Zc = geoCS.toECEF(self.ctr_lon, self.ctr_lat, self.ctr_alt)
X, Y, Z = X-Xc, Y-Yc, Z-Zc
return (X, Y, Z)
def identify_clusters(self, data):
""" For data with shape (N, D) in D dimensions, return
a vector of labels of length N.
        min_points is the minimum number of points required to form
        a cluster. For the DBSCAN algorithm, this is min_samples for
a core cluster.
This function adopts the convention that clusters labeled
with an ID of -1 are singleton points not belonging to a
cluster, consistent with the convention of sklearn.cluster.DBSCAN
"""
err = "Please create a new subclass and implement this method"
raise NotImplementedError(err)
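    # Illustrative sketch: a minimal subclass wiring identify_clusters to
    # scikit-learn's DBSCAN (imported at the top of this module).  The class
    # name and the fixed eps below are assumptions for demonstration only;
    # real use would scale the space-time coordinates so one eps is meaningful.
    #
    #   class SimpleDBSCANSorter(ChunkedFlashSorter):
    #       def identify_clusters(self, data):
    #           db = DBSCAN(eps=1.0, min_samples=self.min_points)
    #           return db.fit(data).labels_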
def gen_cluster_chunk_pairs(self, stream):
""" Generator function that consumes a stream of chunks of data,
and processes overlapping pairs. The stream is to consist of
tuples of (chunk, pt_id), where pt_id is a unique index for
each vector in chunk.
Chunk is of shape (N, D) for N point vectors in D dimensions
pt_id has shape (N,)
        Calls self.identify_clusters, which returns a vector of N labels.
The labels are presumed to adopt the convention that clusters labeled
with an ID of -1 are singleton points not belonging to a
cluster, consistent with the convention of sklearn.cluster.DBSCAN
"""
chunk1, id1 = next(stream)
for chunk2, id2 in stream:
len1 = chunk1.shape[0]
len2 = chunk2.shape[0]
if len2 == 0:
conc = chunk1
concID = id1
chunk2 = chunk1[0:0,:]
id2 = id1[0:0]
elif len1 == 0:
conc = chunk2
concID = id2
chunk1 = chunk2[0:0,:]
id1 = id2[0:0]
else:
print(id1.shape, id2.shape)
conc = np.vstack((chunk1, chunk2))
concID = np.concatenate((id1, id2))
# do stuff with chunk 1 and 2
labels = self.identify_clusters(conc)
# defer sending these in one bundle ... need to ensure all labels
# from this run of clustering stay together
# clustered_output_target.send((chunk1, labels[:len1])) IS BAD
# pull data out of chunk2 that was clustered as part of chunk 1
chunk1_labelset = set(labels[:len1])
if -1 in chunk1_labelset:
chunk1_labelset.remove(-1) # remove the singleton cluster ID - we want to retain these from chunk 2.
clustered_in_chunk2 = np.fromiter( ( True if label in chunk1_labelset else False for i,label in enumerate(labels[len1:])) , dtype=bool)
clustered_in_chunk1 = np.ones(chunk1.shape[0], dtype = bool)
clustered_mask = np.hstack((clustered_in_chunk1, clustered_in_chunk2))
bundle_chunks = conc[clustered_mask,:]
bundle_IDs = concID[clustered_mask]
bundle_labels = np.concatenate((labels[:len1], labels[len1:][clustered_in_chunk2]))
assert bundle_chunks.shape[0] == bundle_labels.shape[0]
yield (bundle_chunks, bundle_labels, bundle_IDs)
del bundle_chunks, bundle_labels
# clustered_output_target.send((chunk2[clustered_in_chunk2], labels[len1:][clustered_in_chunk2]))
residuals = conc[clustered_mask==False,:]
# Because we pull some points from chunk2 and combine them with
# flashes that started in chunk1, the data are now out of their
# original order. Therefore, send along the data IDs that go with the
# pulled points so that the original order is still tracked.
residualIDs = concID[clustered_mask==False]
# optimization TODO: pull clusters out of chunk 2 whose final point is greater
# than the distance threshold from the end of the second chunk interval. They're already clustered
# and don't need to be clustered again.
# prepare for another chunk
if len(residuals) == 0:
residuals = chunk1[0:0,:] # empty array that preserves the number of dimensions in the data vector - no obs.
residualIDs = id1[0:0]
del chunk1, id1
chunk1 = np.asarray(residuals)
id1 = np.asarray(residualIDs)
del residuals, residualIDs
if chunk1.shape[0] != 0:
labels = self.identify_clusters(chunk1)
yield (chunk1, labels, id1)
def aggregate_ids(self, stream):
""" Final step in streamed clustering: consume clustered output from
one or more chunks of data, ensuring that the IDs increment
across chunk boundaries.
"""
# TODO: remove v from loop below; not needed.
unique_labels = set([-1])
total = 0
point_labels = []
all_IDs = []
# all_v = []
# n_last = 0
for (v, orig_labels, IDs) in stream:
labels = np.atleast_1d(orig_labels).copy()
if len(unique_labels) > 0:
# Only add those labels that represent valid clusters (nonnegative) to the unique set.
# Make sure labels increment continuously across all chunks received
nonsingleton = (labels >= 0)
labels[nonsingleton] = labels[nonsingleton] + (max(unique_labels) + 1)
for l in set(labels):
unique_labels.add(l)
all_IDs.append(np.asarray(IDs))
point_labels.append(labels)
total += v.shape[0]
del v, orig_labels, labels, IDs
print("done with {0} total points".format(total))
if total == 0:
point_labels = np.asarray(point_labels, dtype=int)
            all_IDs = np.asarray(all_IDs, dtype=int)
else:
point_labels = np.concatenate(point_labels)
all_IDs = np.concatenate(all_IDs)
print("returning {0} total points".format(total))
return (unique_labels, point_labels, all_IDs)
def create_flash_objs(self, lma, good_data, unique_labels, point_labels, all_IDs):
""" lma is an LMADataset object. Its data instance gets overwritten
with the qc'd, flash_id'd data, and it gains a flashes attribute
with a list of flash objects resulting from flash sorting.
all_IDs gives the index in the original data array to
which each point_label corresponds.
unique_labels is the set of all labels produced by previous stages
in the flash sorting algorithm, including a -1 ID for all singleton flashes.
"""
logger = self.logger
# add flash_id column
empty_labels = np.empty_like(point_labels)
# placing good_data in a list due to this bug when good_data has length 1
# http://stackoverflow.com/questions/36440557/typeerror-when-appending-fields-to-a-structured-array-of-size-one
if 'flash_id' not in good_data.dtype.names:
data = append_fields([good_data], ('flash_id',), (empty_labels,))
else:
data = good_data.copy()
# all_IDs gives the index in the original data array to
# which each point_label corresponds
data['flash_id'][all_IDs] = point_labels
# In the case of no data, lma.data.shape will have
# length zero, i.e., a 0-d array
if (len(data.shape) == 0) | (data.shape[0] == 0):
# No data
flashes = []
else:
# work first with non-singleton flashes
# to have strictly positive flash ids
print(data.shape)
singles = (data['flash_id'] == -1)
non_singleton = data[ np.logical_not(singles) ]
print(non_singleton['flash_id'].shape)
order = np.argsort(non_singleton['flash_id'])
ordered_data = non_singleton[order]
flid = ordered_data['flash_id']
if (flid.shape[0]>0):
max_flash_id = flid[-1]
else:
max_flash_id = 0
try:
assert max_flash_id == max(unique_labels)
except AssertionError:
print("Max flash ID {0} is not as expected from unique labels {1}".format(max_flash_id, max(unique_labels)))
            boundaries, = np.where(flid[1:]-flid[:-1])
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 08 17:46:45 2017
@author: apfranco
"""
import numpy as np
import scipy
from scipy.optimize import leastsq
def RockPhysicsCalibration(agd, OM):
    # ALGORITHM FOR ROCK-PHYSICS MODEL CALIBRATION
    #
    # MODELS
    # 1 - neutron porosity:
    # phi = A + B phiE + C vsh   or
    # 2 - gamma ray:
    # gr = grmin + (grmax - grmin) vsh
    # 3 - density model:
    # rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh * (1 - phiE)
    # 4 - resistivity:
    # 1/ Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    #
    # GENERAL DESCRIPTION:
    # Run this routine to generate the coefficients and densities described
    # above, to be used in later stages of the inference of porosity, clay
    # volume and saturation. The routine accepts known stratigraphic limits,
    # performing a global calibration for the whole package and also for
    # groups split by shale volume with respect to a cutoff value (cutclay).
    # Three output options are available: calibration over the whole analysed
    # segment, over smaller segments defined on input (secHoriz), or over
    # those same smaller segments further subdivided by shale content.
    #
    # INPUT PARAMETERS:
    # log data - gamma ray, porosity, density, VP and VS
    # core data (if available) - clay volume, porosity, density
    # top, bot - upper and lower limits of the section to be analysed
    # phiSand - porosity of homogeneous sand (zero clay content)
    # grmin, grmax - minimum and maximum values for converting gamma ray into shale volume
    # cutclay - threshold value for the sand-to-shale transition (grain- to matrix-supported)
    # secHoriz - matrix (nFac x 2) with the upper and lower limits of each stratigraphic unit
    # satUncert - = 0 turns off the calibration selector for oil-bearing horizons.
    # Otherwise iOut must necessarily equal 3
    # iOut - facies detail selector for the parameter output: 1, 2
    # or 3, as explained above.
    # modPhiC - selector of the calibration (effective) porosity type: = 1 neutron
    # porosity log; = 2 independent effective porosity (e.g. core);
    # = 3 effective porosity computed with formula 1 above.
    # NOTE: CAUTION - option modPhiC = 3 still needs improvement and should be used
    # only in very specific cases. It generally produces ill-conditioned matrices.
    #
    # OUTPUT PARAMETERS:
    # calibData_nomePoco - file with the reference data for the calibration process
    # phiC
    # clayC
    # rhoC
    # resC
    # calibCPR_Vel_nomePoco - file with the parameters of Han's linear velocity model
    # facies
    # phiSand
    # neutron
    # denLitho
    # cValuesPhi
    # cValuesChi
    # covMatrixPar
    # coefVP
    # coefVS
    # fluidProp
    # fluidPars
    print("CALLING THE FUNCTION IN ALGO")
    # Input parameters
inputPars = agd.get_input()
well_uid = agd.get_well_uid()
log_index = OM.list('log', well_uid)[0]
indexes = log_index.get_index()[0]
z = indexes[0].data
    topCL = inputPars.get('topCL', None)  # calibration interval (water-bearing)
    botCL = inputPars.get('botCL', None)
    top = inputPars.get('top', None)  # inference interval
bot = inputPars.get('bot', None)
indLog = np.argwhere(np.logical_and(z>=top, z<=bot))
indLog = np.squeeze(indLog,1)
    # Input of the pressure profiles
press_file = np.loadtxt('U:/bkp_Windows06nov2017/Documents/Pocos_Morena/MA20.prs')
z = z[indLog]
gr = inputPars.get('gr', None )
gr = gr[indLog]
gr = logInterp(gr,z)
phi = inputPars.get('phi', None )
phi = phi[indLog]
phi = logInterp(phi,z)
rhoFull = inputPars.get('rho', None )
rho = rhoFull[indLog]
rho = logInterp(rho,z)
res = inputPars.get('res', None )
res = res[indLog]
    if np.all(np.isnan(res)):  # res == np.NaN is always False; test for an all-NaN log with isnan
res = np.empty(np.size(indLog))
else:
res = logInterp(res,z)
fac = inputPars.get('fac', None )
fac = fac[indLog]
fac = np.array(np.floor(fac), dtype=int)
fac = logInterp(fac,z)
    # Input of the pressure profiles
zProv = indexes[0].data
mpp = 0.0980665*press_file[:,0]
mtzp = press_file[:,1]
lpres, cpres = np.shape(press_file)
if (cpres == 3):
mmzp = press_file[:,cpres - 1]
else:
mmzp = np.empty([0,0])
nDP = np.size(mtzp)
tvdss = inputPars.get('tvdss', None )
tvdss = tvdss[indLog]
izp = np.empty(nDP, dtype=int)
if (np.size(mmzp) == 0):
indr = indLog
lindr = np.size(indr) - 1
tol = 0.1
for i in range (0, nDP):
indp = np.argwhere(np.logical_and(tvdss <= (mtzp[i] + tol), tvdss >= (mtzp[i] - tol)))
indp= np.squeeze(indp,1)
cizp = np.argwhere(np.logical_and(indp >= indr[0], indp <= indr[lindr]))
cizp= np.squeeze(cizp,1)
if (np.size(cizp) == 0):
izp[i] = np.argmin(np.abs(tvdss - mtzp[i]))
else:
izp[i] = indp[cizp[0]]
mzp = zProv[izp]
matsort = np.concatenate([[mzp],[mpp], [mtzp],[izp]]).T
indsort = np.argsort(matsort[:,0],0)
matsort = np.array([[matsort[indsort,0]],[matsort[indsort,1]],[matsort[indsort,2]],[matsort[indsort,3]]]).T
matsort = np.squeeze(matsort)
mzp = matsort[:,0]
mpp = matsort[:,1]
mtzp = matsort[:,2]
izp = matsort[:,3].astype(int)
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
else:
mzp = mmzp
for i in range (0, nDP):
izp[i] = np.argmin(np.abs(zProv - mzp[i]))
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
phiCore = np.empty([0,0])
secHoriz = np.array([top, bot])
    # Calibration parameters, data and output selectors
    nFac = 4
    modPhiC = 1  # selector of the calibration data type used as effective porosity
    # 1: neutron porosity log  2: effective porosity log
    useCore = 0
    iOut = 2
    # iuseclay = 0  # selector of the clay-volume type to be used
    # 0: vsh straight from the log  1: clay (computed from GR)
    # Density parameters
    rhoMin = np.array([2.55, 2.569, 2.623, 2.707])  # there are 4 facies in the reported region
    # Resistivity parameters
    mP = 2.0  # cementation exponent in clean sands: 1.3 (unconsolidated) - 2.0 (consolidated)
    nS = 2.0  # saturation exponent in clean sands, 1.5 - 2.0.
    # It is reduced in the presence of lamination and microporosity
    aT = 0.8  # constant of Archie's equation
    Rw = 0.028  # water resistivity
    Rsh = 2.048  # shale resistivity
resCoef = np.array([[mP, nS, aT*Rw, Rsh], [1.5, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh]])
    # Section on fluid properties and sand/shale matrices
    # Parameters
    # pressure calculation
    pres_poros = np.mean(mpp)  # reference pore pressure for the density calculation
    temp = 89.0  # temperature (oC)
    sal = 102400  # salinity
    RGO = 75.0  # gas-oil ratio
    API = 29.0  # API gravity
    G = 0.835  # specific gravity
    # Pack the parameters into the vector used by the function call
fluidPars = np.array([pres_poros, temp, sal, RGO, API, G])
    # HERE BEGINS THE CODE secCalibVshPhiRhoRes_vpHan
    # Calibration interval
    indCL = np.where(np.logical_and(z>=topCL, z<=botCL))
    nData = np.size(z)
    # Compute effective porosity and vsh, estimating grmin and grmax
    # over the whole package covered by the data
    # Transformation of the observed data
    # Shale volume from gamma rays
indSh = np.argwhere(fac==4)
indSh= np.squeeze(indSh,1)
indSd = np.argwhere(fac == 1)
indSd= np.squeeze(indSd,1)
if (np.size(indSh) == 0 and np.size(indSd) == 0):
grmax = np.percentile(gr, 95)
grmin = np.percentile(gr, 5)
else:
grmax = np.percentile(gr[indSh], 95) #146.3745
grmin = np.percentile(gr[indSd], 5) #54.2600
claye = vshGRcalc(gr, grmin, grmax)
    # For now only modPhiC == 1 is used
    if modPhiC == 1:
        grlim = grmax
        ind = np.where(gr >= grlim)
        phiNsh = np.median(phi[ind])
        phiEe = np.fmax(0.01, phi - claye*phiNsh)
        modPhiC = 2
elif (modPhiC == 2 and np.size(phiCore) == 0):
print ("Nao existe a funcao chamada aqui dentro")
#phiEe = phiSd2phiE (zR, claye, phiSand, secHoriz)
elif (modPhiC == 2 and useCore == 1 ):
phiEe = phiCore
    # fluidProp: matrix with Kf and density values for the brine, oil and
    # gas phases, ordered as follows:
    # bulk_salmoura, bulk_oleo, bulk_gas (modulus varies with pressure)
    # rho_salmoura, rho_oleo, rho_gas (only the density is kept fixed)
    nDP = np.size(mpp)
    fluidPropP = np.empty([nDP, 2, 3])  # skeleton of nDP 'pages' that will hold
    # the 2x3 matrices returned by seismicPropFluids
for i in np.arange(0, nDP):
        # update the pore pressure
fluidPars[0] = mpp[i]
fluidPropP[i] = seismicPropFluids(fluidPars)
fluidProp = np.mean(fluidPropP, 0)
rhoFluids = fluidProp[1]
rhoW = rhoFluids[0]
rhoO = rhoFluids[1]
#rock physics model calibration
    # select logs only in the water-bearing calibration region
phiC = phiEe[indCL]
clayC = claye[indCL]
rhoCL = rho[indCL]
resCL = res[indCL]
phiCL = phi[indCL]
facCL = fac[indCL]
    # Calibration for the whole section
    rhoMin_T = np.median(rhoMin)
opt = 2
if (opt == 1):
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes(phiCL, rhoCL, resCL, clayC, phiC, rhoMin_T, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes2(phiCL, rhoCL, resCL, clayC, phiC , rhoW, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar_T = np.concatenate([[cPhi_T[0]], [cPhi_T[1]], [cPhi_T[2]]])
denPar_T = np.concatenate([[rhoSd], [rhoWe], [rhoO], [rhoSh], [rhoDisp]])
resPar_T = cRes_T
[phiMod_T, rhoMod_T, resMod_T] = calibCPRRreMod(phiEe, claye, phiPar_T , denPar_T, resPar_T, modPhiC)
facies_T = np.ones((nData,1))
phiMod = np.zeros((nData,1))
rhoMod = np.zeros((nData,1))
resMod = np.zeros((nData,1))
phiPar = np.empty([nFac,3])
denPar = np.empty([nFac,5])
resPar = np.empty([nFac,4])
facH = np.zeros([np.size(facCL),1])
for i in range(0,nFac):
ind = np.argwhere(facCL == i + 1)
ind= np.squeeze(ind,1)
secPhi = phiCL[ind]
secRho = rhoCL[ind]
secRes = resCL[ind]
secClayC = clayC[ind]
secPhiC = phiC[ind]
#[cHan,vpMod(ind),s2] = calibHan(secVP,secPhiC,secClayC);
#coefHanVP(i,:) = cHan';
        # the neutron-porosity and density part does not use separate
        # calibration for groupings in terms of shale volume; the
        # coefficients are repeated (identical) for sand and shale
resCoef_line = np.empty((resCoef.shape[0],1))
resCoef_line[:,0] = resCoef[i]
if (opt == 1):
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes(secPhi, secRho, secRes, secClayC, secPhiC , rhoMin[i], resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes2(secPhi, secRho, secRes, secClayC, secPhiC , rhoW, resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar[i] = np.array([cPhi[0], cPhi[1], cPhi[2]])
denPar[i] = np.array([rhoSd, rhoWe, rhoO, rhoSh, rhoDisp])
resPar[i] = cRes
facH[ind] = i + 1
resPar_line = np.empty([1,nFac])
resPar_line[0,:] = resPar[i]
ind = np.argwhere(fac == i + 1)
ind= np.squeeze(ind,1)
passArg = np.array([rhoSd, rhoW, rhoSh])
[dataPhi, dataRho, dataRes] = calibCPRRreMod(phiEe[ind], claye[ind], phiPar[i],passArg, resPar_line, modPhiC)
phiMod[ind,0] = dataPhi
rhoMod[ind,0] = dataRho
resMod[ind] = dataRes
if (iOut == 1):
nOutFac = 1
facies = facies_T
neutron = phiPar_T
denLitho = denPar_T
rhoComp = rhoMod_T
phiComp = phiMod_T
resComp = resMod_T
elif (iOut == 2):
nOutFac = np.ones([nFac,1])
facies = facH
neutron = phiPar
denLitho = denPar
denLitho[:,4] = neutron[:,2]
rhoComp = rhoMod
phiComp = phiMod
resComp = resMod
else:
        raise Exception('Output selector must be 1 or 2')
r2Phi = rsquared (phiComp, phi)
r2Rho = rsquared (rhoComp, rho)
r2Res = rsquared (resComp, res)
print ("Fim da calibracao, com seguintes ajustes R2:\n Phi = %7.2f\n RHO = %7.2f\n RES = %7.2f\n" % (r2Phi, r2Rho, r2Res))
#Saida de Dados
def calibClayPhiRhoRes(phi, rho, Rt, vsh, phiE, rhoMin, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoMin - densidade media dos graos minerais constituintes da matriz da rocha
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if (mode != 1) and (mode != 2) and (mode != 3):
        raise Exception("Input effective-porosity selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if np.size(phi) != n or np.size(rho) != n:
        raise Exception("Input vectors must have the same dimensions")
    if (mode == 1 or mode == 2) and np.size(phiE) != n:
        raise Exception("Effective-porosity input vector does not have the proper dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
rhoPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, following
        # the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective-porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron-porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        # in this case phiE is an effective-porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron-porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
vecConc = vsh*(1-phiE)
    B = np.concatenate([[phiE], [vecConc]]).T  # transpose to the (n, 2) design matrix expected by fitNorm1
xRho1 = fitNorm1(B, (rho - rhoMin), 10)
rhoPar[0] = rhoMin
rhoPar[1] = xRho1[0] + rhoMin
rhoPar[2] = xRho1[1] + rhoMin
rhoComp = np.dot(B,xRho1) + rhoMin
    xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]  # check how it behaves without lb and ub
RtPar = np.concatenate([cRes, xRes])
RtPar = RtPar.reshape(1, RtPar.size)
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
def calibClayPhiRhoRes2(phi, rho, Rt, vsh, phiE, rhoWater, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoWater - densidade da agua
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if (mode != 1) and (mode != 2) and (mode != 3):
        raise Exception("Input effective-porosity selector must be 1, 2 or 3!")
    n = np.size(vsh)
    if np.size(phi) != n or np.size(rho) != n:
        raise Exception("Input vectors must have the same dimensions")
    if (mode == 1 or mode == 2) and np.size(phiE) != n:
        raise Exception("Effective-porosity input vector does not have the proper dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, following
        # the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        # in this case phiE is an effective-porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron-porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        # in this case phiE is an effective-porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron-porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1-phiE)
B = np.concatenate([[col1], [col2]]).T
rhoCte = rhoWater * phiE
xRho = fitNorm1(B, (rho - rhoCte),10)
rhoPar = np.empty(2)
rhoPar[0] = xRho[0]
rhoPar[1] = xRho[1]
rhoComp = np.dot(B, xRho) + rhoCte
xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]
print ("VALORES DE xRES", xRes)
RtPar = np.concatenate([cRes, xRes])
RtPar = np.reshape(RtPar,(1,np.size(RtPar)))
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
def calibCPRRreMod(phiE, vsh, phiPar, rhoPar, RtPar, mode):
    # PURPOSE: compute the modelled data using the models calibrated
    # on another interval of the well, following the 3 distinct options for the effective porosity:
    # 1 - use the neutron log itself as effective porosity (identity)
    # 2 - use an independent effective-porosity log (e.g. core data)
    #
    # INPUT:
    # phi - neutron-porosity log
    # rho - density log
    # vsh - shale volume (normally extracted from the gamma-ray log)
    # phiE - effective-porosity log
    # phiPar
    # rhoPar - water density
    # RtPar -
    # mode - effective-porosity selector, 1, 2 or 3 according to the cases
    # described above.
    #
    # OUTPUT:
    # phiComp - computed neutron-porosity log
    # rhoComp - computed density log
    # RtComp
    #
    #
    # MODELS
    # neutron porosity:
    # phi = A + 1.0 phiE + C vsh
    # density model:
    # rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh
    # resistivity model:
    # Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    if mode != 1 and mode != 2 and mode != 3:
        raise Exception('Input effective-porosity selector must be 1, 2 or 3')
    n = np.size(vsh)
    if (mode == 1 or mode == 2) and np.size(phiE) != n:
        raise Exception('Effective-porosity input vector does not have the proper dimension')
if (mode == 1):
        # the neutron log itself provides the effective porosity, following
        # the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode ==2):
        # in this case phiE is an effective-porosity vector
        col1 = 1 - (phiE + vsh)  # match the model used in calibClayPhiRhoRes/calibClayPhiRhoRes2
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode == 3):
phiSand = 0.25
        # in this case phiE is an effective-porosity vector
        col1 = 1 - (phiSand + vsh)
        phiPar[0] = xPhi2[0]
        phiPar[1] = xPhi2[1]  # NB: xPhi2 is undefined here; mode 3 looks unfinished and may be better removed from the if above
        phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1 - phiE)
B = np.concatenate([[col1], [col2]])
rhoCte = rhoPar[1]*phiE
rhoComp = col1 * rhoPar[0] + col2*rhoPar[2] + rhoCte
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiComp, rhoComp, RtComp
def fitNorm1(A, d, maxIt):
xLS = np.linalg.lstsq(A,d)[0]
dComp = np.dot(A,xLS)
res = d - dComp
rmsOld = np.sqrt(np.sum(res**2))
drms = 1
it = 1
while (drms > 1e-07 or it == maxIt):
R = np.diag(1./(np.abs(res) + 1e-08 ))
B1 = np.dot(A.T, R)
B = np.dot(B1, A)
b = np.dot(B1,d)
xN1 = np.linalg.lstsq(B,b)[0]
dComp = np.dot(A,xN1)
res = d - dComp
rms = np.sqrt(np.sum(res**2))
drms = rms - rmsOld
rmsOld = rms
it = it + 1
return xN1
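# Illustrative usage sketch: fitNorm1 solves A x ~ d by iteratively reweighted
# least squares, approximating an L1-norm fit that is less sensitive to
# outliers than a plain least-squares solution.  The synthetic system and
# outlier positions below are assumptions for demonstration only.
def _demo_fitNorm1():
    xTrue = np.array([2.0, -1.0])
    A = np.c_[np.ones(50), np.linspace(0.0, 1.0, 50)]
    d = np.dot(A, xTrue)
    d[::10] += 5.0  # a few gross outliers
    xL1 = fitNorm1(A, d, 10)
    print('IRLS estimate:', xL1)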
def seismicPropFluids(fluidPars):
""" II - CONSTRUTOR DE FLUIDO (PROPRIEDADES SISMICAS DA AGUA DE FORMACAO, OLEO E GAS, BEM COMO MISTURA)
* OBS: atentar para as unidades, dens = g/cm3,
Salinidade (ppm) - retiradas do livro WEC Brasil, pagina IV-8 (tabela: leituras caracterisiticas das ferramentas de perfilagem)
sal = 330000 ppm @ 80 celsius
Temperatura da formacao (Celsius)
Pressao de poros (PSI ou KgF)
conversao de unidades (pressao) Aqui a unidade tem que ser em MPASCAL, o fornecido e em PSI
1 PSI = 14,22 kgf/cm2
1 PSI = 6894,757 Pa
1 atm = 101,35 KPa
1 Pa = 1,019716*10^-5 kgf/cm^2
exemplo : pres_poros = 6500;
pascal = 6500 PSI/ 14.22 = 457,10 pascals
1 PSI = 6894,757 Pa
6500 psi = X pa =================> Pa = 44.815.920,50 =========== MPa = 44.8159205
exemplo : pres_poros = 287 kgf/cm2;
287 kgf = 278/1.0197*10^-5 Pa
=================> Pa = 28.145.532,99 =========== MPa = 28.145534
"""
    # Read the reservoir properties
    pres_poros = fluidPars[0]  # pore pressure
    temp = fluidPars[1]  # temperature (oC)
    sal = fluidPars[2]  # salinity
    RGO = fluidPars[3]  # gas-oil ratio
    API = fluidPars[4]  # API gravity
    G = fluidPars[5]  # specific gravity
    # a) Fresh water
cte_1 = -80*temp - 3.3*(temp**2) + 0.00175*(temp**3) + 489*pres_poros - 2*temp*pres_poros
cte_2 = + 0.016*(temp**2)*pres_poros - 1.3*(10**(-5))*(temp**3)*pres_poros - 0.333*(pres_poros**2) - 0.002*temp*(pres_poros**2)
dens_agua_doce = 1 + 1*(10**(-6))*(cte_1 + cte_2)
    # weights
w = np.empty([5,4])
w[0][0] = 1402.85
w[1][0] = 4.871
w[2][0] = -0.04783
w[3][0] = 1.487*10**(-4)
w[4][0] = -2.197*10**(-7)
w[0][1] = 1.524
w[1][1] = -0.0111
w[2][1] = 2.747*10**(-4)
w[3][1] = -6.503*10**(-7)
w[4][1] = 7.987*10**(-10)
w[0][2] = 3.437*10**(-3)
w[1][2] = 1.739*10**(-4)
w[2][2] = -2.135*10**(-6)
w[3][2] = -1.455*10**(-8)
w[4][2] = 5.230*10**(-11)
w[0][3] = -1.197*10**(-5)
w[1][3] = -1.628*10**(-6)
w[2][3] = 1.237*10**(-8)
w[3][3] = 1.327*10**(-10)
w[4][3] = -4.614*10**(-13)
v_agua = 0
for i in np.arange(0, 5):
for j in np.arange (0,4):
            v_agua = v_agua + w[i][j]*(temp**(i))*(pres_poros**(j))  # index convention kept as-is from the original MATLAB code
    # b) Formation water - brine
S = sal/1000000
cte_3 = temp*(80 + 3*temp - 3300*S - 13*pres_poros + 47*pres_poros*S)
cte_4 = 1*(10**(-6))*(300*pres_poros - 2400*pres_poros*S + cte_3)
dens_salmoura = dens_agua_doce + S*(0.668 + 0.44*S + cte_4)
    # convert the brine density from g/cm3 to kg/m3 (i.e. x10^3) to compute the elastic moduli
cte_5 = 1170 - 9.6*temp + 0.055*(temp**2) - 8.5*(10**(-5))*(temp**3) + 2.6*pres_poros - 0.0029*temp*pres_poros - 0.0476*(pres_poros**2)
cte_6 = (S**(1.5))*(780 - 10*pres_poros + 0.16*(pres_poros**2)) - 1820*(S**2)
v_salmoura = v_agua + S*cte_5 + cte_6
bulk_salmoura_Pa = dens_salmoura*(10**3)*(v_salmoura**2)
bulk_salmoura = bulk_salmoura_Pa * 10**(-9)
    # c) oil
    # RGO - gas/oil ratio (litre/litre) - a property of the live oil.
    # Oil API - based on the density (dens_0_oleo) of the dead oil at 15.6 oC and atmospheric pressure (API condition), expressed in g/cm3
    # G - specific gravity of the gas
dens_0_oleo = 141.5/(API + 131.5)
B_0 = 0.972 + 0.00038*(2.4*RGO*((G/dens_0_oleo)**(0.5)) + temp + 17.8)**(1.175)
cte_7 = (dens_0_oleo + 0.0012*G*RGO)/B_0
cte_8 = 0.00277*pres_poros - (1.71*(10**(-7))*(pres_poros**3))
dens_oleo = cte_7 + cte_8*(cte_7 - 1.15)**2 + 3.49*(10**(-4))*pres_poros
dens_linha = (dens_0_oleo)/(B_0*(1 + 0.001*RGO))
cte_9 = ((dens_linha)/(2.6 - dens_linha))**(0.5)
cte_10 = 4.12*((1.08/dens_linha - 1)**(0.5)) - 1
v_oleo = 2096*cte_9 - 3.7*temp + 4.64*pres_poros + 0.0115*cte_10*temp*pres_poros
bulk_oleo_Pa = dens_oleo*(10**3)*((v_oleo)**2)
bulk_oleo = bulk_oleo_Pa * 10**(-9)
# d) gas
t_pr = (temp + 273.15) / (94.72 + 170.75*G)
p_pr = pres_poros / (4.892 - 0.4048*G)
exp_g = np.exp ( (-(0.45 + 8*((0.56-(1./t_pr))**2))*((p_pr)**(1.2))) / (t_pr) )
E = 0.109*((3.85 - t_pr)**2)*exp_g
cte_11 = (0.03 + 0.00527*((3.5 - t_pr)**3))
Z = cte_11*p_pr + (0.642*t_pr - 0.007*(t_pr**4) - 0.52) + E
dens_gas = 3.4638657*((G*pres_poros)/(Z*(temp + 273.15)))
gamma_0 = 0.85 + (5.6/(p_pr + 2)) + (27.1/((p_pr + 3.5)**2)) - 8.7*np.exp(-0.65*(p_pr + 1))
deriv_Z = cte_11 - (0.1308*((3.85-t_pr)**2)*(0.45 + 8*(0.56 - 1./(t_pr))**2)) * (((p_pr)**(0.2))/(t_pr)) * exp_g
    # bulk_gas in MPa - do not forget to convert to Pa to obtain the velocity in m/s
bulk_gas_MPa = ((pres_poros*gamma_0)/(1-((p_pr/Z)*(deriv_Z)))) # bulk em MPa
    v_gas = ((bulk_gas_MPa*(10**6))/(dens_gas*1000))**(0.5)  # bulk: MPa = 10^6 Pa; density: g/cm3 = 1000 kg/m3
bulk_gas = bulk_gas_MPa *10**(-3)
bulkNden = np.array([[bulk_salmoura, bulk_oleo, bulk_gas], [dens_salmoura, dens_oleo, dens_gas]])
return bulkNden
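# Illustrative usage sketch: evaluate the fluid moduli and densities for one
# set of reservoir conditions.  The parameter values below are assumptions for
# demonstration only (pressure in MPa, temperature in oC, salinity in ppm).
def _demo_seismicPropFluids():
    fluidPars = np.array([30.0, 89.0, 102400.0, 75.0, 29.0, 0.835])
    bulkNden = seismicPropFluids(fluidPars)
    print('bulk moduli [GPa] (brine, oil, gas):', bulkNden[0])
    print('densities [g/cm3] (brine, oil, gas):', bulkNden[1])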
def vshGRcalc(gr, grmin, grmax):
    # Purpose:
    # compute the clay volume from the gamma-ray log
    # Input:
    # gr - gamma-ray data
    # grmin - minimum reference value - clean sand
    # grmax - maximum reference value - shale
n = np.size(gr)
grfa = grmax - grmin
arg = np.empty(np.size(gr))
for i in np.arange(0, n):
arg[i] = (gr[i] - grmin)/grfa
if arg[i] < 0.0:
arg[i] = 0.0
if arg[i] >= 1.0:
arg[i] = 0.98
return arg
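# Illustrative usage sketch: convert a small synthetic gamma-ray vector to
# clay volume with assumed grmin/grmax picks (values for demonstration only).
def _demo_vshGRcalc():
    gr = np.array([55.0, 80.0, 120.0, 150.0])
    vsh = vshGRcalc(gr, grmin=60.0, grmax=140.0)
    print('clay volume:', vsh)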
def ofSimandouxPhiChiSw100(x, resObs, coef, phi, chi):
    # PURPOSE:
    # compute the residual of the resistivity log modelled with the modified
    # Simandoux equation as a function of porosity and shale volume, assuming
    # 100% brine saturation.
    #
    # Resistivity model:
    # Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh,
    # with Sw = 1.0
    #
    # INPUT:
    # x - Simandoux model parameters to be estimated, x = [a*Rw Rsh]
    # resObs - observed resistivity data
    # coef - resistivity model coefficients = [m n]
    # phi - effective-porosity vector used to evaluate the function
    # chi - shale-volume vector used to evaluate the function
nPhi = np.size(phi)
nChi = np.size(chi)
nObs = np.size(resObs)
if (nPhi != nChi or nPhi != nObs):
raise Exception ("Vetores de entrade devem ter as mesmas dimensoes")
T1 = ( (phi**coef[0])*1.0**coef[1])/((1 - chi)*x[0])
T2 = chi/x[1]
dComp = (T1 + T2)**(-1)
res = resObs - dComp
return res
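# A hedged sketch of how this residual function could be used to estimate
# x = [a*Rw, Rsh] with SciPy (scipy is an assumption here; it is not imported
# by the original file), given observed vectors resObs, phi and chi:
#   from scipy.optimize import least_squares
#   x0 = np.array([0.1, 2.0])                                  # initial guess
#   fit = least_squares(ofSimandouxPhiChiSw100, x0,
#                       args=(resObs, [2.0, 2.0], phi, chi))   # coef = [m, n]
#   aRw_est, Rsh_est = fit.x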
def dCompSimandouxPhiChiSw100(phi,chi,facies,coef):
    # PURPOSE:
    #   model computed resistivity data using the modified Simandoux model as a
    #   function of porosity and shale volume, assuming 100% brine saturation,
    #   with the coefficients varying according to the lithological facies
    #   associated with each observed sample.
    #
    # INPUT:
    #   phi - porosity value(s) used to evaluate the function
    #   chi - shale volume value(s) used to evaluate the function
    #   facies - vector of facies indices corresponding to the data
    #   coef - matrix of petrophysical model coefficients for all existing
    #          facies (one row per facies)
nPhi = np.size(phi)
nChi = np.size(chi)
nObs = np.size(facies)
    nFac = len(coef)  # number of facies = number of rows of coef
    if (nPhi != nChi):
        raise Exception('Input vectors must have the same dimensions')
dComp = np.zeros((nObs,1))
if (nPhi ==1):
allFacies = np.arange(1,nFac + 1)
indFac = ismember(allFacies, facies)
for i in np.arange(0, nFac):
            if indFac[i]:
ind = np.argwhere(facies == i + 1)
if (chi >= 1.0):
T1 = 0.0
else:
T1 = ( (phi**coef[i][0])*(1.0**coef[i][1]) )/( coef[i][2]*(1-chi) )
T2 = chi / coef[i][3]
dComp[ind] = 1.0*(T1 + T2)**(-1)
elif(nPhi ==nObs):
for k in np.arange(0,nObs):
ifac = facies[k][0]
if (chi[k] >= 1.0):
T1 = 0.0
else:
T1 = ( (phi[k]**coef[ifac - 1][0])*(1.0**coef[ifac - 1][1]) ) / ( (coef[ifac -1 ][2])*(1 - chi[k]) )
T2 = chi[k] / coef[ifac - 1][3]
dComp[k][0] = (T1 + T2)**(-1)
return dComp
def ismember (A, B):
nA = np.size(A)
C = np.empty(nA)
    for i in np.arange(0, nA):
        C[i] = np.any(B == A[i])
    return C
# mathematical imports -
import numpy as np
from matplotlib import pyplot as plt
from sklearn import metrics
from math import sqrt
import seaborn as sns
sns.set()
import torch
# load network imports -
import os
import sys
sys.path.insert(0, '/Users/chanaross/dev/Thesis/MachineLearning/forGPU/')
from CNN_LSTM_NeuralNet_LimitZerosV2 import Model
def createRealEventsUberML_network(eventMatrix, startTime, endTime):
firstTime = startTime
if (endTime - startTime==0):
numTimeSteps = 1
else:
numTimeSteps = endTime - startTime
realMatOut = eventMatrix[ firstTime: firstTime + numTimeSteps, :, :]
return realMatOut
def getInputMatrixToNetwork(previousMat, sizeCnn):
# previousMat is of shape : [seq_len , size_x, size_y]
lenSeqIn = previousMat.shape[0]
lengthX = previousMat.shape[1]
lengthY = previousMat.shape[2]
temp2 = np.zeros(shape=(1, lenSeqIn, sizeCnn, sizeCnn, lengthX * lengthY))
tempPadded = np.zeros(shape=(lenSeqIn, lengthX + sizeCnn, lengthY + sizeCnn))
padding_size = np.floor_divide(sizeCnn, 2)
tempPadded[:, padding_size: padding_size + lengthX, padding_size: padding_size + lengthY] = previousMat
k = 0
for i in range(lengthX):
for j in range(lengthY):
try:
temp2[0, :, :, :, k] = tempPadded[:, i:i + sizeCnn, j: j + sizeCnn]
except:
print("couldnt create input for cnn ")
k += 1
xArr = temp2
if torch.cuda.is_available():
xTensor = torch.Tensor(xArr).cuda()
else:
xTensor = torch.Tensor(xArr)
# xTensor is of shape: [grid id, seq, x_cnn, y_cnn]
return xTensor
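# Shape sketch for the function above (illustrative numbers): with a 12x12 grid,
# lenSeqIn = 5 and sizeCnn = 7, padding_size = 3, tempPadded has shape
# (5, 19, 19) and the returned tensor has shape (1, 5, 7, 7, 144) --
# one 7x7 spatial window per grid cell stacked along the last axis.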
def createEventDistributionUber(previousEventMatrix, my_net, eventTimeWindow, startTime, endTime):
"""
this function calculates future events based on cnn lstm network
:param previousEventMatrix: event matrix of previous events
:param my_net: learned network
:param eventTimeWindow: time each event is opened (for output)
:param startTime: start time from which events are created
:param endTime: end time to create events (start and end time define the output sequence length)
:return: eventPos, eventTimeWindow, outputEventMat
"""
# previousEventMatrix is of size: [seq_len, x_size, y_size]
if endTime - startTime == 0: # should output one prediction
out_seq = 1
else:
out_seq = endTime - startTime
x_size = previousEventMatrix.shape[1]
y_size = previousEventMatrix.shape[2]
netEventOut = torch.zeros([out_seq, x_size, y_size])
for seq in range(out_seq):
tempEventMat = previousEventMatrix
input = getInputMatrixToNetwork(previousEventMatrix, my_net.cnn_input_dimension)
k = 0
for x in range(x_size):
for y in range(y_size): # calculate output for each grid_id
testOut = my_net.forward(input[:, :, :, :, k])
_, netEventOut[seq, x, y] = torch.max(torch.exp(testOut.data), 1)
k += 1
previousEventMatrix[0:-1, :, :] = tempEventMat[1:, :, :]
previousEventMatrix[-1, :, :] = netEventOut[seq, :, :]
# in the end netEventOut is a matrix of size [out_seq_len, size_x, size_y]
eventPos = []
eventTimes = []
for t in range(out_seq):
for x in range(x_size):
for y in range(y_size):
numEvents = netEventOut[t, x, y]
# print('at loc:' + str(x) + ',' + str(y) + ' num events:' + str(numEvents))
#for n in range(numEvents):
if numEvents > 0:
eventPos.append(np.array([x, y]))
eventTimes.append(t+startTime)
    eventsPos = np.array(eventPos)
    eventsTimes = np.array(eventTimes)
    return eventsPos, eventsTimes, netEventOut
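# A hedged usage sketch (the checkpoint file name and grid size are assumptions,
# not from the original script): predict the next 3 time steps from a 5-step history.
#   my_net = torch.load('uber_cnn_lstm.pkl', map_location='cpu')
#   history = np.random.randint(0, 3, size=(5, 12, 12)).astype(np.float32)  # [seq_len, x, y]
#   pos, times, grid = createEventDistributionUber(history, my_net,
#                                                  eventTimeWindow=4,
#                                                  startTime=0, endTime=3)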
import numpy as np
class Generic():
    def __init__(self):
        self.nb_params = None  # number of parameters of the layer
        self.save_X = None     # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return None
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        grad_entree = None
        return grad_local, grad_entree
class Arctan():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return np.arctan(X)
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        grad_entree = 1/(1 + self.save_X**2) * grad_sortie
        return grad_local, grad_entree
class Tanh():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return np.tanh(X)
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        grad_entree = (1 - np.tanh(self.save_X)**2) * grad_sortie
        return grad_local, grad_entree
class Sigmoid():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return 1 / (1 + np.exp(-X))
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        exp_minusX = np.exp(-self.save_X)
        grad_entree = (exp_minusX / (1 + exp_minusX)**2) * grad_sortie
        return grad_local, grad_entree
class RELU():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return X * (X > 0)
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        X = self.save_X
        grad_entree = (X > 0) * grad_sortie  # ReLU derivative: 1 where X > 0, 0 elsewhere (avoids dividing by zero at X == 0)
        return grad_local, grad_entree
class ConcatProjections():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return X.reshape(X.shape[0], -1, order='F').T
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        grad_entree = grad_sortie.T.reshape(self.save_X.shape, order='F')
        return grad_local, grad_entree
class ABS():
    def __init__(self):
        self.nb_params = 0   # number of parameters of the layer
        self.save_X = None   # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        pass
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return None
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return np.abs(X)
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters
        #   grad_entree - the gradient at the input of the layer
        grad_local = None
        X = self.save_X
        grad_entree = np.sign(X) * grad_sortie  # subgradient of |X| (0 at X == 0)
        return grad_local, grad_entree
class ProjectVectors():
    def __init__(self, n_entree, n_sortie):
        self.n_entree = n_entree
        self.n_sortie = n_sortie
        self.A = np.random.randn(n_sortie, n_entree)
        self.nb_params = n_entree * n_sortie  # number of parameters of the layer
        self.save_X = None                    # saved copy of the input data
    def set_params(self, params):
        # Sets the layer parameters; takes a vector of size self.nb_params as input
        self.A = params.reshape((self.n_sortie, self.n_entree))
    def get_params(self):
        # Returns a vector of size self.nb_params containing the layer parameters
        return np.ravel(self.A)
    def forward(self, X):
        # Forward pass; X is the input data vector
        self.save_X = np.copy(X)
        return np.matmul(self.A, X)
    def backward(self, grad_sortie):
        # Backpropagation of the gradient through the layer;
        # grad_sortie is the gradient vector at the layer output.
        # This function returns:
        #   grad_local  - a vector of size self.nb_params containing the gradient w.r.t. the local parameters (here, A)
        #   grad_entree - the gradient at the input of the layer
        Xt = np.transpose(self.save_X, (0, 2, 1))
        grad_local = np.ravel(np.sum(np.matmul(grad_sortie, Xt), axis=0))
        grad_entree = np.matmul(self.A.T, grad_sortie)
        return grad_local, grad_entree
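# A minimal self-test sketch (not part of the original file) chaining two of the
# layers above; ProjectVectors.forward expects X of shape (batch, n_entree, 1),
# so the shapes printed below follow from that convention.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(8, 5, 1)                  # batch of 8 column vectors
    proj = ProjectVectors(n_entree=5, n_sortie=3)
    act = Tanh()
    out = act.forward(proj.forward(X))            # forward pass
    grad_out = np.ones_like(out)                  # pretend dLoss/dout = 1
    _, g = act.backward(grad_out)
    grad_A, grad_X = proj.backward(g)
    print(out.shape, grad_A.shape, grad_X.shape)  # (8, 3, 1) (15,) (8, 5, 1)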
import os
import csv
import cv2
import numpy as np
import sklearn
from random import shuffle
from numpy import zeros, newaxis
from sklearn.model_selection import train_test_split
test_split = 0.25
samples = []
images = []
angles = []
cam_delta_left = 0.25 # 0.15 # 0.25 0.08
cam_delta_right = 0.25
nb_epoch = 5
batch_size = 128
model_name = "10-LeNet-CropFlip-bigdata.h5"
with open('./data/project_dataset_log.csv') as csvfile :
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
shuffle(samples)
## Prepare training / validation samples
train_samples, validation_samples = train_test_split(samples, test_size=test_split)
def load_image(image_path):
img = cv2.imread(image_path)
return img
def gray_conversion(x):
import tensorflow as tf
return tf.image.rgb_to_grayscale(x)
def get_current_path(path):
path_elem = path
image_path = './'+'/'.join(path_elem.split('/')[-4:])
return image_path
def load_in_memory():
for line in samples :
# center,left,right,steering,throttle,brake,speed
center_image = load_image(get_current_path(line[0]))
center_angle = float(line[3])
images.append(center_image)
angles.append(center_angle)
## Augment dataset by adding flipped image and its measurement
## Center & flipped
center_image_flipped = np.fliplr(center_image)
center_angle_flipped = center_angle*(-1)
images.append(center_image_flipped)
angles.append(center_angle_flipped)
## Left & flipped
left_image = load_image(get_current_path(line[1]))
left_angle = center_angle + cam_delta_left
images.append(left_image)
angles.append(left_angle)
        left_image_flipped = np.fliplr(left_image)
        left_angle_flipped = left_angle*(-1)
        images.append(left_image_flipped)
        angles.append(left_angle_flipped)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from __future__ import division
import collections
from textwrap import dedent
import numpy as np
from .core import (ParametricModel, Parametric1DModel, Parametric2DModel,
Model, format_input, ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from ..utils import find_current_module
__all__ = sorted([
'AiryDisk2D', 'Beta1D', 'Beta2D', 'Box1D',
'Box2D', 'Const1D', 'Const2D', 'Disk2D',
'Gaussian1D', 'Gaussian2D', 'Linear1D', 'Lorentz1D',
'MexicanHat1D', 'MexicanHat2D', 'Scale', 'Shift',
'Sine1D', 'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D',
'custom_model_1d'
])
class Gaussian1D(Parametric1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
See Also
--------
Gaussian2D, Box1D, Beta1D, Lorentz1D
"""
amplitude = Parameter('amplitude')
mean = Parameter('mean')
stddev = Parameter('stddev')
def __init__(self, amplitude, mean, stddev, **constraints):
try:
param_dim = len(amplitude)
except TypeError:
param_dim = 1
super(Gaussian1D, self).__init__(param_dim=param_dim,
amplitude=amplitude, mean=mean,
stddev=stddev, **constraints)
@staticmethod
def eval(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
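# Illustrative sketch (not part of the library source): the model function can
# be evaluated directly through the staticmethod defined above, e.g.
#   >>> x = np.linspace(-3, 3, 7)
#   >>> Gaussian1D.eval(x, amplitude=1.0, mean=0.0, stddev=1.0)
# which returns exp(-x**2 / 2) evaluated at each point.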
class Gaussian2D(Parametric2DModel):
"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float
Standard deviation of the Gaussian in x.
x_stddev and y_stddev must be specified unless a covariance
matrix (cov_matrix) is input.
y_stddev : float
Standard deviation of the Gaussian in y.
x_stddev and y_stddev must be specified unless a covariance
matrix (cov_matrix) is input.
theta : float, optional
Rotation angle in radians. The rotation angle increases clockwise.
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the x_stddev,
y_stddev, and theta specification.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\\left(x - x_{0}\\right)^{2} -b\\left(x - x_{0}\\right)
\\left(y - y_{0}\\right) -c\\left(y - y_{0}\\right)^{2}}
Using the following definitions:
.. math::
a = \\left(\\frac{\\cos^{2}{\\left (\\theta \\right )}}{2 \\sigma_{x}^{2}} +
\\frac{\\sin^{2}{\\left (\\theta \\right )}}{2 \\sigma_{y}^{2}}\\right)
b = \\left(\\frac{-\\sin{\\left (2 \\theta \\right )}}{2 \\sigma_{x}^{2}} +
\\frac{\\sin{\\left (2 \\theta \\right )}}{2 \\sigma_{y}^{2}}\\right)
c = \\left(\\frac{\\sin^{2}{\\left (\\theta \\right )}}{2 \\sigma_{x}^{2}} +
\\frac{\\cos^{2}{\\left (\\theta \\right )}}{2 \\sigma_{y}^{2}}\\right)
See Also
--------
Gaussian1D, Box2D, Beta2D
"""
amplitude = Parameter('amplitude')
x_mean = Parameter('x_mean')
y_mean = Parameter('y_mean')
x_stddev = Parameter('x_stddev')
y_stddev = Parameter('y_stddev')
theta = Parameter('theta')
def __init__(self, amplitude, x_mean, y_mean, x_stddev=None, y_stddev=None,
theta=0.0, cov_matrix=None, **constraints):
if y_stddev is None and cov_matrix is None:
raise InputParameterError(
"Either x/y_stddev must be specified, or a "
"covariance matrix.")
elif x_stddev is None and cov_matrix is None:
raise InputParameterError(
"Either x/y_stddev must be specified, or a "
"covariance matrix.")
elif cov_matrix is not None and (x_stddev is not None or
y_stddev is not None):
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev")
# Compute principle coordinate system transformation
elif cov_matrix is not None:
cov_matrix = np.array(cov_matrix)
assert cov_matrix.shape == (2, 2), "Covariance matrix must be 2x2"
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
super(Gaussian2D, self).__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **constraints)
@staticmethod
def eval(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * (-(sin2t / xstd2) + (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * (-(sin2t / xstd2) + (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (-cos2t / xstd2) + (cos2t / ystd2)
db_dx_stddev = sin2t / xstd3
db_dy_stddev = -sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
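# Illustrative sketch (not in the original source): a Gaussian2D can be built
# either from (x_stddev, y_stddev, theta) or from a covariance matrix; in the
# latter case the eigen-decomposition in __init__ converts the matrix into the
# equivalent standard deviations and rotation angle, e.g.
#   >>> cov = [[1.0, 0.5], [0.5, 2.0]]
#   >>> g = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)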
class Shift(Model):
"""
Shift a coordinate.
Parameters
----------
offsets : float or a list of floats
offsets to be applied to a coordinate
if a list - each value in the list is an offset to be applied to a
column in the input coordinate array
"""
offsets = Parameter('offsets')
def __init__(self, offsets, param_dim=1):
if not isinstance(offsets, collections.Sequence):
param_dim = 1
else:
param_dim = len(offsets)
self._offsets = offsets
super(Shift, self).__init__(param_dim=param_dim)
def inverse(self):
if self.param_dim == 1:
return Shift(offsets=(-1) * self._offsets)
else:
return Shift(offsets=[off * (-1) for off in self._offsets])
@format_input
def __call__(self, x):
"""
Transforms data using this model.
Parameters
----------
x : array like or a number
input
"""
return self._offsets + x
class Scale(Model):
"""
Multiply a model by a factor.
Parameters
----------
factors : float or a list of floats
scale for a coordinate
"""
factors = Parameter('factors')
def __init__(self, factors, param_dim=1):
if not isinstance(factors, collections.Sequence):
param_dim = 1
else:
param_dim = len(factors)
self._factors = factors
super(Scale, self).__init__(param_dim=param_dim)
def inverse(self):
if self.param_dim == 1:
return Scale(factors=1. / self._factors)
else:
return Scale(factors=[1 / factor for factor in self._factors])
@format_input
def __call__(self, x):
"""
Transforms data using this model.
Parameters
----------
x : array like or a number
input
"""
return self._factors * x
class Sine1D(Parametric1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x)
"""
amplitude = Parameter('amplitude')
frequency = Parameter('frequency')
def __init__(self, amplitude, frequency, **constraints):
super(Sine1D, self).__init__(amplitude=amplitude,
frequency=frequency,
**constraints)
@staticmethod
def eval(x, amplitude, frequency):
"""One dimensional Sine model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
@staticmethod
def deriv(x, amplitude, frequency):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(2 * np.pi * frequency * x)
d_frequency = (2 * np.pi * x * amplitude *
np.cos(2 * np.pi * frequency * x))
return [d_amplitude, d_frequency]
class Linear1D(Parametric1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter('slope')
intercept = Parameter('intercept')
linear = True
def __init__(self, slope, intercept, **constraints):
super(Linear1D, self).__init__(slope=slope, intercept=intercept,
**constraints)
@staticmethod
def eval(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def deriv(x, slope, intercept):
"""One dimensional Line model derivative"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
class Lorentz1D(Parametric1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
"""
amplitude = Parameter('amplitude')
x_0 = Parameter('x_0')
fwhm = Parameter('fwhm')
def __init__(self, amplitude, x_0, fwhm, **constraints):
super(Lorentz1D, self).__init__(amplitude=amplitude, x_0=x_0,
fwhm=fwhm, **constraints)
@staticmethod
def eval(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
class Const1D(Parametric1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
"""
amplitude = Parameter('amplitude')
def __init__(self, amplitude, **constraints):
super(Const1D, self).__init__(amplitude=amplitude, **constraints)
@staticmethod
def eval(x, amplitude):
"""One dimensional Constant model function"""
return amplitude * np.ones_like(x)
@staticmethod
def deriv(x, amplitude):
"""One dimensional Constant model derivative"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
class Const2D(Parametric2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter('amplitude')
def __init__(self, amplitude, **constraints):
super(Const2D, self).__init__(amplitude=amplitude, **constraints)
@staticmethod
def eval(x, y, amplitude):
"""Two dimensional Constant model function"""
return amplitude * np.ones_like(x)
class Disk2D(Parametric2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter('amplitude')
x_0 = Parameter('x_0')
y_0 = Parameter('y_0')
R_0 = Parameter('R_0')
def __init__(self, amplitude, x_0, y_0, R_0, **constraints):
super(Disk2D, self).__init__(amplitude=amplitude, x_0=x_0,
y_0=y_0, R_0=R_0, **constraints)
@staticmethod
def eval(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
return np.select([rr <= R_0 ** 2], [amplitude])
class Ring2D(Parametric2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\textnormal{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter('amplitude')
x_0 = Parameter('x_0')
y_0 = Parameter('y_0')
r_in = Parameter('r_in')
width = Parameter('width')
def __init__(self, amplitude, x_0, y_0, r_in, width=None, r_out=None,
**constraints):
if r_out is not None:
width = r_out - r_in
if r_out is None and width is None:
raise ModelDefinitionError("Either specify width or r_out.")
super(Ring2D, self).__init__(amplitude=amplitude, x_0=x_0,
y_0=y_0, r_in=r_in, width=width,
**constraints)
@staticmethod
def eval(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
return np.select([r_range], [amplitude])
class Delta1D(Parametric1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Parametric2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Parametric1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
                     A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
                     0 & : \\textnormal{else}
\\end{array}
\\right.
"""
amplitude = Parameter('amplitude')
x_0 = Parameter('x_0')
width = Parameter('width')
def __init__(self, amplitude, x_0, width, **constraints):
super(Box1D, self).__init__(amplitude=amplitude, x_0=x_0,
width=width, **constraints)
@staticmethod
def eval(x, amplitude, x_0, width):
"""One dimensional Box model function"""
return np.select([np.logical_and(x >= x_0 - width / 2.,
x <= x_0 + width / 2.)],
[amplitude], 0)
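    # Worked example of the box evaluation above (illustrative values):
    #   >>> Box1D.eval(np.array([0., 1., 2., 3.]), amplitude=5., x_0=2., width=2.)
    #   array([ 0.,  5.,  5.,  5.])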
@classmethod
def deriv(cls, x, amplitude, x_0, width):
"""One dimensional Box model derivative"""
d_amplitude = cls.eval(x, 1, x_0, width)
        d_x_0 = np.zeros_like(x)
        d_width = np.zeros_like(x)
        return [d_amplitude, d_x_0, d_width]
from tkinter.ttk import Frame
from tkinter import *
from PIL import Image, ImageTk, ImageSequence
import tkinter as tk
from tkinter import ALL
import random
import numpy as np
import time
import json
# handle image using json
with open('dataQlearning.json') as f: dataImg = json.load(f) # load data from json
raw_data_img1 = dataImg["imgWumpusGame"][0] # take dictionary of element 0
wallHero = raw_data_img1.get('img1') # get data img1
raw_data_img2 = dataImg["imgWumpusGame"][1] # take dictionary of element 1
breezeHero = raw_data_img2.get('img2') # get data img2
raw_data_img3 = dataImg["imgWumpusGame"][2] # take dictionary of element 2
agentHero = raw_data_img3.get('img3') # get data img3
raw_data_img4 = dataImg["imgWumpusGame"][3] # take dictionary of element 3
wumpusHero = raw_data_img4.get('img4') # get data img4
raw_data_img5 = dataImg["imgWumpusGame"][4] # take dictionary of element 4
floorHero = raw_data_img5.get('img5') # get data img5
raw_data_img6 = dataImg["imgWumpusGame"][5] # take dictionary of element 5
pitHero = raw_data_img6.get('img6') # get data img6
raw_data_img7 = dataImg["imgWumpusGame"][6] # take dictionary of element 6
goldHero = raw_data_img7.get('img7') # get data img7
raw_data_img8 = dataImg["imgWumpusGame"][7] # take dictionary of element 7
stenchHero = raw_data_img8.get('img8') # get data img8
# main process for game: set GUI, button, map, image, handle movement, check collision...
class MainProcess(tk.Canvas):
def __init__(self, master, mapGame, agent_qlearning, *args, **kwargs):
# master: Frame, map: map of game, agent_qlearning: type of agent is used in game is q_learning
tk.Canvas.__init__(self, *args, **kwargs, bg='#26201c')
self.master = master
self.map = mapGame
self.agent_qlearning = agent_qlearning
        text = Label(self, text="Please wait about 5s to finish training the agent", bg="#26201c", fg="#fff")
text.place(x=90, y=490)
# set button type of game
self.button = Button(text="Start wumpus Qlearning", fg="black", command=self.StartButtonQlearning, bg="pink")
self.button.place(x=135, y=440)
self.Score = 0 # default score of agent is 0
# init for map
self.Init_Map()
self.pack() # self.canvas.pack()
# button use for logic agent
def StartButtonQlearning(self):
self.delete(ALL) # clear all items from the canvas to start on a clean slate
self.StartWumpusQlearning() # start logic agent game from button
# init image for game
def InitImageGame(self, wallMap, breezeImg, agentImg, wumpusImg, floorImg, pitImg, goldImg, stenchImg):
self.wallMap = Image.open(wallMap) # set image for map
        MAX_SIZE_THUMBNAIL_WALL = (42, 42) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.wallMap.thumbnail(MAX_SIZE_THUMBNAIL_WALL, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.map_locs = ImageTk.PhotoImage(self.wallMap) # get position of map for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.wind = Image.open(breezeImg) # set image for breeze
        MAX_SIZE_THUMBNAIL_WIND = (30, 30) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.wind.thumbnail(MAX_SIZE_THUMBNAIL_WIND, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.breeze_locs = ImageTk.PhotoImage(self.wind) # get position of breeze for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.player = Image.open(agentImg) # set image for agent
        MAX_SIZE_THUMBNAIL_PLAYER = (40, 40) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.player.thumbnail(MAX_SIZE_THUMBNAIL_PLAYER, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.player_location = ImageTk.PhotoImage(self.player) # get position of agent for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.wumpus = Image.open(wumpusImg) # set image for wumpus
        MAX_SIZE_THUMBNAIL_MONSTER = (30, 30) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.wumpus.thumbnail((MAX_SIZE_THUMBNAIL_MONSTER), Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.wumpus_locs = ImageTk.PhotoImage(self.wumpus) # get position of wumpus for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.lane = Image.open(floorImg) # set image path agent had moved
        MAX_SIZE_THUMBNAIL_FLOOR = (40, 40) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.lane.thumbnail(MAX_SIZE_THUMBNAIL_FLOOR, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.floor_locs = ImageTk.PhotoImage(self.lane) # get position of floor for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.pit = Image.open(pitImg) # set image for pit
        MAX_SIZE_THUMBNAIL_PIT = (30, 30) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.pit.thumbnail(MAX_SIZE_THUMBNAIL_PIT, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.pit_locs = ImageTk.PhotoImage(self.pit) # get position of pit for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.gold = Image.open(goldImg) # set image for goal
        MAX_SIZE_THUMBNAIL_GOLD = (30, 30) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.gold.thumbnail(MAX_SIZE_THUMBNAIL_GOLD, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.gold_locs = ImageTk.PhotoImage(self.gold) # get position of gold for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.smell = Image.open(stenchImg) # set image for stench
        MAX_SIZE_THUMBNAIL_STENCH = (30, 30) # modify the image to contain a thumbnail version of itself, no larger than the given size
self.smell.thumbnail(MAX_SIZE_THUMBNAIL_STENCH, Image.ANTIALIAS) # use antialias to get a high-quality downsampling filter
self.stench_locs = ImageTk.PhotoImage(self.smell) # get position of stench for drawing, PhotoImage class is used to display images in labels, buttons, canvases, and text widgets
self.isWin = False # set default win = False
self.GameOver = False # set default game over = False
# default map: x_start = 0, y_end = 9, these value use for random position of agent
self.row = 0
self.column = 9
def Create_tiles(self, x, y): self.create_image(x, y, image=self.map_locs, anchor=NW, tag="tiles") # create image cells for map game
def DrawTiles(self):
self.Create_tiles(10, 10) # draw tile at first position
# min row/column: 10, max row/column: 370
# j = 0, 40, 80, 120, 160, 200, 240, 280, 320, 360
for j in range(0, 361, 40):
# i = 0, 40, 80, 120, 160, 200, 240, 280, 320
for i in range(0, 321, 40): self.Create_tiles(50 + i, 10 + j) # tile's interval (10, 50, 90, 130, 170, 210, 250, 290, 330, 370)
for i in range(0, 321, 40): self.Create_tiles(10, 50 + i) # draw all cells in first column except first cell had drawn before
self.pack(fill=BOTH, expand=1)
def Init_Map(self):
self.InitImageGame(wallHero, breezeHero, agentHero, wumpusHero, floorHero, pitHero, goldHero, stenchHero) # init for game
self.DrawTiles() # draw image for map
self.create_map() # create map
def StartWumpusQlearning(self):
self.Init_Map() # init map for logic game
x_agent, y_agent = self.agent_qlearning.get_position() # get current position of agent
error_square_grid = 10 # change this value to balance distance wall stains when agent moving
# thumbnail = (40,40), so we have to take current coordinate of agent multiply 40
self.x, self.y = self.Player_Location(error_square_grid + y_agent * 40, error_square_grid + x_agent * 40)
self.agent_qlearning.training(self.map) # start training agent
# if agent didn't finish process (finish limit move or episode)
while not self.agent_qlearning.finishProcess():
row, column = self.agent_qlearning.get_action() # get action agent could be do at that position
self.agent_qlearning.move(row, column, self.map[row][column]) # get best move
self.Move(column, row) # start move agent
self.Score = self.agent_qlearning.get_total_rewards() # get total score of agent
time.sleep(0.1)
self.update() # update each state moving
self.IsGameOver() # check if agent lose the game
self.IsAgentWin() # check if agent win the game
def IsGameOver(self):
if self.GameOver: self.Animation_GameOver() # animation if agent lose the game
def IsAgentWin(self):
if self.isWin: self.Animation_Winner() # animation if agent win the game
def AnimateGifWin(self, counter1):
self.sequence = [ImageTk.PhotoImage(img) for img in ImageSequence.Iterator(Image.open("image/wingame.gif"))]
self.image = self.create_image(200, 200, image=self.sequence[0])
self.itemconfig(self.image, image=self.sequence[counter1])
self.after(100, lambda: self.AnimateGifWin((counter1+1) % len(self.sequence)))
def Animation_Winner(self):
self.delete(ALL) # delete all state of game after finishing, then animate gif image
while True:
self.AnimateGifWin(1)
self.update()
def AnimateGifLose(self, counter):
self.sequence = [ImageTk.PhotoImage(img) for img in ImageSequence.Iterator(Image.open("image/gameover.gif"))]
self.image = self.create_image(200, 200, image=self.sequence[0])
self.itemconfig(self.image, image=self.sequence[counter])
self.after(100, lambda: self.AnimateGifLose((counter+1) % len(self.sequence)))
def Animation_GameOver(self):
self.delete(ALL) # delete all state of game after finishing, then animate gif image
while True:
self.AnimateGifLose(1)
self.update()
def ConvertLocationToCoordinate(self, column, row):
# make row/col from 0-9 become: 10, 50, 90, 130, 170, 210, 250, 290, 330, 370 for handling movement of image
column, row = 10 + 40 * column, 10 + 40 * row # thumbnail agent set (40, 40)
return column, row
def Move(self, x, y):
# convert current position to coordinate, example agent's current position is (4,8) -> coordinate image (170, 330)
x, y = self.ConvertLocationToCoordinate(x, y) # min coordinate: 10 + 40 * 0 = 10, max coordinate: 10 + 40 * 9 = 370
location = self.find_withtag("player") # get agent
if len(location) != 0: self.delete(location[0]) # if current location of agent after randoming is not an empty cell
################### need fix something here for better movement ##################
if self.x == x:
if self.y > y and (self.y - y) == 40: self.Top(x, y)
elif self.y > y and (self.y - y) > 40:
y = self.y - 40
self.Top(x, y)
elif self.y < y and (y - self.y) == 40: self.Down(x, y)
elif self.y < y and (y - self.y) > 40:
self.y = y - 40
self.Down(x, y)
elif self.y == y:
if self.x > x and (self.x - x) == 40: self.Left(x, y)
elif self.x > x and (self.x - x) > 40:
x = self.x - 40
self.Left(x, y)
elif self.x < x and (x - self.x) == 40: self.Right(x, y)
elif self.x < x and (x - self.x) > 40:
self.x = x - 40
self.Right(x, y)
elif self.x != x and self.y != y:
if self.y > self.x and y > x:
if (self.x - x) == 40 or (x - self.x) == 40:
x = self.x
if self.y > y: self.Top(x, y)
else: self.Down(x, y)
elif (x - self.x) > 40:
y = self.y
self.Right(x, y)
elif (self.x - x) > 40:
y = self.y
self.Left(x, y)
'''
elif self.x > self.y and x > y:
if (self.y - y) == 40 or (y - self.y) == 40:
y = self.y
if self.x > x: self.Left(x, y)
else: self.Right(x, y)
elif (y - self.y) > 40:
x = self.x
self.Down(x, y)
elif (self.y - y) > 40:
x = self.x
self.Top(x, y)
'''
################### need fix something here for better movement ##################
def Left(self, x, y):
thumbnailDistanceBetweenCell = 40
x = x + thumbnailDistanceBetweenCell # default error distance when moving between cells
self.create_image((x, y), image=self.floor_locs, anchor=NW, tag="lane") # print footprint in map when agent moving
self.row -= 1 # decrease value row when moving left
self.x, self.y = self.Player_Location(x - thumbnailDistanceBetweenCell, y) # update new position of agent
self.checkCollision() # check left direction is collision or not
def Right(self, x, y):
thumbnailDistanceBetweenCell = 40
x = x - thumbnailDistanceBetweenCell # default error distance when moving between cells
self.create_image((x, y), image=self.floor_locs, anchor=NW, tag="lane") # print footprint in map when agent moving
self.row += 1 # increase value row when moving right
self.x, self.y = self.Player_Location(x + thumbnailDistanceBetweenCell, y) # update new position of agent
self.checkCollision() # check left direction is collision or not
def Top(self, x, y):
thumbnailDistanceBetweenCell = 40
y = y + thumbnailDistanceBetweenCell # default error distance when moving between cells
self.create_image((x, y), image=self.floor_locs, anchor=NW, tag="lane") # print footprint in map when agent moving
self.column -= 1 # decrease value column when moving up
self.x, self.y = self.Player_Location(x, y - thumbnailDistanceBetweenCell) # update new position of agent
self.checkCollision() # check left direction is collision or not
def Down(self, x, y):
thumbnailDistanceBetweenCell = 40
y = y - thumbnailDistanceBetweenCell # default error distance when moving between cells
self.create_image((x, y), image=self.floor_locs, anchor=NW, tag="lane") # print footprint in map when agent moving
self.column += 1 # increase value column when moving down
self.x, self.y = self.Player_Location(x, y + thumbnailDistanceBetweenCell) # update new position of agent
self.checkCollision() # check left direction is collision or not
def Player_Location(self, x, y):
self.create_image(x, y, image=self.player_location, anchor=NW, tag="player") # draw image agent each step moving
print("current row and column of path's agent:", [x, y])
return x, y
def addWumpus(self, x, y): self.create_image(x, y, image=self.wumpus_locs, anchor=NW, tag="wumpus") # draw image for wumpus
def addPit(self, x, y): self.create_image(x, y, image=self.pit_locs, anchor=NW, tag="pit") # draw image for pit
def addGold(self, x, y): self.create_image(x, y, image=self.gold_locs, anchor=NW, tag="gold") # draw image for gold
def addBreeze(self, x, y): self.create_image(x, y, image=self.breeze_locs, anchor=NW, tag="breeze") # draw image for breeze
def addStench(self, x, y): self.create_image(x, y, image=self.stench_locs, anchor=NW, tag="stench") # draw image for stench
# add wumpus, pit, gold, breeze, stench to map
def create_map(self):
size_map = 10 # default size of map is 10x10
distance_between_object = 14 # change this value for distance position of objects in map game
for row in range(size_map):
for column in range(size_map):
# if at that cell is pit(character 1), thumbnail floor(40,40), so multiply row/column by 40
if self.map[row][column] == 1: self.addPit(distance_between_object + column * 40, distance_between_object + row * 40)
# if at that cell is breeze(character 2), thumbnail floor(40,40), so multiply row/column by 40
if self.map[row][column] == 2: self.addBreeze(distance_between_object + column * 40, distance_between_object + row * 40)
# if at that cell is wumpus(character 3), thumbnail floor(40,40), so multiply row/column by 40
if self.map[row][column] == 3: self.addWumpus(distance_between_object + column * 40, distance_between_object + row * 40)
# if at that cell is stench(character 4), thumbnail floor(40,40), so multiply row/column by 40
if self.map[row][column] == 4: self.addStench(distance_between_object + column * 40, distance_between_object + row * 40)
# if at that cell is gold(character 5), thumbnail floor(40,40), so multiply row/column by 40
if self.map[row][column] == 5: self.addGold(distance_between_object + column * 40, distance_between_object + row * 40)
def checkCollision(self):
# find tags of objects to check collision
wumpus = self.find_withtag("wumpus")
pit = self.find_withtag("pit")
gold = self.find_withtag("gold")
player = self.find_withtag("player")
breeze = self.find_withtag("breeze")
smell = self.find_withtag("stench")
x1, y1, x2, y2 = self.bbox(player) # get 4 points coordinate move and next move of agent
overlap = self.find_overlapping(x1, y1, x2, y2) # add tag to all items which overlap the rectangle defined by x1,y1,x2,y2
for over in overlap:
for w in wumpus:
if w == over:
self.GameOver = True # set state game to lose for being eaten by wumpus
print("Game Over!!! Your agent are eaten by wumpus")
for p in pit:
if p == over:
self.GameOver = True # set state game to lose for falling into the pit
print("Game Over, Your agent are fallen into the pit")
for g in gold:
if g == over:
self.delete(g) # if agent find gold, delete gold from map
self.Score += 100 # then add 100 points for each gold agent eat
print("Current score of agent:", self.Score)
if len(gold) == 1:
print("Agent win the game")
self.isWin = True # set state game to win if agent find and eat all gold
for b in breeze:
if b == over: print("Cell has breeze") # print notify for moving into breeze cells
for s in smell:
if s == over: print("Cell has stench") # print notify for moving into stench cells
performanceMeasure = dict() # create a dictionary for calculating point of reward table
performanceMeasure[0] = -10 # move to empty cell -10 points in reward system
performanceMeasure[1] = -10000 # falling in pit -10000 points in reward system
performanceMeasure[2] = -10 # move to breeze cells -10 points in reward system
performanceMeasure[3] = -10000 # being eaten by wumpus -10000 points in reward system
performanceMeasure[4] = -10 # move to stench cells -10 points in reward system
performanceMeasure[5] = -10 # pick up gold -10 points in reward system
def DictMapScore(scoreMap):
initMapScore = dict() # create a dictionary store score of map
initMapScore[0] = -1 # move to empty cells -1 point
initMapScore[1] = -10000 # falling into pit -10000 points
initMapScore[2] = -1 # move to breeze cells -1 point
initMapScore[3] = -10000 # being eaten by wumpus -10000 points
initMapScore[4] = -1 # move to stench cells -1 point
initMapScore[5] = 100 # pick up gold +100 points
return initMapScore[scoreMap]
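# GetNeighborCells is used below but its definition is not included in this
# excerpt; a minimal sketch of what it is assumed to look like (4-neighbourhood
# on the 10x10 grid, yielding objects with .row / .column attributes):
#   from collections import namedtuple
#   Cell = namedtuple('Cell', ['row', 'column'])
#   def GetNeighborCells(row, column, size_map=10):
#       return [Cell(row + dr, column + dc)
#               for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1))
#               if 0 <= row + dr < size_map and 0 <= column + dc < size_map]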
# https://www.freecodecamp.org/news/an-introduction-to-q-learning-reinforcement-learning-14ac0b4493cc/
def initRewardTable():
q_table = np.zeros((100, 100), dtype=int) # create a maxtrix 100x100 with all values are 0
size_map = 10 # size default of map
for row in range(size_map):
for column in range(size_map):
# get 4 neighbor cells at that cell
for cell in GetNeighborCells(row, column):
# example current position agent is (9,8) -> x_pos = 9*10+8=98, 4 points neighbor of that cell is (9,9), (9,7), (8,8), (10,8)
# (10,8) is out of bound, so ignore that position, (9,9)->y_pos=99, (9,7)->y_pos=97, (8,8)->y_pos=88
# each cell move decrease -1 point, so update -1
# set (98,99), (98,97), (98, 88) value -1, similarly all cells in matrix 100x100 of q_table are -1 (just update -1 when this cell is directly reachable from previous cell)
# else if a location is not directly reachable from a particular location, give a reward of 0
q_table[row * 10 + column][cell.row * 10 + cell.column] = -1
return q_table
# update new state at that cell in q_table
def updateRewardTable(q_table, row, column, new_state_qtable):
for cell in GetNeighborCells(row, column):
# if cells have breeze, pit, wumpus, stench, update cells at q_table from -1 to correspond states
q_table[cell.row * 10 + cell.column][row * 10 + column] = new_state_qtable
def RewardtableToMap(mapGame, q_table):
size_map = 10 # default size of map
for row in range(size_map):
for column in range(size_map):
for cell in GetNeighborCells(row, column):
                # write the corresponding reward from the map into the q_table
                q_table[cell.row * 10 + cell.column][row * 10 + column] = DictMapScore(mapGame[row][column])
# https://towardsdatascience.com/simple-reinforcement-learning-q-learning-fcddc4b6fe56
class QAgent:
def __init__(self, row, column, max_move, discount_factor=0.8, learning_rate=0.5, epsilon=0.1, episode=100):
self.map = np.zeros((10, 10), dtype=int) # create default size map is a matrix 10x10
self.row = row
self.column = column
self.state = row * 10 + column # current state of agent in matrix 100x100 of reward table
self.reward_table = initRewardTable()
# create a q_table size 100x100, default all values from q_table are 0, these values will change after training
self.q_table = np.array(np.zeros([100, 100])) # initializing q-values
''' need fix for double q-learning
self.q_a_table = np.array(np.zeros([100, 100])) # initializing q-a-values
self.q_b_table = np.array(np.zeros([100, 100])) # initializing q-b-values
self.q_table = self.q_a_table + self.q_b_table # initializing q-values
'''
self.max_move = max_move # limit move of agent for training purpose
self.total_reward = 0 # default reward of agent is 0
self.episode = episode # number of game play, update and store Q-value after an episode, when the episode initially starts, every Q-value is 0
self.discount_factor = discount_factor # gamma: balance immediate and future reward, usually in range[0.8, 0.99]
self.learning_rate = learning_rate # alpha: defined how much you accept the new value with the old value
self.epsilon = epsilon # epsilon: balance exploration by using epsilon, greed 10%
def adjust_discount_factor(self, discount_factor):
self.discount_factor = discount_factor
def adjust_learning_rate(self, learning_rate):
self.learning_rate = learning_rate
def adjust_epsilon(self, epsilon):
self.epsilon = epsilon
def adjust_episode(self, episode):
self.episode = episode
def move(self, row, column, new_state_qtable):
# update new state of reward table after each step moving of agent
updateRewardTable(self.reward_table, row, column, performanceMeasure[new_state_qtable])
self.row = row
self.column = column
self.state = self.row * 10 + self.column
old_reward = self.total_reward # old reward of agent
self.total_reward += DictMapScore(new_state_qtable) # update point of agent after each new state from q_table
        # if this move incurred a penalty (step reward below -1), consume one episode and continue training with a new one
if self.total_reward - old_reward < -1: self.episode -= 1
self.update_qtable(row, column)
self.max_move -= 1 # decrease max limit move of agent (limit move of agent just for training purpose)
def get_position(self):
return self.row, self.column # get current position of agent
def eval_actions(self):
epsilon = 0.5
available_actions = []
for matrix in range(100):
# assume current position of agent is (0, 1)
            # if reward_table[self.state][matrix] is nonzero, state `matrix` is directly
            # reachable from the current state, so it is an available action
            if self.reward_table[self.state][matrix] != 0: available_actions.append(matrix)
''' need optimize for evaluating action better
eps = 0.01
max_action = np.random.choice(available_actions)
for best_action in available_actions:
if self.q_table[self.state][best_action] > self.q_table[self.state][max_action]: max_action = best_action
elif self.q_table[self.state][best_action] == self.q_table[self.state][max_action]:
if np.random.uniform(0, 1) < eps: max_action = best_action
self.state = max_action
return max_action
'''
# break ties among max values randomly if ties exist
# if no ties exist, the max will be selected with probability = 1
# on each step, the agent selects maximum value over all the actions for state s' (maxQ(s',a'))
max_action = available_actions[0] # take first value for max_action, then compare to get max value action
# max_Q = np.where(np.max(available_actions) == available_actions)[0]
# max_action = np.random.choice(max_Q)
for action in available_actions:
# if q_table[1][0...99 != 0] > self.q_table[1][max(0...99) != 0] -> update max_action
if self.q_table[self.state][action] > self.q_table[self.state][max_action]: max_action = action
# elif 2 value are the same, random a number between 0 and 1, then compare with a value epsilon
elif self.q_table[self.state][action] == self.q_table[self.state][max_action]:
if random.random() < epsilon: max_action = action
self.state = max_action # update current state with max_action
return max_action
def get_action(self):
state_action = self.eval_actions() # get max action
# if state_action is an odd number such as 15, row = 1, column = 5 -> current state = 1 * 10 + 5 = 15
row, column = int(state_action / 10), int(state_action % 10)
return row, column
# https://medium.com/@curiousily/solving-an-mdp-with-q-learning-from-scratch-deep-reinforcement-learning-for-hackers-part-1-45d1d360c120
# https://www.learndatasci.com/tutorials/reinforcement-q-learning-scratch-python-openai-gym/
# https://blog.floydhub.com/an-introduction-to-q-learning-reinforcement-learning/
def update_qtable(self, row, column):
new_epsilon = 0.8 # compare these value with a random number to choose action
next_state = self.state # assume current position of agent is (1, 1) -> next_state = 1 * 10 + 1 = 11
# get 4 neighbors at that cell
for cell in GetNeighborCells(row, column):
state = cell.row * 10 + cell.column # four coordinates will be (1,0), (1,2), (0,1), (2,1) correspond to state 10, 12, 1, 21
r = self.reward_table
available_action = []
for i in range(100):
if r[next_state][i] != 0: available_action.append(i) # if reward_table[11][0...99] != 0 after training, add action at that position
action = np.argmax(self.q_table[next_state,])
next_max = action
Temporal_Difference = self.reward_table[state, next_state] + self.discount_factor * self.q_table[next_state, next_max] - self.q_table[state, next_state]
self.q_table[state, next_state] += self.learning_rate * Temporal_Difference
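            # Worked example of the temporal-difference update above, with
            # illustrative numbers: learning_rate = 0.5, discount_factor = 0.8,
            # reward_table[state, next_state] = -1, max_a Q[next_state, a] = 10
            # and Q[state, next_state] = 2 give
            #   TD = -1 + 0.8 * 10 - 2 = 5.0
            #   Q[state, next_state] += 0.5 * 5.0   ->  Q becomes 4.5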
''' need fix for double q-learning
if np.random.rand() < 0.5:
self.q_a_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_b_table[next_state, np.argmax(self.q_a_table[next_state,])] - self.q_a_table[state, next_state])
else:
self.q_b_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_a_table[next_state, np.argmax(self.q_b_table[next_state,])] - self.q_b_table[state, next_state])
'''
rewards = self.reward_table
Q = self.q_table
r = np.copy(rewards) # copy the rewards matrix to new matrix
for i in range(10):
state = np.random.randint(0, 100) # pick up a state randomly
total_reward = 0 # default total reward is 0
            MIN_INF, MAX_INF = -999, 100 # lower and upper bounds; the episode stops once total_reward leaves this range
while MIN_INF < total_reward < MAX_INF:
avai_actions = []
for j in range(100):
if r[state, j] != 0: avai_actions.append(j) # iterate through the new rewards matrix and get the actions != 0
# if value random < epsilon, pick an action randomly from the list of avai action which lead us to next state
if np.random.uniform(0, 1) < self.epsilon: next_state = np.random.choice(avai_actions) # explore action space
# else exploit learned values
else:
'''
max_next_state = np.max(Q[state, avai_actions])
action = avai_actions[0]
for i in avai_actions:
if Q[state][i] == max_next_state:
if random.random() < new_epsilon: action = i
else: action = np.argmax(Q[state,])
'''
max_next_state = Q[state, avai_actions[0]] # take first q_value
action = avai_actions[0] # take first action from list
for i in avai_actions:
if Q[state, i] > max_next_state:
max_next_state = Q[state, i] # update again max_next_state if find q_value bigger
action = i # update again action
elif Q[state][i] == max_next_state:
if random.random() < new_epsilon: action = i # if equal, take a value random, then compare to value epsilon to choose action
next_state = action
available_action = []
for i in range(100):
# after moving to next state, add action at next position that new reward table != 0
if r[next_state][i] != 0: available_action.append(i)
action = np.argmax(Q[next_state,])
next_max = action
Temporal_Difference = rewards[state, next_state] + self.discount_factor * Q[next_state, next_max] - Q[state, next_state]
Q[state, next_state] += self.learning_rate * Temporal_Difference
''' need fix for double q-learning
if np.random.rand() < 0.5:
self.q_a_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_b_table[next_state, np.argmax(self.q_a_table[next_state,])] - self.q_a_table[state, next_state])
else:
self.q_b_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_a_table[next_state, np.argmax(self.q_b_table[next_state,])] - self.q_b_table[state, next_state])
'''
total_reward += rewards[state, next_state] # update total reward of agent
state = next_state # change to next state
self.q_table = Q
def training(self, map):
print("Start training process")
epochs, penalties = 0, 0
new_epsilon = 0.8
RewardtableToMap(map, self.reward_table) # convert reward of map correspond to q_table
rewards = self.reward_table
gamma_training = 0.75
Q = self.q_table
rewards_new = np.copy(rewards) # copy the rewards matrix to new matrix
# q-learning process
for i in range(10000):
state = np.random.randint(0, 100) # pick up a state randomly
total_reward = 0 # default total reward is 0
playable_actions = [] # for traversing through the neighbor locations in the maze
for j in range(100):
if rewards_new[state, j] != 0: playable_actions.append(j) # iterate through the new rewards matrix and get the actions != 0
# if value random < epsilon, pick an action randomly from the list of playable action which lead us to next state
if np.random.uniform(0, 1) < self.epsilon: next_state = np.random.choice(playable_actions)
else:
'''
max_next_state = np.max(Q[state, playable_actions])
action = playable_actions[0]
for i in playable_actions:
if Q[state][i] == max_next_state:
if random.random() < new_epsilon: action = i
else: action = np.argmax(Q[state,])
'''
max_next_state = Q[state, playable_actions[0]]
action = playable_actions[0]
for i in playable_actions:
if Q[state, i] > max_next_state:
max_next_state = Q[state, i]
action = i
elif Q[state][i] == max_next_state:
if random.random() < new_epsilon: action = i
next_state = action
play_actions = []
for i in range(100):
# after moving to next state, add action at next position that new reward table != 0
if rewards_new[next_state][i] != 0: play_actions.append(i)
action = np.argmax(Q[next_state,])
next_max = action
Temporal_Difference = rewards[state, next_state] + gamma_training * Q[next_state, next_max] - Q[state, next_state]
Q[state, next_state] += self.learning_rate * Temporal_Difference
''' need fix for double q-learning
if np.random.rand() < 0.5:
self.q_a_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_b_table[next_state, np.argmax(self.q_a_table[next_state,])] - self.q_a_table[state, next_state])
else:
self.q_b_table += self.learning_rate * (self.reward_table[state, next_state] + self.discount_factor * self.q_a_table[next_state, np.argmax(self.q_b_table[next_state,])] - self.q_b_table[state, next_state])
'''
total_reward += rewards[state, next_state] # update total reward of agent
            if rewards[state, next_state] == -10: penalties += 1 # count steps that land on a penalty cell
epochs += 1
self.q_table = Q
print("Training finished")
print(f"Results after {self.episode} episodes:")
print(f"Average timesteps per episode: {epochs / self.episode}")
print(f"Average penalties per episode: {penalties / self.episode}")
def finishProcess(self):
# if agent has no more limit move or finish episode
if self.max_move <= 0 or self.episode <= 0: return True
return False
def get_total_rewards(self):
return self.total_reward # return current total reward
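# A minimal, self-contained sketch of the tabular Q-learning update used in the
# methods above, shown on a toy 3-state chain. The states, rewards, and
# hyperparameters below are made up for illustration and are not part of the map.
def _demo_q_update():
    Q = np.zeros((3, 3))                               # Q[state, next_state]
    R = np.array([[0, -1, 0], [0, 0, 10], [0, 0, 0]])  # toy reward table
    lr, gamma = 0.1, 0.9
    state, next_state = 0, 1
    td = R[state, next_state] + gamma * np.max(Q[next_state, :]) - Q[state, next_state]
    Q[state, next_state] += lr * td                    # Q moves toward the TD target
    return Q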
def GenerateMap(mapFile):
size_map = 10
map = np.zeros((size_map, size_map), dtype=int) # create a matrix 10x10 for map
map_data = open(mapFile, 'r')
default_size_map = map_data.readline() # read size of map
for i in range(size_map):
line = map_data.readline() # read matrix of map
for j in range(len(line)):
if line[j] == 'P':
# count number of dot line from index 0 to character P at that line
# number of dot line = column position of pit in that line
map[i][line.count('.', 0, j)] = 1 # map[0][4], map[1][4], map[2][4], map[3][1]... is positions that have pit, set these positions value 1
SetNeighborCells(map, i, line.count('.', 0, j), 2) # set up 4 neighbor cells near pit cell character 2 (breeze)
if line[j] == 'W':
# count number of dot line from index 0 to character W at that line
# number of dot line = column position of wumpus in that line
map[i][line.count('.', 0, j)] = 3 # set up cells that have wumpus value 3
SetNeighborCells(map, i, line.count('.', 0, j), 4) # set up 4 neighbor cells near wumpus cell character 4 (stench)
if line[j] == 'G':
# count number of dot line from index 0 to character G at that line
# number of dot line = column position of gold in that line
map[i][line.count('.', 0, j)] = 5 # set cells that have gold value 5 and this value's also use for processing q_learning
    # by default, place the agent at a random position on the map
    random_position_row, random_position_column = np.random.randint(0, 10), np.random.randint(0, 10)
"""Class to generate a survey population of FRBs."""
from copy import deepcopy
from tqdm import tqdm
import numpy as np
from frbpoppy.misc import pprint
from frbpoppy.population import Population
from frbpoppy.rates import Rates
class SurveyPopulation(Population):
"""Class to create a survey population of FRBs."""
def __init__(self, cosmic_pop, survey, scat=False, scin=False,
mute=False, scale_by_area=True):
"""
Run a survey to detect FRB sources.
Args:
cosmic_pop (Population): Population class of FRB sources to observe
survey (Survey): Survey class with which to observe
scat (bool, optional): Whether to include scattering in signal to
noise calculations.
scin (bool, optional): Whether to apply scintillation to
observations.
mute (bool): Whether to suppress printing to terminal
scale_by_area (bool): Whether to scale detection rates to the sky
area visible to a survey. Only relevant for one-offs.
"""
if not mute:
pprint(f'Surveying {cosmic_pop.name} with {survey.name}')
# Stops RuntimeWarnings about nan values
np.warnings.filterwarnings('ignore')
# Check whether CosmicPopulation has been generated
try:
if cosmic_pop.frbs.ra is None:
m = 'You may have forgotten to generate your CosmicPopulation'
raise ValueError(m)
except AttributeError:
            m = 'You probably switched the population and survey in the '
            m += 'input of SurveyPopulation'
raise ValueError(m)
# Set up population
Population.__init__(self)
# Set attributes
self.name = f'{cosmic_pop.name}_{survey.name}'
self.vol_co_max = cosmic_pop.vol_co_max
self.n_days = cosmic_pop.n_days
self.repeaters = cosmic_pop.repeaters
self.frbs = deepcopy(cosmic_pop.frbs)
self.source_rate = Rates('source')
if self.repeaters:
self.burst_rate = Rates('burst')
self.scat = scat
self.scin = scin
self.survey = survey
self.scale_by_area = scale_by_area
# Set survey attributes if not available
if survey.n_days is None:
survey.n_days = self.n_days
# Calculations differ for repeaters
if self.repeaters is True and scin is True:
m = 'Scintillation is currently not implemented for '
m += 'RepeaterPopulations'
raise ValueError(m)
# For convenience
frbs = self.frbs
sr = self.source_rate
sr.tot = cosmic_pop.n_srcs
if self.repeaters:
br = self.burst_rate
br.tot = self.frbs.time.size
# Bursts which are too late have already been removed
self.n_brst_pr_src = np.count_nonzero(~np.isnan(self.frbs.time), 1)
br.late += br.tot - np.sum(self.n_brst_pr_src)
sr.late += sr.tot - len(self.n_brst_pr_src)
# Check whether source is in region
region_mask = survey.in_region(frbs.ra, frbs.dec, frbs.gl, frbs.gb)
frbs.apply(region_mask)
# Keep track of detection numbers
sr.out = np.sum(~region_mask)
if self.repeaters:
br.out = np.sum(self.n_brst_pr_src[~region_mask])
self.n_brst_pr_src = self.n_brst_pr_src[region_mask]
# Calculate dispersion measure across single channel
frbs.t_dm = survey.calc_dm_smear(frbs.dm)
# Set scattering timescale
if scat:
frbs.t_scat = survey.calc_scat(frbs.dm)
# Calculate total temperature
frbs.T_sky, frbs.T_sys = survey.calc_Ts(frbs.gl, frbs.gb)
# Calculate effective pulse width
frbs.w_eff = survey.calc_w_eff(frbs.w_arr, frbs.t_dm, frbs.t_scat)
# Calculate peak flux density
frbs.s_peak = survey.calc_s_peak(frbs.si,
frbs.lum_bol,
frbs.z,
frbs.dist_co,
frbs.w_arr,
frbs.w_eff,
f_low=cosmic_pop.f_min,
f_high=cosmic_pop.f_max)
# Calculations differ whether dealing with repeaters or not
if self.repeaters:
self.det_repeaters()
else:
self.det_oneoffs()
# Prevent additional memory usage
self.survey = None
def det_oneoffs(self):
"""Detect one-off frbs."""
frbs = self.frbs
survey = self.survey
# Account for beam offset
int_pro, offset = survey.calc_beam(shape=frbs.s_peak.shape)
frbs.s_peak *= int_pro
frbs.offset = offset # [deg]
# Calculate fluence [Jy*ms]
frbs.fluence = survey.calc_fluence(frbs.s_peak, frbs.w_eff)
# Calculate Signal to Noise Ratio
frbs.snr = survey.calc_snr(frbs.s_peak, frbs.w_arr, frbs.T_sys)
# Add scintillation
if self.scin:
# Ensure scattering has been calculated
if not isinstance(frbs.t_scat, np.ndarray):
frbs.t_scat = survey.calc_scat(frbs.dm)
# Calculate signal to noise ratio after scattering
frbs.snr = survey.calc_scint(frbs.t_scat, frbs.dist_co, frbs.gl,
frbs.gb, frbs.snr)
# Check whether frbs would be above detection threshold
snr_mask = (frbs.snr >= survey.snr_limit)
frbs.apply(snr_mask)
self.source_rate.faint = len(snr_mask) - np.count_nonzero(snr_mask)
# Distant frbs are redshifted out of your observing time
limit = 1/(1+frbs.z)
rate_mask = np.random.random(len(frbs.z)) <= limit
frbs.apply(rate_mask)
self.source_rate.late = np.size(rate_mask)
self.source_rate.late -= np.count_nonzero(rate_mask)
self.source_rate.det = len(frbs.snr)
# Calculate detection rates
if self.scale_by_area:
self.calc_rates(survey)
def det_repeaters(self):
"""Detect repeating frbs."""
frbs = self.frbs
survey = self.survey
br = self.burst_rate
sr = self.source_rate
# Set up a tuple of pointings if not given
survey.gen_pointings()
# t_obs in fractional days
t_obs = survey.t_obs / 86400
# Array with times of each pointing
max_t = survey.n_days
times = np.arange(0, max_t+t_obs, t_obs) # [days]
lsts = times*360*(24/23.9344696) % 360 # Local sidereal time [deg]
lsts += np.random.uniform(0, 360) # Add random offset
# Only keep bursts within survey time
time_mask = (frbs.time <= times[-1])
# n_brst_pr_src = np.count_nonzero(~np.isnan(self.frbs.time), 1)
frbs.apply(time_mask)
# Keep track of losses
br.late += np.sum(self.n_brst_pr_src - np.count_nonzero(time_mask, 1))
sr.late += len(time_mask) - len(self.frbs.time)
# Prepare for iterating over time
max_n_pointings = len(times) - 1
# Initialize some necessary arrays
if frbs.w_eff.ndim == 2 or frbs.lum_bol.ndim == 2:
sim_shape = frbs.time # 2D
else:
sim_shape = frbs.lum_bol # 1D
frbs.fluence = np.full_like(sim_shape, np.nan)
frbs.snr = np.full_like(sim_shape, np.nan)
# Have to loop over the observing times
ra_p = survey.pointings[0]
dec_p = survey.pointings[1]
lst = lsts[:-1]
self.srcs_not_in_pointing = np.ones_like(frbs.index, dtype=bool)
self.srcs_not_bright = np.ones_like(frbs.index, dtype=bool)
# Parameters needed for for-loop
keep = ([], [])
for i in tqdm(np.arange(max_n_pointings), desc='Pointings'):
xy = self._iter_pointings(ra_p[i % survey.n_pointings],
dec_p[i % survey.n_pointings],
lst[i],
times[i],
times[i+1])
keep[0].extend(xy[0])
keep[1].extend(xy[1])
sr.pointing = np.sum(self.srcs_not_in_pointing)
# Already outside of pointing, so unknown whether too faint
self.srcs_not_bright[self.srcs_not_in_pointing] = False
sr.faint = np.sum(self.srcs_not_bright)
# Create SNR mask
snr_mask = np.zeros_like(frbs.snr, dtype=bool)
snr_mask[keep] = True
frbs.apply(snr_mask)
# Keep track of detections
self.burst_rate.det = np.count_nonzero(snr_mask)
self.source_rate.det = len(np.unique(keep[0]))
# Reduce matrices' size
frbs.clean_up()
# Calculate detection rates
self.calc_rates(survey)
def _iter_pointings(self, ra_pt, dec_pt, lst, t_min, t_max):
frbs = self.frbs
survey = self.survey
# Which frbs are within the pointing time?
# Essential that each row is sorted from low to high!
# Returns col, row index arrays
t_ix = fast_where(frbs.time, t_min, t_max)
# What's the intensity of them in the beam?
int_pro, dx, dy = survey.calc_beam(repeaters=True,
ra=frbs.ra[t_ix[0]],
dec=frbs.dec[t_ix[0]],
ra_p=ra_pt,
dec_p=dec_pt,
lst=lst)
# If not an intensity of zero, they were inside the beam pattern
p_ix = ~np.isnan(int_pro)
# Time & position
tp_ix = (t_ix[0][p_ix], t_ix[1][p_ix])
# Time & not position
tnp_ix = (t_ix[0][~p_ix], t_ix[1][~p_ix])
# Number of bursts per source
tp_unique, n_bursts = np.unique(tp_ix[0], return_counts=True)
# Add to outside of pointing count
self.burst_rate.pointing += len(tnp_ix[0])
self.srcs_not_in_pointing[tp_unique] = False
# Ensure relevant masks
s_peak_ix = tp_unique
not_ix = np.unique(tnp_ix[0])
int_pro = int_pro[p_ix]
if frbs.s_peak.ndim == 2:
s_peak_ix = tp_ix
not_ix = tnp_ix
if frbs.s_peak.ndim == 1:
int_pro = int_pro[tp_unique]
# Apply intensities to those bursts' s_peak
frbs.s_peak[s_peak_ix] *= int_pro
frbs.s_peak[not_ix] = np.nan
w_eff_ix = tp_unique
if frbs.w_eff.ndim == 2:
w_eff_ix = tp_ix
# Ensure dimensionality is correct
s_peak = frbs.s_peak[s_peak_ix]
w_eff = frbs.w_eff[w_eff_ix]
if frbs.w_eff.ndim < frbs.s_peak.ndim:
w_eff = np.repeat(w_eff, n_bursts)
elif frbs.w_eff.ndim > frbs.s_peak.ndim:
s_peak = np.repeat(s_peak, n_bursts)
# Calculate fluence [Jy*ms]
frbs.fluence[s_peak_ix] = survey.calc_fluence(s_peak, w_eff)
# Construct masks with correct dimension
if frbs.w_arr.ndim == 2:
w_arr = frbs.w_arr[tp_ix]
elif frbs.w_arr.ndim == 1:
w_arr = frbs.w_arr[tp_unique]
# Ensure entries are repeated for the number of bursts
s_peak = frbs.s_peak[s_peak_ix]
if frbs.w_arr.ndim < frbs.s_peak.ndim:
w_arr = np.repeat(w_arr, n_bursts)
elif frbs.w_arr.ndim > frbs.s_peak.ndim:
s_peak = np.repeat(s_peak, n_bursts)
# And system temperatures
if frbs.T_sys.ndim == 1:
if frbs.s_peak.ndim == 2:
T_sys = np.repeat(frbs.T_sys[tp_unique], n_bursts)
elif frbs.s_peak.ndim == 1:
T_sys = frbs.T_sys[tp_unique]
elif frbs.T_sys.ndim == 0:
T_sys = frbs.T_sys
        # Calculate Signal to Noise Ratio
frbs.snr[s_peak_ix] = survey.calc_snr(s_peak, w_arr, T_sys)
# Add scintillation
if self.scin:
# Not been fully tested with repeaters
# Might break due to differing dimensionality of dist and snr
t_scat = frbs.t_scat[tp_unique]
dist_co = frbs.dist_co[tp_unique]
gl = frbs.gl[tp_unique]
gb = frbs.gb[tp_unique]
snr = frbs.snr[s_peak_ix]
new_snr = survey.calc_scint(t_scat, dist_co, gl, gb, snr)
frbs.snr[s_peak_ix] = np.repeat(new_snr, n_bursts)
# Only keep those in time, in position and above the snr limit
snr_m = (frbs.snr[s_peak_ix] > survey.snr_limit)
s_peak_ix = (tp_ix[0][snr_m], tp_ix[1][snr_m])
self.burst_rate.faint += len(tp_ix[0]) - len(s_peak_ix[0])
self.srcs_not_bright[np.unique(s_peak_ix[0])] = False
return s_peak_ix[0], s_peak_ix[1]
def calc_rates(self, survey):
"""Calculate the relative detection rates."""
# Calculate scaling factors for rates
area_sky = 4*np.pi*(180/np.pi)**2 # In sq. degrees
f_area = survey.beam_size * self.source_rate.tot
inside = self.source_rate.det + self.source_rate.late
inside += self.source_rate.faint
if inside > 0:
f_area /= (inside*area_sky)
else:
f_area = 1
# Saving scaling factors
self.source_rate.days = self.n_days
self.source_rate.name = self.name
self.source_rate.vol = self.source_rate.tot
self.source_rate.vol /= self.vol_co_max * (365.25/self.n_days)
if self.repeaters:
self.burst_rate.days = self.n_days
self.burst_rate.name = self.name
self.burst_rate.vol = self.burst_rate.tot
self.burst_rate.vol /= self.vol_co_max * (365.25/self.n_days)
# If oneoffs, you'll want to scale by the area
if not self.repeaters:
self.source_rate.f_area = f_area
self.source_rate.scale_by_area()
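# Minimal usage sketch: drive this class with an already generated CosmicPopulation
# and a Survey (both must be constructed elsewhere; see the frbpoppy examples).
def _demo_survey_population(cosmic_pop, survey):
    surv_pop = SurveyPopulation(cosmic_pop, survey, scin=False)
    pprint(f'{surv_pop.source_rate.det} sources detected by {surv_pop.name}')
    return surv_pop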
def fast_where(a, min_v, max_v):
"""Faster implementation of np.where(((a >= min_v) & (a <= max_v)))."""
left = np.apply_along_axis(np.searchsorted, 1, a, min_v)
right = np.apply_along_axis(np.searchsorted, 1, a, max_v)
unique_rows = np.where(left <= right)[0]
bursts_per_row = right[unique_rows] - left[unique_rows]
rows = np.repeat(unique_rows, bursts_per_row)
cols = np.zeros(np.sum(bursts_per_row), dtype=int)
    cum_bursts = np.cumsum(bursts_per_row)
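# Reference sketch for what fast_where is meant to reproduce (per its docstring):
# the (row, column) indices of entries falling inside [min_v, max_v]. It assumes
# each row of `a` is sorted, as required above; the array below is illustrative.
def _demo_fast_where_reference():
    a = np.array([[0.1, 0.5, 0.9],
                  [0.2, 0.4, 0.8]])
    rows, cols = np.where((a >= 0.3) & (a <= 0.85))
    return rows, cols  # expected: rows [0, 1, 1], cols [1, 1, 2]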
import numpy as np
# intra-package python imports
from ..kinetics.massaction import MassAction
from .cells import Cell
from .genes import TwoStateGene
class TwoStateCell(Cell):
"""
Class defines a cell with one or more protein coding genes. Transcription is based on a twostate model.
Attributes:
off_states (dict) - {name: node_id} pairs
on_states (dict) - {name: node_id} pairs
dosage (int) - dosage of each gene (used to set initial conditions)
Inherited Attributes:
transcripts (dict) - {name: node_id} pairs
proteins (dict) - {name: node_id} pairs
phosphorylated (dict) - {name: node_id} pairs
nodes (np.ndarray) - vector of node indices
node_key (dict) - {state dimension: node id} pairs
reactions (list) - list of reaction objects
stoichiometry (np.ndarray) - stoichiometric coefficients, (N,M)
N (int) - number of nodes
M (int) - number of reactions
I (int) - number of inputs
"""
def __init__(self,
genes=(),
I=1,
dosage=2,
**kwargs):
"""
Instantiate twostate cell with one or more protein coding genes.
Args:
genes (tuple) - names of genes
I (int) - number of input channels
dosage (int) - dosage of each gene (used to set initial conditions)
kwargs: keyword arguments for add_genes
"""
self.off_states = {}
self.on_states = {}
self.dosage = dosage
super().__init__(genes, I, **kwargs)
@property
def ic(self):
""" Default initial condition. """
ic = np.zeros(self.N, dtype=np.int64)
for off_state in self.off_states.values():
ic[off_state] = self.dosage
return ic
def constrain_ic(self, ic):
"""
Constrains initial condition to specified gene dosage.
Args:
ic (np.ndarray[double]) - initial condition
"""
for gene in self.off_states.keys():
# get current dosage specified by initial condition
off_state = self.off_states[gene]
on_state = self.on_states[gene]
            current_dosage = ic[off_state] + ic[on_state]
            # if dosage is correct, leave as is
            if current_dosage == self.dosage:
                continue
            # if dosage is too low, add to the off state
            elif current_dosage < self.dosage:
                ic[off_state] += (self.dosage - current_dosage)
# if dosage is too high, remove from the on state first
while ic[off_state] + ic[on_state] > self.dosage:
if ic[on_state] > 0:
ic[on_state] -= 1
else:
ic[off_state] -= 1
def add_gene(self, **kwargs):
"""
Add individual gene.
kwargs: keyword arguments for Gene instantiation
"""
gene = TwoStateGene(**kwargs)
# update nodes and reactions
shift = self.nodes.size
added_node_ids = np.arange(shift, shift+gene.nodes.size)
self.update_reaction_dimensions(added_node_ids=added_node_ids)
# add new nodes
self.nodes = np.append(self.nodes, added_node_ids)
self.reactions.extend([rxn.shift(shift) for rxn in gene.reactions])
# update dictionaries
self.off_states.update({k: v+shift for k,v in gene.off_states.items()})
self.on_states.update({k: v+shift for k,v in gene.on_states.items()})
self.transcripts.update({k: v+shift for k,v in gene.transcripts.items()})
self.proteins.update({k: v+shift for k,v in gene.proteins.items()})
def add_activation(self,
gene,
activator,
k=1,
atp_sensitive=False,
carbon_sensitive=False,
ribosome_sensitive=False,
**labels
):
"""
Add gene activation reaction.
Args:
gene (str) - target gene name
activator (str) - names of activating protein
k (float) - activation rate constant
labels (dict) - additional labels for reaction
"""
# define reaction name
labels['name'] = gene+' activation'
# define stoichiometry
        stoichiometry = np.zeros(self.nodes.size, dtype=np.int64)
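# Minimal usage sketch (assumes the base Cell class forwards the gene names to
# TwoStateGene as in add_gene above; the gene name and dosage are illustrative).
def _demo_two_state_cell():
    cell = TwoStateCell(genes=('x',), I=1, dosage=2)
    ic = cell.ic            # all gene copies start in the off state
    cell.constrain_ic(ic)   # enforces off + on copies == dosage
    return cell, ic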
import numpy as np
import random
from math import pow, gamma, sin, pi
from functions import dist_from_zero, rastrigin, booth, Matyas, Rosenbrock, THC, McCormic
class PSOM:
def __init__(self, num_particles, w_v, function, p1 = 0.1, p2 = 0.1,lbd = 1.5, num = 5):
self.num_particles = num_particles
self.generate_values = function.generate_values
self.particles = self.generate_values(num_particles)
self.cost = function.eval
self.shape = function.shape
self.d = function.d
self.current_cost = self.evaluate()
self.min_particle = np.copy(self.particles)
self.min_cost = np.copy(self.particles[np.argmin(self.current_cost)])
self.velocities = np.array([np.random.rand(*self.shape)*2*self.d - self.d for i in range(num_particles)])
self.w_v = w_v
self.p1 = p1
self.p2 = p2
self.lbd = lbd
self.value = np.power((gamma(1+self.lbd)*sin(pi*self.lbd/2)/(gamma((1+self.lbd)/2)*self.lbd*np.power(2,((self.lbd-1)/2)))),(1/self.lbd))
self.num = num
def lambda_func(self):
return np.array(0.01 * np.random.rand(*self.shape)*self.d * self.value)/np.power(np.abs(np.random.rand(*self.shape)*self.d),1/ self.lbd)
def evaluate(self):
current_cost = list()
for i in self.particles:
current_cost.append(self.cost(i))
return np.array(current_cost)
def optimize(self, num_iterations):
for iter in range(num_iterations):
for i in range(len(self.particles)):
r1 = np.random.rand(*self.shape)
r2 = np.random.rand(*self.shape)
self.velocities[i] = (self.w_v * self.velocities[i]) + (self.lambda_func() * r1 * (self.min_particle[i] - self.particles[i])) + (self.lambda_func() * (self.min_cost - self.particles[i]))
self.particles[i] += self.velocities[i]
self.particles[i] = np.clip(self.particles[i], -self.d, self.d)
self.velocities[i] = np.clip(self.velocities[i], -self.d, self.d)
if(self.cost(self.particles[i]) < self.cost(self.min_particle[i])):
self.min_particle[i] = np.copy(self.particles[i])
new_values = np.argsort(self.evaluate())[-int(self.p1 * self.num_particles)-int(self.p2 * self.num_particles):-int(self.p2 * self.num_particles)]
modified_values = np.argsort(self.evaluate())[-int(self.p2 * self.num_particles):]
if(iter%self.num == 0):
self.particles[new_values] = self.generate_values(int(self.p1 * self.num_particles))
                self.velocities[new_values] = np.array([np.random.rand(*self.shape) * 2 * self.d - self.d for i in range(int(self.p1 * self.num_particles))])
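# Usage sketch (assumes the benchmark objects imported from `functions` expose the
# generate_values/eval/shape/d interface used in __init__; the numbers are illustrative).
def _demo_psom():
    opt = PSOM(num_particles=30, w_v=0.7, function=rastrigin, p1=0.1, p2=0.1)
    opt.optimize(num_iterations=100)
    return opt.min_cost  # position of the best particle found so far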
from glob import glob
import pandas as pd
import sys
import ntpath
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from nltk.tokenize import RegexpTokenizer
from skimage.transform import resize
import os
import errno
checkpoint = '2018_10_23_14_55_12'
gen_dir = '../data/coco/gen_masks_%s/*'%(checkpoint)
gt_dir = '../data/coco/masks/'
img_dir = '../data/coco/images/'
txt_dir = '../data/coco/text/'
output_dir = '../vis_bboxes_%s/'%(checkpoint)
CAPS_PER_IMG = 5
FONT_MAX = 40
FONT_REAL = 30
MAX_WORD_NUM = 20
FNT = ImageFont.truetype('../data/coco/share/Pillow/Tests/fonts/FreeMono.ttf', FONT_REAL)
STD_IMG_SIZE = 256
VIS_SIZE = STD_IMG_SIZE
OFFSET = 2
SHOW_LIMIT = 500
def path_leaf(path):
return ntpath.basename(path)
def load_captions(cap_path):
all_captions = []
with open(cap_path, "r") as f:
captions = f.read().decode('utf8').split('\n')
cnt = 0
for cap in captions:
if len(cap) == 0:
continue
cap = cap.replace("\ufffd\ufffd", " ")
# picks out sequences of alphanumeric characters as tokens
# and drops everything else
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(cap.lower())
# print('tokens', tokens)
if len(tokens) == 0:
print('cap', cap)
continue
tokens_new = []
for t in tokens:
t = t.encode('ascii', 'ignore').decode('ascii')
if len(t) > 0:
tokens_new.append(t)
sentence = ' '.join(tokens_new)
all_captions.append(sentence)
cnt += 1
if cnt == CAPS_PER_IMG:
break
if cnt < CAPS_PER_IMG:
print('ERROR: the captions for %s less than %d'
% (cap_path, cnt))
return all_captions
def draw_plate(bboxes):
bbox_plate = Image.fromarray((np.ones((VIS_SIZE, VIS_SIZE, 3))*255).astype(np.uint8))
if bboxes is None:
return bbox_plate
d = ImageDraw.Draw(bbox_plate)
for i in xrange(bboxes.shape[0]):
left, top, width, height, label = bboxes[i, :5]
label = int(label)
color = (210-label*2,label*3,50+label*2)
d.rectangle([left, top, left+width-1, top+height-1], outline=color)
d.text([left+5, top+5], str(label), fill=color)
del d
return bbox_plate
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_non_zero_file(fpath):
return True if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else False
mkdir_p(output_dir)
gen_paths = glob(gen_dir)
keys = [path_leaf(gen_path) for gen_path in gen_paths]
count = 0
for key in keys:
if count >= SHOW_LIMIT:
break
# 1. load image
img_path = '%s%s.jpg'%(img_dir, key)
img = np.array(Image.open(img_path))
img_height, img_width = img.shape[0], img.shape[1]
height_scale = STD_IMG_SIZE/float(img_height)
width_scale = STD_IMG_SIZE/float(img_width)
img = resize(img, [STD_IMG_SIZE, STD_IMG_SIZE])
img = Image.fromarray((img*255).astype(np.uint8))
# 2. load captions
cap_path = '%s%s.txt'%(txt_dir, key)
captions = load_captions(cap_path)
# 3. load gt bboxes
gt_bbox_path = '%s%s/boxes.txt'%(gt_dir, key)
if is_non_zero_file(gt_bbox_path):
gt_boxes = pd.read_csv(gt_bbox_path, header=None).astype(int)
gt_boxes = np.array(gt_boxes)
gt_boxes[:,[0,2]] = gt_boxes[:,[0,2]]*width_scale
gt_boxes[:,[1,3]] = gt_boxes[:,[1,3]]* height_scale
else:
gt_boxes = None
# 4. load gen bboxes
gen_bbox_paths = glob('%s%s/*'%(gen_dir, key))
gen_bbox_paths_indices = [int(path_leaf(gen_bbox_path)) for gen_bbox_path in gen_bbox_paths]
    gen_bbox_paths_indices = np.argsort(gen_bbox_paths_indices)
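# Usage sketch for draw_plate: each bbox row is [left, top, width, height, label],
# matching the parsing above; the coordinates below are illustrative.
def _demo_draw_plate():
    bboxes = np.array([[10, 20, 60, 40, 3],
                       [100, 120, 50, 80, 7]])
    plate = draw_plate(bboxes)
    plate.save('demo_bboxes.png')
    return plate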
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `geometry` module."""
from __future__ import division
import logging
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal
from scipy.spatial import Delaunay
from metpy.interpolate.geometry import (area, circumcenter, circumcircle_radius, dist_2,
distance, find_local_boundary, find_natural_neighbors,
find_nn_triangles_point, get_point_count_within_r,
get_points_within_r, order_edges, triangle_area)
logging.getLogger('metpy.interpolate.geometry').setLevel(logging.ERROR)
def test_get_points_within_r():
r"""Test get points within a radius function."""
x = list(range(10))
y = list(range(10))
center = [1, 5]
radius = 5
matches = get_points_within_r(center, list(zip(x, y)), radius).T
truth = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
assert_array_almost_equal(truth, matches)
def test_get_point_count_within_r():
r"""Test get point count within a radius function."""
x = list(range(10))
y = list(range(10))
center1 = [1, 5]
center2 = [12, 10]
radius = 5
count = get_point_count_within_r([center1, center2], list(zip(x, y)), radius)
truth = np.array([5, 2])
assert_array_almost_equal(truth, count)
def test_triangle_area():
r"""Test area of triangle function."""
pt0 = [0, 0]
pt1 = [10, 10]
pt2 = [10, 0]
truth = 50.0
t_area = triangle_area(pt0, pt1, pt2)
assert_almost_equal(truth, t_area)
    # what if two points are the same? It's a line!
pt0 = [0, 0]
pt1 = [0, 0]
pt2 = [10, 0]
truth = 0
t_area = triangle_area(pt0, pt1, pt2)
assert_almost_equal(truth, t_area)
def test_dist_2():
r"""Test squared distance function."""
x0 = 0
y0 = 0
x1 = 10
y1 = 10
truth = 200
dist2 = dist_2(x0, y0, x1, y1)
assert_almost_equal(truth, dist2)
def test_distance():
r"""Test distance function."""
pt0 = [0, 0]
pt1 = [10, 10]
truth = 14.14213562373095
dist = distance(pt0, pt1)
assert_almost_equal(truth, dist)
def test_circumcircle_radius():
r"""Test circumcircle radius function."""
pt0 = [0, 0]
pt1 = [10, 10]
pt2 = [10, 0]
cc_r = circumcircle_radius(pt0, pt1, pt2)
truth = 7.07
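    # For this right triangle the hypotenuse (pt0-pt1) is the circumcircle's
    # diameter, so the radius is sqrt(10**2 + 10**2) / 2 ~= 7.07.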
assert_almost_equal(truth, cc_r, decimal=2)
def test_circumcircle_radius_degenerate():
"""Test that circumcircle_radius handles a degenerate triangle."""
pt0 = [0, 0]
pt1 = [10, 10]
pt2 = [0, 0]
assert np.isnan(circumcircle_radius(pt0, pt1, pt2))
def test_circumcenter():
r"""Test circumcenter function."""
pt0 = [0, 0]
pt1 = [10, 10]
pt2 = [10, 0]
cc = circumcenter(pt0, pt1, pt2)
truth = [5., 5.]
assert_array_almost_equal(truth, cc)
def test_find_natural_neighbors():
r"""Test find natural neighbors function."""
x = list(range(0, 20, 4))
y = list(range(0, 20, 4))
    gx, gy = np.meshgrid(x, y)
from __future__ import print_function
import h5py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
try:
import moxing as mox
import npu_bridge
mox.file.shift('os', 'mox')
h5py_File_class = h5py.File
class OBSFile(h5py_File_class):
def __init__(self, name, *args, **kwargs):
self._tmp_name = None
self._target_name = name
if name.startswith('obs://') or name.startswith('s3://'):
self._tmp_name = os.path.join('cache', 'h5py_tmp',
name.replace('/', '_'))
if mox.file.exists(name):
mox.file.copy(name, self._tmp_name)
name = self._tmp_name
super(OBSFile, self).__init__(name, *args, **kwargs)
        def close(self):
            # close (and flush) the local HDF5 file before copying it back to OBS
            super(OBSFile, self).close()
            if self._tmp_name:
                mox.file.copy(self._tmp_name, self._target_name)
setattr(h5py, 'File', OBSFile)
except:
pass
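# Usage sketch: with the patch above active, h5py.File transparently stages OBS/S3
# paths through a local cache and copies the file back on close. The bucket path
# below is a made-up example.
def _demo_obs_h5(path='obs://my-bucket/results/demo.h5'):
    with h5py.File(path, 'w') as f:
        f.create_dataset('x', data=list(range(10)))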
import argparse
import glob
import time
import numpy as np
import scipy.io
import tensorflow as tf
from PIL import Image
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
from network import network
_errstr = "Mode is unknown or incompatible with input array shape."
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == np.uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
def toimage(arr,
high=255,
low=0,
cmin=None,
cmax=None,
pal=None,
mode=None,
channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
data = np.asarray(arr)
if np.iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and ((3 in shape) or
(4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(np.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data,
high=high,
low=low,
cmin=cmin,
cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (np.arange(0, 256, 1, dtype=np.uint8)[:, np.newaxis] *
np.ones((3, ), dtype=np.uint8)[np.newaxis, :])
image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = np.amin(np.ravel(data))
if cmax is None:
cmax = np.amax(np.ravel(data))
data = (data * 1.0 - cmin) * (high - low) / (cmax - cmin) + low
if mode == 'I':
data32 = data.astype(np.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
            ca = np.flatnonzero(np.asarray(shape) == 3)[0]
import rospy
import enum
import time
import numpy as np
import tf
import copy
from math import degrees, radians, cos
from control_node import HiwinRobotInterface
from collision_avoidance.srv import collision_avoid, collision_avoidRequest
from hand_eye.srv import eye2base, eye2baseRequest
from hand_eye.srv import save_pcd, save_pcdRequest
from avoidance_mission.srv import snapshot, snapshotRequest, snapshotResponse
from tool_angle.srv import tool_angle, tool_angleRequest
pic_pos = \
[[11., 27., 14., 179.948, 10.215, -0.04],
[11., 11., 14., -155.677, 9.338, 4.16],
[11., 45., 15., 162.071, 8.982, -2.503],
[20., 29., 13., -179.401, 20.484, 0.484],
[-1., 27., 10., 178.176, -5.075, -0.821],
[11., 30., 3., 176.897, 9.752, -0.733],
[11., 48., 0., 147.166, 8.127, -5.457],
[11., 14., -1., -136.398, 7.255, 6.574],
[7., 26., -2., 179.442, -22.966, -0.352],
[20., 26., 0., 179.502, 41.557, -0.951]]
class Arm_status(enum.IntEnum):
Idle = 1
Isbusy = 2
class State(enum.IntEnum):
move2pic = 0
take_pic = 1
move2objup = 2
move2obj = 3
move2binup = 4
move2placeup = 5
place = 6
finish = 7
get_objinfo = 8
placeup = 9
move2bin_middleup = 10
move2placeup1 = 11
pick_obj = 12
class EasyCATest:
def __init__(self):
self.arm_move = False
self.monitor_suc = False
self.state = State.move2pic
self.pic_pos = np.array(pic_pos)
self.pic_pos_indx = 0
self.target_obj = []
self.place_pos_left = np.array([-2.6175, -15.7, -29, 180, 0, 0])##
self.place_pos_right = np.array([-2.6236, -26.8867, -29, 180, 0, 0])##
self.dis_trans = np.mat(np.identity(4))
self.right_side = False
self.stop_flg = False
def hand_eye_client(self, req):
rospy.wait_for_service('/robot/eye_trans2base')
try:
ez_ca = rospy.ServiceProxy('/robot/eye_trans2base', eye2base)
res = ez_ca(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def get_pcd_client(self, req):
rospy.wait_for_service('/get_pcd')
try:
get_pcd = rospy.ServiceProxy('/get_pcd', save_pcd)
res = get_pcd(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def CA_client(self, req):
rospy.wait_for_service('/robot/_CA')
try:
ca = rospy.ServiceProxy('/robot/_CA', collision_avoid)
res = ca(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def get_obj_client(self, req):
rospy.wait_for_service('/AlignPointCloud')
try:
get_obj = rospy.ServiceProxy('/AlignPointCloud', snapshot)
res = get_obj(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def tool_client(self, req):
rospy.wait_for_service('/tool/tool_angle')
try:
tool = rospy.ServiceProxy('/tool/tool_angle', tool_angle)
res = tool(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def check_side(self, trans):
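        # The third column of `trans` is the object's z-axis expressed in the base
        # frame; |dot| > 0.7 means it is within roughly 45 degrees of vertical.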
if abs(np.dot(np.array(trans[:3, 2]).reshape(-1), [0, 0, 1])) > 0.7:
if trans[2,2] > 0:
transform = tf.transformations.euler_matrix(0, radians(180), 0, axes='sxyz')
trans = trans * transform
r_side = True
else:
r_side = False
else:
            vec_cen2obj = trans[:2, 3].reshape(-1) - np.array([0.2, 0.3])
from typing import Callable
import numpy
def calc_iint(
beam_polarization: float, flipper_efficiency: float, f_nucl, f_m_perp, matrix_u, func_extinction: Callable = None,
flag_beam_polarization: bool = False, flag_flipper_efficiency: bool = False,
flag_f_nucl: bool = False, flag_f_m_perp: bool = False,
dict_in_out: dict = None, flag_use_precalculated_data: bool = False):
"""Calculate integrated intensities.
It is supposed that crystal is not rotate during the calculations
(orientation matrix is the same for all reflections)
"""
if dict_in_out is None:
flag_dict = False
dict_in_out_keys = []
else:
flag_dict = True
dict_in_out_keys = dict_in_out.keys()
p_u = beam_polarization
p_d = beam_polarization*(2*flipper_efficiency-1)
flag_f_plus_sq = flag_f_nucl or flag_f_m_perp
flag_f_minus_sq = flag_f_nucl or flag_f_m_perp
flag_f_m_perp_xy_sq = flag_f_m_perp
f_n = numpy.atleast_1d(f_nucl)
f_m_perp = numpy.atleast_1d(f_m_perp)
f_n_sq = numpy.square(numpy.abs(f_n))
f_m_perp_x = f_m_perp[0]*matrix_u[0] + f_m_perp[1]*matrix_u[1] + f_m_perp[2]*matrix_u[2]
f_m_perp_y = f_m_perp[0]*matrix_u[3] + f_m_perp[1]*matrix_u[4] + f_m_perp[2]*matrix_u[5]
f_m_perp_z = f_m_perp[0]*matrix_u[6] + f_m_perp[1]*matrix_u[7] + f_m_perp[2]*matrix_u[8]
f_m_perp_x_sq = numpy.square(numpy.abs(f_m_perp_x))
f_m_perp_y_sq = numpy.square(numpy.abs(f_m_perp_y))
f_m_perp_z_sq = numpy.square(numpy.abs(f_m_perp_z))
f_n_f_m_perp_z = 2.*(f_n.real * f_m_perp_z.real + f_n.imag * f_m_perp_z.imag)
f_plus_sq = f_n_sq + f_m_perp_z_sq + f_n_f_m_perp_z
f_minus_sq = f_n_sq + f_m_perp_z_sq - f_n_f_m_perp_z
f_m_perp_xy_sq = f_m_perp_x_sq + f_m_perp_y_sq
if func_extinction is None:
dder_y_plus, dder_y_minus, dder_y_m_perp_xy = {}, {}, {}
y_plus = numpy.ones_like(f_plus_sq)
y_minus = numpy.ones_like(f_minus_sq)
y_m_perp_xy = numpy.ones_like(f_m_perp_xy_sq)
else:
y_plus, dder_y_plus = func_extinction(f_plus_sq, flag_f_sq=flag_f_plus_sq)
y_minus, dder_y_minus = func_extinction(f_minus_sq, flag_f_sq=flag_f_minus_sq)
y_m_perp_xy, dder_y_m_perp_xy = func_extinction(f_m_perp_xy_sq, flag_f_sq=flag_f_m_perp_xy_sq)
chiral_term = 2.*(f_m_perp_x.imag * f_m_perp_y.real - f_m_perp_x.real * f_m_perp_y.imag)
if flag_dict:
dict_in_out["chiral_term"] = chiral_term
iint_plus = 0.5*((1.+p_u)*y_plus*f_plus_sq +
(1.-p_u)*y_minus*f_minus_sq) + \
y_m_perp_xy * f_m_perp_xy_sq + \
p_u * chiral_term
iint_minus = 0.5*((1.-p_d)*y_plus*f_plus_sq +
(1.+p_d)*y_minus*f_minus_sq) + \
y_m_perp_xy * f_m_perp_xy_sq - \
p_d * chiral_term
if flag_dict:
dict_in_out["iint_plus"] = iint_plus
dict_in_out["iint_minus"] = iint_minus
dict_in_out["y_plus"] = y_plus
dict_in_out["f_plus_sq"] = f_plus_sq
dict_in_out["y_minus"] = y_minus
dict_in_out["f_minus_sq"] = f_minus_sq
dict_in_out["y_m_perp_xy"] = y_m_perp_xy
dict_in_out["f_m_perp_xy_sq"] = f_m_perp_xy_sq
dder_plus = {}
dder_minus = {}
if flag_beam_polarization:
dder_plus["beam_polarization"] = 0.5*(y_plus*f_plus_sq - y_minus*f_minus_sq + 2.*chiral_term) * \
numpy.ones_like(beam_polarization)
dder_minus["beam_polarization"] = 0.5*(-y_plus*f_plus_sq + y_minus*f_minus_sq - 2.*chiral_term) * \
numpy.ones_like(beam_polarization)*(2.*flipper_efficiency-1.)
if flag_flipper_efficiency:
dder_minus["flipper_efficiency"] = beam_polarization*(-y_plus*f_plus_sq + y_minus*f_minus_sq - 2.*chiral_term) * \
numpy.ones_like(flipper_efficiency)
if flag_f_nucl:
f_plus_sq_f_n_real = 2. * (f_n.real + f_m_perp_z.real) * numpy.ones_like(f_n.real)
f_plus_sq_f_n_imag = 2. * (f_n.imag + f_m_perp_z.imag) * numpy.ones_like(f_n.imag)
f_minus_sq_f_n_real = 2. * (f_n.real - f_m_perp_z.real)* numpy.ones_like(f_n.real)
f_minus_sq_f_n_imag = 2. * (f_n.imag - f_m_perp_z.imag) * numpy.ones_like(f_n.imag)
y_plus_f_n_real, y_minus_f_n_real = 0, 0
y_plus_f_n_imag, y_minus_f_n_imag = 0, 0
if "f_sq" in dder_y_plus.keys():
y_plus_f_n_real = dder_y_plus["f_sq"]*f_plus_sq_f_n_real
y_plus_f_n_imag = dder_y_plus["f_sq"]*f_plus_sq_f_n_imag
if "f_sq" in dder_y_minus.keys():
y_minus_f_n_real = dder_y_minus["f_sq"]*f_minus_sq_f_n_real
y_minus_f_n_imag = dder_y_minus["f_sq"]*f_minus_sq_f_n_imag
dder_plus["f_nucl_real"] = 0.5*(
(1.+p_u)*(y_plus*f_plus_sq_f_n_real+y_plus_f_n_real*f_plus_sq) +
(1.-p_u)*(y_minus*f_minus_sq_f_n_real+y_minus_f_n_real*f_minus_sq))
dder_plus["f_nucl_imag"] = 0.5*(
(1.+p_u)*(y_plus*f_plus_sq_f_n_imag+y_plus_f_n_imag*f_plus_sq) +
(1.-p_u)*(y_minus*f_minus_sq_f_n_imag+y_minus_f_n_imag*f_minus_sq))
dder_minus["f_nucl_real"] = 0.5*(
(1.-p_d)*(y_plus*f_plus_sq_f_n_real+y_plus_f_n_real*f_plus_sq) +
(1.+p_d)*(y_minus*f_minus_sq_f_n_real+y_minus_f_n_real*f_minus_sq))
dder_minus["f_nucl_imag"] = 0.5*(
(1.-p_d)*(y_plus*f_plus_sq_f_n_imag+y_plus_f_n_imag*f_plus_sq) +
(1.+p_d)*(y_minus*f_minus_sq_f_n_imag+y_minus_f_n_imag*f_minus_sq))
if flag_f_m_perp:
f_plus_sq_f_m_perp_z_real = 2. * (f_n.real + f_m_perp_z.real) * numpy.ones_like(f_m_perp_z.real)
f_plus_sq_f_m_perp_z_imag = 2. * (f_n.imag + f_m_perp_z.imag) * numpy.ones_like(f_m_perp_z.imag)
f_minus_sq_f_m_perp_z_real = -2. * (f_n.real - f_m_perp_z.real) * numpy.ones_like(f_m_perp_z.real)
f_minus_sq_f_m_perp_z_imag = -2. * (f_n.imag - f_m_perp_z.imag) * numpy.ones_like(f_m_perp_z.imag)
f_m_perp_xy_sq_f_m_perp_x_real = 2 * f_m_perp_x.real * numpy.ones_like(f_m_perp_x.real)
f_m_perp_xy_sq_f_m_perp_x_imag = 2 * f_m_perp_x.imag * numpy.ones_like(f_m_perp_x.imag)
f_m_perp_xy_sq_f_m_perp_y_real = 2 * f_m_perp_y.real * numpy.ones_like(f_m_perp_y.real)
f_m_perp_xy_sq_f_m_perp_y_imag = 2 * f_m_perp_y.imag * numpy.ones_like(f_m_perp_y.imag)
chiral_term_f_m_perp_x_real = -2 * f_m_perp_y.imag * numpy.ones_like(f_m_perp_x.real)
        chiral_term_f_m_perp_x_imag = 2 * f_m_perp_y.real * numpy.ones_like(f_m_perp_x.imag)
from abc import ABCMeta
from abc import abstractmethod
import torch
import numpy as np
from torch import stack as tstack
from mvt.utils import bbox3d_util
# general
def torch_to_np_dtype(ttype):
"""convert torch to numpy"""
type_map = {
torch.float16: np.dtype(np.float16),
torch.float32: np.dtype(np.float32),
torch.float64: np.dtype(np.float64),
torch.int32: np.dtype(np.int32),
torch.int64: np.dtype(np.int64),
torch.uint8: np.dtype(np.uint8),
}
return type_map[ttype]
# Box
def voxel_box_encode(boxes, anchors):
"""
box encode for voxel-net
:param boxes: [N, 7] Tensor, normal boxes: x, y, z, l, w, h, r
:param anchors: [N, 7] Tensor, anchors
:return: encoded boxes
"""
box_ndim = anchors.shape[-1]
cas, cgs = [], []
if box_ndim > 7:
xa, ya, za, la, wa, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, lg, wg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
else:
xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1)
xg, yg, zg, lg, wg, hg, rg = torch.split(boxes, 1, dim=-1)
la = torch.clamp(la, 1e-3, 1e3)
wa = torch.clamp(wa, 1e-3, 1e3)
ha = torch.clamp(ha, 1e-3, 1e3)
    lg = torch.clamp(lg, 1e-3, 1e3)
    wg = torch.clamp(wg, 1e-3, 1e3)
    hg = torch.clamp(hg, 1e-3, 1e3)
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
cts = [g - a for g, a in zip(cgs, cas)]
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
return torch.cat([xt, yt, zt, lt, wt, ht, rt, *cts], dim=-1)
def voxel_box_decode(box_encodings, anchors):
"""
box decode for pillar-net in lidar
    :param box_encodings: [N, 7] Tensor, encoded boxes: x, y, z, l, w, h, r
    :param anchors: [N, 7] Tensor, anchors
:return: decoded boxes
"""
box_ndim = anchors.shape[-1]
cas, cts = [], []
if box_ndim > 7:
xa, ya, za, la, wa, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, lt, wt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
else:
xa, ya, za, la, wa, ha, ra = torch.split(anchors, 1, dim=-1)
xt, yt, zt, lt, wt, ht, rt = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, lg, wg, hg, rg, *cgs], dim=-1)
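# Round-trip sketch: decoding an encoding recovers the original boxes (for sizes
# within the clamped range); the anchor/box values below are illustrative.
def _demo_voxel_box_roundtrip():
    anchors = torch.tensor([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
    boxes = torch.tensor([[0.5, -0.3, -0.9, 1.8, 4.2, 1.6, 0.3]])
    encoded = voxel_box_encode(boxes, anchors)
    decoded = voxel_box_decode(encoded, anchors)
    assert torch.allclose(decoded, boxes, atol=1e-5)
    return encoded, decoded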
def corners_nd(dims, origin=0.5):
"""
generate relative box corners based on length per dim and origin point.
:param dims: float array, shape=[N, ndim], array of length per dim
:param origin: list or array or float, origin point relate to smallest point.
:return: float array, shape=[N, 2 ** ndim, ndim], returned corners.
point layout example, (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
dtype = torch_to_np_dtype(dims.dtype)
if isinstance(origin, float):
origin = [origin] * ndim
corners_norm = np.stack(
np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
# so need to convert to a format which is convenient to do other computing.
# for 2d boxes, format is clockwise start from minimum point
# for 3d boxes, please draw them by your hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
    corners_norm = corners_norm - np.array(origin, dtype=dtype)
import sys
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
sys.path.append(os.path.join('../'))
from lib.BeamDynamicsTools.Boundary import Boundary
from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF
from lib.BeamDynamicsTools.Trajectory import Trajectory
from lib.BeamDynamicsTools.Beam import Beam
import pylab as pl
# ===============================================================================
# Calculates Spread in trajectory for non gaussian beam energy distribution
# ===============================================================================
# Define np.array of injection angles
# (x,y,z) = (1.798m, -0.052m, 0.243m)
# alpha = 12.6 degrees (X-Z plane)
# beta = 8.0 degrees (X-Y plane)
alpha0 = 12.6
beta0 = 8.0
alpha = alpha0 / 180.0 * np.pi
beta = beta0 / 180.0 * np.pi
print(alpha, beta)
Rinjection = [1.798, -0.052, 0.243]
Vinjection = [-np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), -np.sin(alpha)]  # unit injection direction (sign of the z-component assumed)
# Copyright 2022 The Balsa Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Balsa simulation agent."""
import collections
import copy
import hashlib
import os
import pickle
import time
from absl import app
from absl import logging
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
import torch
import torch.nn.functional as F
import balsa
from balsa import costing
from balsa import envs
from balsa import experience
from balsa import hyperparams
from balsa import models
from balsa import optimizer as balsa_opt
from balsa import search
from balsa.util import dataset as ds
from balsa.util import plans_lib
from balsa.util import postgres
import train_utils
class SimModel(pl.LightningModule):
def __init__(self,
use_tree_conv,
query_feat_dims,
plan_feat_dims,
mlp_hiddens,
tree_conv_version=None,
loss_type=None,
torch_invert_cost=None,
query_featurizer=None,
perturb_query_features=False):
super().__init__()
assert loss_type in [None, 'mean_qerror'], loss_type
self.save_hyperparameters()
self.use_tree_conv = use_tree_conv
if use_tree_conv:
self.tree_conv = models.treeconv.TreeConvolution(
feature_size=query_feat_dims,
plan_size=plan_feat_dims,
label_size=1,
version=tree_conv_version)
else:
self.mlp = balsa.models.MakeMlp(input_size=query_feat_dims +
plan_feat_dims,
num_outputs=1,
hiddens=mlp_hiddens,
activation='relu')
self.loss_type = loss_type
self.torch_invert_cost = torch_invert_cost
self.query_featurizer = query_featurizer
self.perturb_query_features = perturb_query_features
def forward(self, query_feat, plan_feat, indexes=None):
if self.use_tree_conv:
return self.tree_conv(query_feat, plan_feat, indexes)
return self.mlp(torch.cat([query_feat, plan_feat], -1))
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=3e-3)
return optimizer
def training_step(self, batch, batch_idx):
loss = self._ComputeLoss(batch)
result = pl.TrainResult(minimize=loss)
result.log('train_loss', loss, prog_bar=True)
return result
def validation_step(self, batch, batch_idx):
val_loss = self._ComputeLoss(batch)
result = pl.EvalResult(checkpoint_on=val_loss, early_stop_on=val_loss)
result.log('val_loss', val_loss, prog_bar=True)
return result
def _ComputeLoss(self, batch):
query_feat, plan_feat, *rest = batch
target = rest[-1]
if self.training and self.perturb_query_features is not None:
# No-op for non-enabled featurizers.
query_feat = self.query_featurizer.PerturbQueryFeatures(
query_feat, distribution=self.perturb_query_features)
if self.use_tree_conv:
assert len(rest) == 2
output = self.forward(query_feat, plan_feat, rest[0])
else:
assert len(rest) == 1
output = self.forward(query_feat, plan_feat)
if self.loss_type == 'mean_qerror':
output_inverted = self.torch_invert_cost(output.reshape(-1,))
target_inverted = self.torch_invert_cost(target.reshape(-1,))
return train_utils.QErrorLoss(output_inverted, target_inverted)
return F.mse_loss(output.reshape(-1,), target.reshape(-1,))
def on_after_backward(self):
if self.global_step % 50 == 0:
norm_dict = self.grad_norm(norm_type=2)
total_norm = norm_dict['grad_2.0_norm_total']
self.logger.log_metrics({'total_grad_norm': total_norm},
step=self.global_step)
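# Minimal forward-pass sketch for the MLP variant (assumes balsa.models.MakeMlp
# returns a torch.nn.Module mapping the concatenated features to a single output,
# as used in __init__ above; the dimensions are illustrative).
def _demo_sim_model_forward():
    model = SimModel(use_tree_conv=False,
                     query_feat_dims=8,
                     plan_feat_dims=16,
                     mlp_hiddens=[64, 64])
    query_feat = torch.randn(4, 8)
    plan_feat = torch.randn(4, 16)
    return model(query_feat, plan_feat)  # predicted (normalized) plan costs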
class SimQueryFeaturizer(plans_lib.Featurizer):
"""Implements the query featurizer.
Query node -> [ multi-hot of what tables are present ]
* [ each-table's selectivities ]
"""
def __init__(self, workload_info):
self.workload_info = workload_info
def __call__(self, node):
vec = np.zeros(self.dims, dtype=np.float32)
# Joined tables: [table: 1].
joined = node.leaf_ids()
for rel_id in joined:
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
vec[idx] = 1.0
# Filtered tables.
table_id_to_name = lambda table_id: table_id.split(' ')[0] # Hack.
for rel_id, est_rows in node.info['all_filters_est_rows'].items():
if rel_id not in joined:
# Due to the way we copy Nodes and populate this info field,
# leaf_ids() might be a subset of info['all_filters_est_rows'].
continue
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
total_rows = self.workload_info.table_num_rows[table_id_to_name(
rel_id)]
# NOTE: without ANALYZE, for some reason this predicate is
# estimated to have 703 rows, whereas the table only has 4 rows:
# (kind IS NOT NULL) AND ((kind)::text <> 'production
# companies'::text)
# With ANALYZE run, this assert passes.
assert est_rows >= 0 and est_rows <= total_rows, (node.info,
est_rows,
total_rows)
vec[idx] = est_rows / total_rows
return vec
def PerturbQueryFeatures(self, query_feat, distribution):
"""Randomly perturbs a query feature vec returned by __call__()."""
selectivities = query_feat
# Table: for each chance of each joined table being perturbed:
# % of original query features kept
# mean # tables scaled
#
# 0.5: ~3% original; mean # tables scaled 3.6
# 0.3: ~10.5% original; mean # tables scaled 2.1
# 0.25: ~13.9-16.6% original; mean # tables scaled 1.8-1.9
# 0.2: ~23.6% original; mean # tables scaled 1.5
#
# % kept original:
# ((multipliers > 1).sum(1) == 0).sum().float() / len(multipliers)
# Mean # tables scaled:
# (multipliers > 1).sum(1).float().mean()
#
# "Default": chance = 0.25, unif = [0.5, 2].
chance, unif = distribution
should_scale = torch.rand(selectivities.shape,
device=selectivities.device) < chance
# The non-zero entries are joined tables.
should_scale *= (selectivities > 0)
# Sample multipliers ~ Unif[l, r].
multipliers = torch.rand(
selectivities.shape,
device=selectivities.device) * (unif[1] - unif[0]) + unif[0]
multipliers *= should_scale
# Now, the 0 entries mean "should not scale", which needs to be
# translated into using a multiplier of 1.
multipliers[multipliers == 0] = 1
# Perturb.
new_selectivities = torch.clamp(selectivities * multipliers, max=1)
return new_selectivities
@property
def dims(self):
return len(self.workload_info.rel_ids)
class SimQueryFeaturizerV2(SimQueryFeaturizer):
"""Concat SimQueryFeaturizer's output with indicators of filtered columns.
Query feature vec
= [each table: selectivity (0 if non-joined)]
concat [bools of filtered cols].
"""
def __call__(self, node):
parent_vec = super().__call__(node)
num_tables = len(self.workload_info.rel_ids)
filtered_attrs = node.GetFilteredAttributes()
for attr in filtered_attrs:
idx = np.where(self.workload_info.all_attributes == attr)[0][0]
parent_vec[num_tables + idx] = 1.0
return parent_vec
@property
def dims(self):
return len(self.workload_info.rel_ids) + len(
self.workload_info.all_attributes)
class SimQueryFeaturizerV3(SimQueryFeaturizer):
"""[table->bool] concat [filtered col->selectivity]."""
def __call__(self, node):
vec = np.zeros(self.dims, dtype=np.float32)
# Joined tables: [table: 1].
joined = node.leaf_ids()
for rel_id in joined:
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
vec[idx] = 1.0
num_tables = len(self.workload_info.rel_ids)
# Filtered cols.
rel_id_to_est_rows = node.info['all_filters_est_rows']
leaves = node.GetLeaves()
for leaf in leaves:
leaf_filters = leaf.GetFilters()
if not leaf_filters:
continue
# PG's parser groups all pushed-down filters by table.
assert len(leaf_filters) == 1, leaf_filters
leaf_filter = leaf_filters[0]
# Get the overall selectivity of this expr.
table_id = leaf.get_table_id()
expr_est_rows = rel_id_to_est_rows[table_id]
table_name = leaf.get_table_id(with_alias=False)
total_rows = self.workload_info.table_num_rows[table_name]
assert expr_est_rows >= 0 and expr_est_rows <= total_rows, (
node.info, expr_est_rows, total_rows)
table_expr_selectivity = expr_est_rows / total_rows
# Assign this selectivity to all filtered columns in this expr.
# Note that the expr may contain multiple cols & OR, in which case
# we make a simplification to assign the same sel. to all cols.
filtered_attrs = leaf.GetFilteredAttributes()
for attr in filtered_attrs:
idx = np.where(self.workload_info.all_attributes == attr)[0][0]
vec[num_tables + idx] = table_expr_selectivity
return vec
@property
def dims(self):
return len(self.workload_info.rel_ids) + len(
self.workload_info.all_attributes)
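# Illustrative sketch (not part of the original classes): a toy example of the
# "[table -> bool] concat [filtered col -> selectivity]" layout produced by
# SimQueryFeaturizerV3. The table and column names are invented; the real
# vectors are indexed by workload_info.rel_ids and all_attributes.
def _demo_v3_layout():
    import numpy as np
    rel_ids = np.array(['t1', 't2', 't3'])
    all_attributes = np.array(['t1.a', 't2.b', 't3.c'])
    vec = np.zeros(len(rel_ids) + len(all_attributes), dtype=np.float32)
    # Suppose the query joins t1 and t3 and filters t3.c with selectivity 0.1.
    vec[np.where(rel_ids == 't1')[0][0]] = 1.0
    vec[np.where(rel_ids == 't3')[0][0]] = 1.0
    vec[len(rel_ids) + np.where(all_attributes == 't3.c')[0][0]] = 0.1
    return vec  # -> [1, 0, 1, 0, 0, 0.1]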
class SimQueryFeaturizerV4(plans_lib.Featurizer):
"""Raw estimated rows per table -> log(1+x) -> min_max scaling."""
def __init__(self, workload_info):
self.workload_info = workload_info
self._min = None
self._max = None
self._range = None
self._min_torch = None
self._max_torch = None
self._range_torch = None
def __call__(self, node):
vec = self._FeaturizePreScaling(node)
return (vec - self._min) / self._range
def PerturbQueryFeatures(self, query_feat, distribution):
"""Randomly perturbs a query feature vec returned by __call__()."""
_min = self._min_torch.to(query_feat.device)
_max = self._max_torch.to(query_feat.device)
_range = self._range_torch.to(query_feat.device)
pre_scaling = query_feat * _range + _min
est_rows = torch.exp(pre_scaling) - 1.0
# Chance of each joined table being perturbed.
# 0.5: ~3% original; mean # tables scaled 3.6
# 0.25: ~16.6% original; mean # tables scaled 1.8
# 0.3: ~10.5% original; mean # tables scaled 2.1
#
# % kept original:
# ((multipliers > 1).sum(1) == 0).sum().float() / len(multipliers)
# Mean # tables scaled:
# (multipliers > 1).sum(1).float().mean()
#
# "Default": chance = 0.25, unif = [0.5, 2].
chance, unif = distribution
should_scale = torch.rand(est_rows.shape,
device=est_rows.device) < chance
# The non-zero entries are joined tables.
should_scale *= (est_rows > 0)
# Sample multipliers ~ Unif[l, r].
multipliers = torch.rand(est_rows.shape, device=est_rows.device) * (
unif[1] - unif[0]) + unif[0]
multipliers *= should_scale
# Now, the 0 entries mean "should not scale", which needs to be
# translated into using a multiplier of 1.
multipliers[multipliers == 0] = 1
# Perturb.
new_est_rows = est_rows * multipliers
# Re-perform transforms.
logged = torch.log(1.0 + new_est_rows)
logged_clamped = torch.min(logged, _max)
new_query_feat_transformed = (logged_clamped - _min) / _range
return new_query_feat_transformed
def _FeaturizePreScaling(self, node):
vec = np.zeros(self.dims, dtype=np.float32)
table_id_to_name = lambda table_id: table_id.split(' ')[0] # Hack.
joined = node.leaf_ids()
# Joined tables: [table: rows of table].
for rel_id in joined:
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
total_rows = self.workload_info.table_num_rows[table_id_to_name(
rel_id)]
vec[idx] = total_rows
# Filtered tables: [table: estimated rows of table].
for rel_id, est_rows in node.info['all_filters_est_rows'].items():
if rel_id not in joined:
# Due to the way we copy Nodes and populate this info field,
# leaf_ids() might be a subset of info['all_filters_est_rows'].
continue
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
total_rows = self.workload_info.table_num_rows[table_id_to_name(
rel_id)]
assert est_rows >= 0 and est_rows <= total_rows, (node.info,
est_rows,
total_rows)
vec[idx] = est_rows
# log1p.
return np.log(1.0 + vec)
def Fit(self, nodes):
assert self._min is None and self._max is None, (self._min, self._max)
pre_scaling = np.asarray(
[self._FeaturizePreScaling(node) for node in nodes])
self._min = np.min(pre_scaling, 0)
self._max = np.max(pre_scaling, 0)
self._range = self._max - self._min
# For PerturbQueryFeatures().
self._min_torch = torch.from_numpy(self._min)
self._max_torch = torch.from_numpy(self._max)
self._range_torch = torch.from_numpy(self._range)
logging.info('log(1+est_rows): min {}\nmax {}'.format(
self._min, self._max))
@property
def dims(self):
return len(self.workload_info.rel_ids)
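# Illustrative sketch (not part of the original class): SimQueryFeaturizerV4
# maps raw estimated row counts through x -> log(1 + x) and then min-max
# scales each dimension with statistics fitted on the training nodes. The row
# counts below are invented; the guard against a zero range is only part of
# this sketch.
def _demo_log1p_minmax():
    import numpy as np
    pre_scaling = np.log(1.0 + np.array([[0.0, 1e3, 1e6],
                                         [0.0, 5e2, 2e6]]))
    lo, hi = pre_scaling.min(0), pre_scaling.max(0)
    rng = hi - lo
    rng[rng == 0] = 1.0  # constant dimensions would otherwise divide by zero
    return (pre_scaling - lo) / rng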
class SimPlanFeaturizer(plans_lib.Featurizer):
"""Implements the plan featurizer.
plan node -> [ multi-hot of tables on LHS ] [ same for RHS ]
"""
def __init__(self, workload_info):
self.workload_info = workload_info
def __call__(self, node):
vec = np.zeros(self.dims, dtype=np.float32)
# Tables on LHS.
for rel_id in node.children[0].leaf_ids():
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
vec[idx] = 1.0
# Tables on RHS.
for rel_id in node.children[1].leaf_ids():
idx = np.where(self.workload_info.rel_ids == rel_id)[0][0]
vec[idx + len(self.workload_info.rel_ids)] = 1.0
return vec
@property
def dims(self):
return len(self.workload_info.rel_ids) * 2
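# Illustrative sketch (not part of the original class): SimPlanFeaturizer
# encodes a join node as [multi-hot of LHS tables] ++ [multi-hot of RHS
# tables]. With invented rel_ids ['a', 'b', 'c'], the node (a JOIN b) JOIN c
# featurizes as below.
def _demo_plan_multi_hot():
    import numpy as np
    rel_ids = np.array(['a', 'b', 'c'])
    lhs, rhs = ['a', 'b'], ['c']
    vec = np.zeros(2 * len(rel_ids), dtype=np.float32)
    for rel_id in lhs:
        vec[np.where(rel_ids == rel_id)[0][0]] = 1.0
    for rel_id in rhs:
        vec[len(rel_ids) + np.where(rel_ids == rel_id)[0][0]] = 1.0
    return vec  # -> [1, 1, 0, 0, 0, 1]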
class Sim(object):
"""Balsa simulation."""
@classmethod
def Params(cls):
p = hyperparams.InstantiableParams(cls)
# Train.
p.Define('epochs', 100, 'Maximum training epochs. '\
'Early-stopping may kick in.')
p.Define('gradient_clip_val', 0, 'Clip the gradient norm computed over'\
' all model parameters together. 0 means no clipping.')
p.Define('bs', 2048, 'Batch size.')
# Validation.
p.Define('validate_fraction', 0.1,
'Sample this fraction of the dataset as the validation set. '\
'0 to disable validation.')
# Search, train-time.
p.Define('search', search.DynamicProgramming.Params(),
'Params of the enumeration routine to use for training data.')
# Search space.
p.Define('plan_physical', False,
'Learn and plan physical scans/joins, or just join orders?')
# Infer, test-time.
p.Define('infer_search_method', 'beam_bk', 'Options: beam_bk.')
p.Define('infer_beam_size', 10, 'Beam size.')
p.Define('infer_search_until_n_complete_plans', 1,
'Search until how many complete plans?')
# Workload.
p.Define('workload', envs.JoinOrderBenchmark.Params(),
'Params of the Workload, i.e., a set of queries.')
# Data collection.
p.Define('skip_data_collection_geq_num_rels', None,
'If specified, do not collect data for queries with at '\
'least this many number of relations.')
p.Define(
'generic_ops_only_for_min_card_cost', False,
'If using MinCardCost, whether to enumerate generic ops only.')
p.Define('sim_data_collection_intermediate_goals', True,
'For each query, also collect sim data with intermediate '\
'query goals?')
# Featurizations.
p.Define('plan_featurizer_cls', SimPlanFeaturizer,
'Featurizer to use for plans.')
p.Define('query_featurizer_cls', SimQueryFeaturizer,
'Featurizer to use for queries.')
p.Define('label_transforms', ['log1p', 'standardize'],
'Transforms for labels.')
p.Define('perturb_query_features', None, 'See experiments.')
# Eval.
p.Define('eval_output_path', 'eval-cost.csv',
'Path to write evaluation output into.')
p.Define('eval_latency_output_path', 'eval-latency.csv',
'Path to write evaluation latency output into.')
# Model/loss.
p.Define('tree_conv_version', None, 'Options: None, V2.')
p.Define('loss_type', None, 'Options: None (MSE), mean_qerror.')
return p
@classmethod
def HashOfSimData(cls, p):
"""Gets the hash that should determine the simulation data."""
# Use (a few attributes inside Params, Postgres configs) as hash key.
# Using PG configs is necessary because things like PG version / PG
# optimizer settings affect collected costs.
# NOTE: in theory, other stateful effects such as whether ANALYZE has
# been called on a PG database also affects the collected costs.
_RELEVANT_HPARAMS = [
'search',
'workload',
'skip_data_collection_geq_num_rels',
'generic_ops_only_for_min_card_cost',
'plan_physical',
]
param_vals = [p.Get(hparam) for hparam in _RELEVANT_HPARAMS]
param_vals = [
v.ToText() if isinstance(v, hyperparams.Params) else str(v)
for v in param_vals
]
spec = '\n'.join(param_vals)
if p.search.cost_model.cls is costing.PostgresCost:
# Only PostgresCost would depend on PG configs.
pg_configs = map(str, postgres.GetServerConfigs())
spec += '\n'.join(pg_configs)
hash_sim = hashlib.sha1(spec.encode()).hexdigest()[:8]
return hash_sim
@classmethod
def HashOfFeaturizedData(cls, p):
"""Gets the hash that should determine the final featurized tensors."""
# Hash(HashOfSimData(), featurization specs).
# NOTE: featurized data involves asking Postgres for cardinality
# estimates of filters. So in theory, here the hash calculation should
# depend on postgres.GetServerConfigs(). Most relevant are the PG
# version & whether ANALYZE has been run (this is not tracked by any PG
# config). Here let's make an assumption that all PG versions with
# ANALYZE ran produce the same estimates, which is reasonable because
# they are just histograms.
hash_sim = cls.HashOfSimData(p)
_FEATURIZATION_HPARAMS = [
'plan_featurizer_cls',
'query_featurizer_cls',
]
param_vals = [str(p.Get(hparam)) for hparam in _FEATURIZATION_HPARAMS]
spec = str(hash_sim) + '\n'.join(param_vals)
hash_feat = hashlib.sha1(spec.encode()).hexdigest()[:8]
return hash_feat
def __init__(self, params):
self.params = params.Copy()
p = self.params
# Plumb through same flags.
p.search.plan_physical_ops = p.plan_physical
p.search.cost_model.cost_physical_ops = p.plan_physical
logging.info(p)
# Instantiate search.
self.search = p.search.cls(p.search)
# Instantiate workload.
self.workload = p.workload.cls(p.workload)
wi = self.workload.workload_info
generic_join = np.array(['Join'])
generic_scan = | np.array(['Scan']) | numpy.array |
"""
Functions for loading specific datasets
==============================
"""
import numpy as np
# from __future__ import print_function
def load_radar(proj, day):
"""
Function to load in radar data
    Written to allow different load-in procedures for different projects
Takes:
'proj' argument, options:
1. moccha
'day' argument, options:
1. date in format YYYYMMDD (string)
2. all (string), loads all data (20180814 to 20180914)
Use example:
data = load_radar('moccha', 'all')
"""
from netCDF4 import Dataset
from time_functions import calcTime_Date2DOY
if proj == 'moccha':
### choose variables to load
var_list = ['time','range','Zh']
### make empty data dictionary
data = {}
temp = {}
if day != 'all':
### define filename
data_dir = '/home/gillian/MOCCHA/ODEN/DATA/'
filename = data_dir + 'mmcr/' + day + '_oden_mira.nc'
### load file
nc = Dataset(filename,'r')
### populate dictionary
for var in var_list:
data[var] = nc.variables[var][:]
### transpose 2D data so that time is 0th axis
if np.ndim(data[var]) == 2:
data[var] = np.transpose(data[var])
### find date in DOY format
date = calcTime_Date2DOY(day)
### update time array to reference base date
data['time'] = data['time']/24.0 + date
### remove flagged data points
data['Zh'][data['Zh'] == -999.0] = np.nan
nc.close()
else:
### define filename
data_dir = '/home/gillian/MOCCHA/ODEN/DATA/'
moccha_names = ['20180814_oden_','20180815_oden_','20180816_oden_',
'20180817_oden_','20180818_oden_','20180819_oden_','20180820_oden_',
'20180821_oden_','20180822_oden_','20180823_oden_','20180824_oden_',
'20180825_oden_','20180826_oden_','20180827_oden_','20180828_oden_',
'20180829_oden_','20180830_oden_','20180831_oden_','20180901_oden_',
'20180902_oden_','20180903_oden_','20180904_oden_','20180905_oden_',
'20180906_oden_','20180907_oden_','20180908_oden_','20180909_oden_',
'20180911_oden_','20180912_oden_','20180913_oden_','20180914_oden_']
for name in moccha_names:
filename = data_dir + 'mmcr/' + name + 'mira.nc'
### load file
nc = Dataset(filename,'r')
### initialise array with first file
if name == '20180814_oden_':
### populate dictionary
for var in var_list:
data[var] = nc.variables[var][:]
### transpose 2D data so that time is 0th axis
if np.ndim(data[var]) == 2:
data[var] = | np.transpose(data[var]) | numpy.transpose |
import numpy as np
import sys
import pickle
from collections import defaultdict
import itertools
import nltk
import random
# space is included in whitelist
EN_WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz '
EN_BLACKLIST = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\''
DATA_PATH = './data/chat.txt'
# these values determine the length of questions and answers while training
# increase the length to get a better trained model at the cost of more time and resources...
limit = {
'maxq': 20,
'minq': 0,
'maxa': 20,
'mina': 0
}
# increase vocab size for a better trained model at the cost of more time and resources
VOCAB_SIZE = 6000
UNK = 'unk'
def default():
return 1
# read lines from the file
def read_lines(filename):
return open(filename).read().split('\n')[:-1]
# separate sentences in a line
def split_sentences(line):
return line.split('.')
# remove any character that isn't in the whitelist
def filter_lines(line, whitelist):
return ''.join([ch for ch in line if ch in whitelist])
# read words and create index to word and word to index dictionaries
def index(tokenized_sentences, vocab_size):
# get frequency distribution of the tokenized words which are most used
    # flatten the per-sentence token lists before counting word frequencies
    freq_dist = nltk.FreqDist(itertools.chain.from_iterable(tokenized_sentences))
# get vocabulary of size VOCAB_SIZE
vocab = freq_dist.most_common(vocab_size)
# generate index to word dictionary
index2word = ['_'] + [UNK] + [x[0] for x in vocab]
# generate word to index dictionary
word2index = dict([(w, i) for i, w in enumerate(index2word)])
return index2word, word2index, freq_dist
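# Illustrative sketch (not part of the original pipeline): what index() builds
# for a tiny, made-up corpus of tokenized sentences. Index 0 is the padding
# token '_' and index 1 is UNK; the remaining words are ordered by frequency
# (the order of ties may vary).
def _demo_index():
    tokenized = [['hello', 'world'], ['hello', 'bot']]
    index2word, word2index, _ = index(tokenized, vocab_size=10)
    # index2word -> ['_', 'unk', 'hello', 'world', 'bot']
    # word2index -> {'_': 0, 'unk': 1, 'hello': 2, 'world': 3, 'bot': 4}
    return index2word, word2index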
# filter sequences based on set min length and max length
def filter_data(sequences):
filter_q, filter_a = [], []
raw_data_len = len(sequences) // 2
for i in range(0, len(sequences), 2):
qlen = len(sequences[i].split(' '))
alen = len(sequences[i+1].split(' '))
if qlen >= limit['minq'] and qlen <= limit['maxq']:
if alen >= limit['mina'] and alen <= limit['maxa']:
filter_q.append(sequences[i])
filter_a.append(sequences[i+1])
filter_data_len = len(filter_q)
filter_percent = int((raw_data_len - filter_data_len) / raw_data_len * 100)
print('{} filtered from original data'.format(filter_percent))
return filter_q, filter_a
'''
Replace words with indices in a sequence.
Words not present in the vocabulary are replaced with UNK.
'''
def pad_seq(seq, lookup, maxlen):
indices = []
for word in seq:
if word in lookup:
indices.append(lookup[word])
else:
indices.append(lookup[UNK])
return indices + [0] * (maxlen - len(seq))
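# Illustrative sketch (not part of the original pipeline): pad_seq() maps each
# word to its index (unknown words to the UNK index) and right-pads with zeros
# up to maxlen. The lookup table here is invented.
def _demo_pad_seq():
    lookup = {'_': 0, UNK: 1, 'hello': 2, 'bot': 3}
    return pad_seq(['hello', 'there', 'bot'], lookup, maxlen=6)
    # -> [2, 1, 3, 0, 0, 0]   ('there' is not in the lookup, so it becomes UNK)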
'''
Generate the final dataset by creating arrays of indices
and adding zero padding. Zero padding simply pads each
sequence with zeros up to the maximum length.
'''
def zero_pad(tokenized_q, tokenized_a, word2index):
data_len = len(tokenized_q)
index_q = np.zeros([data_len, limit['maxq']], dtype=np.int32)
index_a = | np.zeros([data_len, limit['maxa']], dtype=np.int32) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 13:37:37 2019 by <NAME> - <EMAIL>
Training script for semantic segmentation on the MiniCity (VIPriors challenge)
dataset using an EfficientSeg model, built on the challenge baseline script.
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.transforms.functional as TF
from torchvision import transforms
import time
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import argparse
import warnings
import random
from PIL import Image
from Minicity_train import MiniCity_train
from helpers.model import UNet
from helpers.minicity import MiniCity
from helpers.helpers import AverageMeter, ProgressMeter, iouCalc
from model import enc_config
from model import EfficientSeg
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='VIPriors Segmentation baseline training script')
parser.add_argument('--dataset_path', metavar='path/to/minicity/root', default='./minicity',
type=str, help='path to dataset (ends with /minicity)')
parser.add_argument('--colorjitter_factor', metavar='0.3', default=0.3,
type=float, help='data augmentation: color jitter factor')
parser.add_argument('--scale_factor', metavar='0.3', default=0.3,
type=float, help='data augmentation: random scale factor')
parser.add_argument('--hflip', metavar='[True,False]', default=True,
type=float, help='data augmentation: random horizontal flip')
parser.add_argument('--crop_size', metavar='384 768', default=[384,768], nargs="+",
type=int, help='data augmentation: random crop size, height width, space separated')
parser.add_argument('--train_size', metavar='512 1024', default=[384,768], nargs="+",
type=int, help='image size during training, height width, space separated')
parser.add_argument('--test_size', metavar='512 1024', default=[512,1024], nargs="+",
type=int, help='image size during validation and testing, height width, space separated')
parser.add_argument('--batch_size', metavar='5', default=4, type=int, help='batch size')
parser.add_argument('--test_batch_size', metavar='2', default=2, type=int, help='test batch size')
parser.add_argument('--pin_memory', metavar='[True,False]', default=True,
type=bool, help='pin memory on GPU')
parser.add_argument('--num_workers', metavar='8', default=4, type=int,
help='number of dataloader workers')
parser.add_argument('--lr_init', metavar='1e-2', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--lr_min', metavar='1e-5', default=1e-4, type=float,
help='lower bound on learning rate')
parser.add_argument('--lr_patience', metavar='5', default=5, type=int,
help='patience for reduce learning rate on plateau')
parser.add_argument('--lr_momentum', metavar='0.9', default=0.9, type=float,
help='momentum for SGD optimizer')
parser.add_argument('--lr_weight_decay', metavar='1e-4', default=1.e-6, type=float,
help='weight decay for SGD optimizer')
parser.add_argument('--weights', metavar='path/to/checkpoint', default=None,
type=str, help='resume training from checkpoint')
parser.add_argument('--epochs', metavar='200', default=300, type=int,
help='number of training epochs')
parser.add_argument('--seed', metavar='42', default=42, type=int,
help='random seed to use')
parser.add_argument('--dataset_mean', metavar='[0.485, 0.456, 0.406]',
default=[0.2870, 0.3257, 0.2854], type=float,
help='mean for normalization', nargs=3)
parser.add_argument('--dataset_std', metavar='[0.229, 0.224, 0.225]',
default=[0.1879, 0.1908, 0.1880], type=float,
help='std for normalization', nargs=3)
parser.add_argument('--predict', metavar='path/to/weights',
default=None, type=str,
help='provide path to model weights to predict on validation set')
parser.add_argument('--depth_coeff', metavar='1.0', default=1.6, type=float, help='depth coefficient')
parser.add_argument('--width_coeff', metavar='1.0', default=6.0, type=float, help='width coefficient')
"""
def adjust_learning_rate(optimizer, epoch):
lr = 0.5 * args.lr_init * (1 + np.cos(np.pi * (epoch)/ args.epochs ))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
"""
def adjust_learning_rate(optimizer, epoch):
lr = args.lr_init * ( (1 - epoch / args.epochs) ** 0.9 )
for param_group in optimizer.param_groups:
param_group['lr'] = lr
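# Illustrative sketch (not part of the training loop): the active schedule is
# a polynomial decay, lr = lr_init * (1 - epoch / epochs) ** 0.9. The values
# below use lr_init = 1e-3 and 300 epochs purely as an example.
def _demo_poly_lr(lr_init=1e-3, epochs=300):
    return {epoch: lr_init * ((1 - epoch / epochs) ** 0.9)
            for epoch in (0, 100, 200, 299)}
    # -> {0: 1.0e-3, 100: ~6.9e-4, 200: ~3.7e-4, 299: ~5.9e-6}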
"""
===========
Main method
===========
"""
def weight_init(m):
if isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.Conv2d):
        torch.nn.init.kaiming_uniform_(m.weight)
def main():
global args
args = parser.parse_args()
args.train_size = tuple(args.train_size)
args.test_size = tuple(args.test_size)
# Fix seed
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
# Create directory to store run files
if not os.path.isdir('baseline_run'):
os.makedirs('baseline_run/images')
os.makedirs('baseline_run/results_color')
# Load dataset
trainset = MiniCity_train(args.dataset_path, split='train', class_additions=40)
valset = MiniCity(args.dataset_path, split='val', transforms=test_trans)
testset = MiniCity(args.dataset_path, split='val', transforms=test_trans)
dataloaders = {}
dataloaders['val'] = torch.utils.data.DataLoader(valset,
batch_size=args.test_batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
dataloaders['test'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
# Load model
model = EfficientSeg(enc_config=enc_config, dec_config=None, num_classes=len(MiniCity.validClasses),
width_coeff=args.width_coeff)
print(sum(p.numel() for p in model.parameters()))
model.apply(weight_init)
# Define loss, optimizer and scheduler
criterion = nn.CrossEntropyLoss(ignore_index=MiniCity.voidClass,weight=torch.from_numpy(np.array([1.0, #road
1.0, #sidewalk
1.0, #building
2.0, #wall
2.0, #fence
2.0, #pole
1.0, #traffic light
1.0, #traffic sign
1.0, #vegetation
1.0, #terrain
1.0, #sky
1.0, #person
2.0, #rider
1.0, #car
3.0, #truck
3.0, #bus
3.0, #train
2.0, #motorcycle
2.0, #bicycle
2.0] #void
)).float().cuda())
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init,
# momentum=args.lr_momentum,
weight_decay=args.lr_weight_decay
)
# Initialize metrics
best_miou = 0.0
metrics = {'train_loss' : [],
'train_acc' : [],
'val_acc' : [],
'val_loss' : [],
'miou' : []}
start_epoch = 0
# Push model to GPU
if torch.cuda.is_available():
model = model.cuda()
print('Model pushed to {} GPU(s), type {}.'.format(torch.cuda.device_count(), torch.cuda.get_device_name(0)))
# Resume training from checkpoint
if args.weights:
print('Resuming training from {}.'.format(args.weights))
checkpoint = torch.load(args.weights)
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
metrics = checkpoint['metrics']
best_miou = checkpoint['best_miou']
start_epoch = checkpoint['epoch']+1
# No training, only running prediction on test set
if args.predict:
checkpoint = torch.load(args.predict)
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
print('Loaded model weights from {}'.format(args.predict))
# Create results directory
if not os.path.isdir('results'):
os.makedirs('results')
predict(dataloaders['test'], model, MiniCity.mask_colors)
return
# Generate log file
with open('baseline_run/log_epoch.csv', 'a') as epoch_log:
epoch_log.write('epoch, train loss, val loss, train acc, val acc, miou\n')
since = time.time()
for epoch in range(start_epoch,args.epochs):
# Train
print('--- Training ---')
train_loss, train_acc = train_epoch(trainset, model,
criterion, optimizer, None,
epoch, void=MiniCity.voidClass)
metrics['train_loss'].append(train_loss)
metrics['train_acc'].append(train_acc)
print('Epoch {} train loss: {:.4f}, acc: {:.4f}'.format(epoch,train_loss,train_acc))
# Validate
print('--- Validation ---')
val_acc, val_loss, miou = validate_epoch(dataloaders['val'],
model,
criterion, epoch,
MiniCity.classLabels,
MiniCity.validClasses,
void=MiniCity.voidClass,
maskColors=MiniCity.mask_colors)
metrics['val_acc'].append(val_acc)
metrics['val_loss'].append(val_loss)
metrics['miou'].append(miou)
#scheduler.step(val_loss)
# Write logs
with open('baseline_run/log_epoch.csv', 'a') as epoch_log:
epoch_log.write('{}, {:.5f}, {:.5f}, {:.5f}, {:.5f}, {:.5f}\n'.format(
epoch, train_loss, val_loss, train_acc, val_acc, miou))
# Save checkpoint
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'best_miou': best_miou,
'metrics': metrics,
}, 'baseline_run/checkpoint.pth.tar')
# Save best model to file
if miou > best_miou:
print('mIoU improved from {:.4f} to {:.4f}.'.format(best_miou, miou))
# best_miou = miou
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
}, 'baseline_run/best_weights.pth.tar')
if miou > 0.40:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
}, 'baseline_run/best_weights'+ str(miou)+'.pth.tar')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
# Plot learning curves
x = np.arange(args.epochs)
fig, ax1 = plt.subplots()
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ln1 = ax1.plot(x, metrics['train_loss'], color='tab:red')
ln2 = ax1.plot(x, metrics['val_loss'], color='tab:red', linestyle='dashed')
ax1.grid()
ax2 = ax1.twinx()
ax2.set_ylabel('accuracy')
ln3 = ax2.plot(x, metrics['train_acc'], color='tab:blue')
ln4 = ax2.plot(x, metrics['val_acc'], color='tab:blue', linestyle='dashed')
ln5 = ax2.plot(x, metrics['miou'], color='tab:green')
lns = ln1+ln2+ln3+ln4+ln5
plt.legend(lns, ['Train loss','Validation loss','Train accuracy','Validation accuracy','mIoU'])
plt.tight_layout()
plt.savefig('baseline_run/learning_curve.pdf', bbox_inches='tight')
# Load best model
checkpoint = torch.load('baseline_run/best_weights.pth.tar')
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
print('Loaded best model weights (epoch {}) from baseline_run/best_weights.pth.tar'.format(checkpoint['epoch']))
# Create results directory
if not os.path.isdir('results'):
os.makedirs('results')
# Run prediction on validation set
# For predicting on test set, simple replace 'val' by 'test'
predict(dataloaders['val'], model, MiniCity.mask_colors)
"""
=================
Routine functions
=================
"""
def train_epoch(trainset, model, criterion, optimizer, lr_scheduler, epoch, void=-1):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
loss_running = AverageMeter('Loss', ':.4e')
acc_running = AverageMeter('Accuracy', ':.3f')
trainset.create_an_epoch()
dataloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time, loss_running, acc_running],
prefix="Train, epoch: [{}]".format(epoch))
# input resolution
res = args.crop_size[0] * args.crop_size[1]
if epoch in [200, 400]:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr']/10
#adjust_learning_rate(optimizer, epoch)
# Set model in training mode
model.train()
end = time.time()
with torch.set_grad_enabled(True):
# Iterate over data.
for epoch_step, (inputs, labels) in enumerate(dataloader):
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
labels = labels.long().cuda()
_, _, h, w = inputs.shape
# zero the parameter gradients
optimizer.zero_grad()
# forward pass
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
# backward pass
loss.backward()
optimizer.step()
# Statistics
bs = inputs.size(0) # current batch size
loss = loss.item()
loss_running.update(loss, bs)
corrects = torch.sum(preds == labels.data)
nvoid = int((labels==void).sum())
acc = corrects.double()/(bs*res-nvoid) # correct/(batch_size*resolution-voids)
acc_running.update(acc, bs)
# output training info
progress.display(epoch_step)
# Measure time
batch_time.update(time.time() - end)
end = time.time()
# Reduce learning rate
return loss_running.avg, acc_running.avg
def validate_epoch(dataloader, model, criterion, epoch, classLabels, validClasses, void=-1, maskColors=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
loss_running = AverageMeter('Loss', ':.4e')
acc_running = AverageMeter('Accuracy', ':.4e')
iou = iouCalc(classLabels, validClasses, voidClass = void)
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time, loss_running, acc_running],
prefix="Test, epoch: [{}]".format(epoch))
# input resolution
res = args.test_size[0]*args.test_size[1]
# Set model in evaluation mode
    model.eval()  # TODO: add a ReduceLROnPlateau scheduler and inspect losses
with torch.no_grad():
end = time.time()
for epoch_step, (inputs, labels, filepath) in enumerate(dataloader):
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
labels = labels.long().cuda()
# forward
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
# Statistics
bs = inputs.size(0) # current batch size
loss = loss.item()
loss_running.update(loss, bs)
corrects = torch.sum(preds == labels.data)
nvoid = int((labels==void).sum())
acc = corrects.double()/(bs*res-nvoid) # correct/(batch_size*resolution-voids)
acc_running.update(acc, bs)
# Calculate IoU scores of current batch
iou.evaluateBatch(preds, labels)
# Save visualizations of first batch
if epoch_step == 0 and maskColors is not None:
for i in range(inputs.size(0)):
filename = os.path.splitext(os.path.basename(filepath[i]))[0]
# Only save inputs and labels once
if epoch == 0:
img = visim(inputs[i,:,:,:])
label = vislbl(labels[i,:,:], maskColors)
if len(img.shape) == 3:
cv2.imwrite('baseline_run/images/{}.png'.format(filename),img[:,:,::-1])
else:
cv2.imwrite('baseline_run/images/{}.png'.format(filename),img)
cv2.imwrite('baseline_run/images/{}_gt.png'.format(filename),label[:,:,::-1])
# Save predictions
pred = vislbl(preds[i,:,:], maskColors)
cv2.imwrite('baseline_run/images/{}_epoch_{}.png'.format(filename,epoch),pred[:,:,::-1])
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress info
progress.display(epoch_step)
miou = iou.outputScores()
print('Accuracy : {:5.3f}'.format(acc_running.avg))
print('---------------------')
return acc_running.avg, loss_running.avg, miou
def predict(dataloader, model, maskColors):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time],
prefix='Predict: ')
# Set model in evaluation mode
model.eval()
with torch.no_grad():
end = time.time()
for epoch_step, batch in enumerate(dataloader):
if len(batch) == 2:
inputs, filepath = batch
else:
inputs, _, filepath = batch
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
# forward
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
# Save visualizations of first batch
for i in range(inputs.size(0)):
filename = os.path.splitext(os.path.basename(filepath[i]))[0]
# Save input
img = visim(inputs[i,:,:,:])
img = Image.fromarray(img, 'RGB')
img.save('baseline_run/results_color/{}_input.png'.format(filename))
# Save prediction with color labels
pred = preds[i,:,:].cpu()
pred_color = vislbl(pred, maskColors)
pred_color = Image.fromarray(pred_color.astype('uint8'))
pred_color.save('baseline_run/results_color/{}_prediction.png'.format(filename))
# Save class id prediction (used for evaluation)
pred_id = MiniCity.trainid2id[pred]
pred_id = Image.fromarray(pred_id)
pred_id = pred_id.resize((2048,1024), resample=Image.NEAREST)
pred_id.save('results/{}.png'.format(filename))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress info
progress.display(epoch_step)
"""
====================
Data transformations
====================
"""
def test_trans(image, mask=None):
# Resize, 1 for Image.LANCZOS
image = TF.resize(image, args.test_size, interpolation=1)
# From PIL to Tensor
image = TF.to_tensor(image)
# Normalize
image = TF.normalize(image, args.dataset_mean, args.dataset_std)
if mask:
# Resize, 0 for Image.NEAREST
mask = TF.resize(mask, args.test_size, interpolation=0)
mask = np.array(mask, np.uint8) # PIL Image to numpy array
mask = torch.from_numpy(mask) # Numpy array to tensor
return image, mask
else:
return image
def train_trans(image, mask):
# Generate random parameters for augmentation
bf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
cf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
sf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
hf = np.random.uniform(-args.colorjitter_factor,+args.colorjitter_factor)
scale_factor = np.random.uniform(1-args.scale_factor,1+args.scale_factor)
    pflip = np.random.randint(0, 2) > 0.5  # random horizontal flip with probability 0.5
# Resize, 1 for Image.LANCZOS
image = TF.resize(image, args.train_size, interpolation=1)
# Resize, 0 for Image.NEAREST
mask = TF.resize(mask, args.train_size, interpolation=0)
# Random scaling
image = TF.affine(image, 0, [0,0], scale_factor, [0,0])
mask = TF.affine(mask, 0, [0,0], scale_factor, [0,0])
# Random cropping
if not args.train_size == args.crop_size:
# From PIL to Tensor
image = TF.to_tensor(image)
mask = TF.to_tensor(mask)
h, w = args.train_size
th, tw = args.crop_size
i = np.random.randint(0, h - th)
j = np.random.randint(0, w - tw)
image = image[:,i:i+th,j:j+tw]
mask = mask[:,i:i+th,j:j+tw]
image = TF.to_pil_image(image)
mask = TF.to_pil_image(mask[0,:,:])
# H-flip
if pflip == True and args.hflip == True:
image = TF.hflip(image)
mask = TF.hflip(mask)
# Color jitter
image = TF.adjust_brightness(image, bf)
image = TF.adjust_contrast(image, cf)
image = TF.adjust_saturation(image, sf)
image = TF.adjust_hue(image, hf)
# From PIL to Tensor
image = TF.to_tensor(image)
# Normalize
image = TF.normalize(image, args.dataset_mean, args.dataset_std)
# Convert ids to train_ids
mask = np.array(mask, np.uint8) # PIL Image to numpy array
mask = torch.from_numpy(mask) # Numpy array to tensor
return image, mask
def train_trans_alt(image, mask):
colorjitter_factor = 0.2
th, tw = args.train_size
h, w = 512, 1024
crop_scales = [1.0, 0.8, 0.6, 0.4]
# Generate random parameters for augmentation
    pflip = np.random.randint(0, 2) > 0.5  # random horizontal flip with probability 0.5
bf = np.random.uniform(1-colorjitter_factor,1+colorjitter_factor)
cf = np.random.uniform(1-colorjitter_factor,1+colorjitter_factor)
sf = np.random.uniform(1-colorjitter_factor,1+colorjitter_factor)
hf = np.random.uniform(-colorjitter_factor,colorjitter_factor)
# Resize, 1 for Image.LANCZOS
image = TF.resize(image, (h, w), interpolation=1)
# Resize, 0 for Image.NEAREST
mask = TF.resize(mask, (h, w), interpolation=0)
# Random cropping
# From PIL to Tensor
crop_scale = | np.random.choice(crop_scales) | numpy.random.choice |
import functools
import numpy
import numpy.testing
import pytest
import six.moves
import skimage.util
import tests.modules
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.module
import cellprofiler.modules.imagemath
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
MEASUREMENT_NAME = "mymeasurement"
@pytest.fixture(scope="function")
def module():
return cellprofiler.modules.imagemath.ImageMath()
@pytest.fixture(scope="function")
def workspace(image_a, image_b, module):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
workspace = cellprofiler_core.workspace.Workspace(
image_set=image_set,
image_set_list=image_set_list,
module=module,
pipeline=cellprofiler_core.pipeline.Pipeline(),
measurements=cellprofiler_core.measurement.Measurements(),
object_set=cellprofiler_core.object.ObjectSet(),
)
workspace.image_set.add("input_a", image_a)
workspace.image_set.add("input_b", image_b)
module.images[0].image_name.value = "input_a"
module.images[0].factor.value = 1.0
module.images[1].image_name.value = "input_b"
module.images[1].factor.value = 1.0
module.truncate_low.value = False
module.truncate_high.value = False
module.output_image_name.value = "output"
return workspace
def run_operation(operation, expected, module, workspace):
module.operation.value = operation
module.replace_nan.value = False
module.run(workspace)
output = workspace.image_set.get_image("output")
actual = output.pixel_data
numpy.testing.assert_array_equal(actual, expected)
class TestVolumes(object):
@staticmethod
@pytest.fixture(scope="function")
def image_a():
k, i, j = numpy.mgrid[-5:6, -5:6, -5:10]
data_a = numpy.zeros((11, 11, 15))
data_a[k ** 2 + i ** 2 + j ** 2 <= 25] = 1
image_a = cellprofiler_core.image.Image()
image_a.pixel_data = data_a
image_a.dimensions = 3
return image_a
@staticmethod
@pytest.fixture(scope="function")
def image_b():
k, i, j = numpy.mgrid[-5:6, -5:6, -10:5]
data_b = numpy.zeros((11, 11, 15))
data_b[k ** 2 + i ** 2 + j ** 2 <= 25] = 0.5
image_b = cellprofiler_core.image.Image()
image_b.pixel_data = data_b
image_b.dimensions = 3
return image_b
@staticmethod
def test_add(image_a, image_b, module, workspace):
operation = "Add"
expected = image_a.pixel_data + image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_subtract(image_a, image_b, module, workspace):
operation = "Subtract"
expected = image_a.pixel_data - image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_absolute_difference(image_a, image_b, module, workspace):
operation = "Absolute Difference"
expected = numpy.abs(image_a.pixel_data - image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_multiply(image_a, image_b, module, workspace):
operation = "Multiply"
expected = image_a.pixel_data * image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_divide(image_a, image_b, module, workspace):
operation = "Divide"
expected = image_a.pixel_data / image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_average(image_a, image_b, module, workspace):
operation = "Average"
expected = (image_a.pixel_data + image_b.pixel_data) / 2.0
run_operation(operation, expected, module, workspace)
@staticmethod
def test_minimum(image_a, image_b, module, workspace):
operation = "Minimum"
expected = numpy.minimum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_maximum(image_a, image_b, module, workspace):
operation = "Maximum"
expected = numpy.maximum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_invert(image_a, module, workspace):
operation = "Invert"
expected = skimage.util.invert(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_log_transform(image_a, module, workspace):
operation = "Log transform (base 2)"
expected = numpy.log2(image_a.pixel_data + 1)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_and(image_a, image_b, module, workspace):
operation = "And"
expected = 1.0 * numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_or(image_a, image_b, module, workspace):
operation = "Or"
expected = | numpy.logical_or(image_a.pixel_data, image_b.pixel_data) | numpy.logical_or |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy import sparse
from scipy.sparse import csgraph
def check_int_type(mat):
return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
mat.dtype, np.uint
)
def test_laplacian_value_error():
for t in int, float, complex:
for m in ([1, 1],
[[[1]]],
[[1, 2, 3], [4, 5, 6]],
[[1, 2], [3, 4], [5, 5]]):
A = np.array(m, dtype=t)
assert_raises(ValueError, csgraph.laplacian, A)
def _explicit_laplacian(x, normed=False):
if sparse.issparse(x):
x = x.toarray()
x = np.asarray(x)
y = -1.0 * x
for j in range(y.shape[0]):
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
if normed:
d = np.diag(y).copy()
d[d == 0] = 1.0
y /= d[:,None]**.5
y /= d[None,:]**.5
return y
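# Illustrative worked example (not one of the test cases): the unnormalized
# Laplacian of the 3-node path graph 0-1-2 is D - A, so each row sums to zero
# and the diagonal holds the node degrees.
def _demo_path_graph_laplacian():
    A = np.array([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]], dtype=float)
    L = csgraph.laplacian(A, normed=False)
    # L == [[ 1, -1,  0],
    #       [-1,  2, -1],
    #       [ 0, -1,  1]]
    return L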
def _check_symmetric_graph_laplacian(mat, normed, copy=True):
if not hasattr(mat, 'shape'):
mat = eval(mat, dict(np=np, sparse=sparse))
if sparse.issparse(mat):
sp_mat = mat
mat = sp_mat.toarray()
else:
sp_mat = sparse.csr_matrix(mat)
mat_copy = np.copy(mat)
sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)
n_nodes = mat.shape[0]
explicit_laplacian = _explicit_laplacian(mat, normed=normed)
laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
copy=copy)
if copy:
assert_allclose(mat, mat_copy)
_assert_allclose_sparse(sp_mat, sp_mat_copy)
else:
if not (normed and check_int_type(mat)):
assert_allclose(laplacian, mat)
if sp_mat.format == 'coo':
_assert_allclose_sparse(sp_laplacian, sp_mat)
assert_allclose(laplacian, sp_laplacian.toarray())
for tested in [laplacian, sp_laplacian.toarray()]:
if not normed:
assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
assert_allclose(tested.T, tested)
assert_allclose(tested, explicit_laplacian)
def test_symmetric_graph_laplacian():
symmetric_mats = (
'np.arange(10) * np.arange(10)[:, np.newaxis]',
'np.ones((7, 7))',
'np.eye(19)',
'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
)
for mat in symmetric_mats:
for normed in True, False:
for copy in True, False:
_check_symmetric_graph_laplacian(mat, normed, copy)
def _assert_allclose_sparse(a, b, **kwargs):
# helper function that can deal with sparse matrices
if sparse.issparse(a):
a = a.toarray()
if sparse.issparse(b):
b = b.toarray()
assert_allclose(a, b, **kwargs)
def _check_laplacian_dtype_none(
A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
mat = arr_type(A, dtype=dtype)
L, d = csgraph.laplacian(
mat,
normed=normed,
return_diag=True,
use_out_degree=use_out_degree,
copy=copy,
dtype=None,
)
if normed and check_int_type(mat):
assert L.dtype == np.float64
assert d.dtype == np.float64
_assert_allclose_sparse(L, desired_L, atol=1e-12)
_assert_allclose_sparse(d, desired_d, atol=1e-12)
else:
assert L.dtype == dtype
assert d.dtype == dtype
desired_L = np.asarray(desired_L).astype(dtype)
desired_d = np.asarray(desired_d).astype(dtype)
_assert_allclose_sparse(L, desired_L, atol=1e-12)
_assert_allclose_sparse(d, desired_d, atol=1e-12)
if not copy:
if not (normed and check_int_type(mat)):
if type(mat) is np.ndarray:
| assert_allclose(L, mat) | numpy.testing.assert_allclose |
# This script implements the sampling-based validation of the analytical results of agent A4
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from GbTaskVars import TaskVars
from GbTask import Task
from GbAgentVars import AgentVars
from GbAgent import Agent
from PHat import PHat
from gb_cumcoeff import gb_cumcoeff
from gb_invcumcoeff import gb_invcumcoeff
from gb_plot_utils import label_subplots, cm2inch
from time import sleep
from tqdm import tqdm
import sys
# Use Latex for matplotlib
pgf_with_latex = {
"pgf.texsystem": "pdflatex",
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": [],
"axes.labelsize": 6,
"font.size": 6,
"legend.fontsize": 6,
"axes.titlesize": 6,
"xtick.labelsize": 6,
"ytick.labelsize": 6,
"figure.titlesize": 6,
"pgf.rcfonts": False,
"figure.dpi": 100,
"text.latex.unicode": True,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{cmbright}",
]
}
# Update parameters
matplotlib.rcParams.update(pgf_with_latex)
# Set random number generator for reproducible results
np.random.seed(123)
# TODO: in the first part of the validation we do not yet sample. Add this in a next version.
# Simulation parameters
T = 26 # Number of trials
n_samples = int(1e6) # Number of samples
n_bins = int(1e2) # Number of bins for density approximation
# Model parameters
kappa = 0.08 # Maximal contrast difference value
sigma = 0.04 # Perceptual sensitivity
# Initialize task and agent objects
# ---------------------------------
# Task parameters in TaskVars object
task_vars = TaskVars()
task_vars.T = T
task_vars.B = 1
task = Task(task_vars)
# Agent parameters in AgentVars object
agent_vars = AgentVars()
agent_vars.agent = 1
agent_vars.sigma = sigma
agent_vars.task_agent_analysis = False
agent = Agent(agent_vars)
agent.d_t = 0 # Fix perceptual decision to d_t = 0
agent.a_t = 0 # Fix action to a_t = 0
# Sampling-based approximation object
p_hat = PHat(n_bins)
# Number of variables x number of samples x number of time points sample matrix
S = np.full([5, n_samples, T], np.nan)
# Simulation scenarios / observed random variable values
# ------------------------------------------------------
O_list = list()
R_list = list()
# Reliable co-occurrence of a clear positive contrast difference and no reward
O_list.append(np.repeat([0.08], T))
R_list.append(np.repeat([0], T))
# Reliable co-occurrence of a clear positive contrast difference and reward
O_list.append(np.repeat([0.08], T))
R_list.append(np.repeat([1], T))
# Clear negative contrast difference and alternating reward
O_list.append(np.repeat([-0.08], T))
R_list.append(np.tile([1, 0], T // 2))
# Weak positive contrast difference and alternating reward
O_list.append(np.repeat([0.02], T))
R_list.append(np.tile([1, 0], T // 2))
# Alternating weak positive and negative contrast differences and alternating reward
O_list.append(np.repeat([0.02, -0.02], T // 2))
R_list.append(np.tile([1, 0], [np.int(T/2)]))
# Resulting number of simulation scenarios
n_sim = len(O_list)
# Initialize variables for the computation of the estimation bias
mu_bias_mean = np.full(T * n_sim, np.nan)
bs0_bias_mean = | np.full(T * n_sim, np.nan) | numpy.full |
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torch import device as torchDevice
from genEM3.util import gpu
class Trainer:
def __init__(self,
run_root: str,
model: nn.Module,
optimizer: torch.optim.Optimizer,
criterion: nn.MSELoss,
data_loaders: {},
num_epoch: int = 100,
log_int: int = 10,
device: str = 'cpu',
save: bool = False,
resume: bool = False,
gpu_id: int = None
):
self.run_root = run_root
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.num_epoch = num_epoch
self.log_int = log_int
self.save = save
self.resume = resume
if device == 'cuda':
gpu.get_gpu(gpu_id)
device = torch.device(torch.cuda.current_device())
self.device = torchDevice(device)
self.log_root = os.path.join(run_root, '.log')
self.data_loaders = data_loaders
        self.data_lengths = dict(
            zip(self.data_loaders.keys(),
                [len(loader) for loader in self.data_loaders.values()]))
if save:
if not os.path.exists(self.log_root):
os.makedirs(self.log_root)
def train(self):
if self.resume:
print('Resuming training ...')
checkpoint = torch.load(os.path.join(self.log_root, 'torch_model'))
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
else:
print('Starting training ...')
writer = SummaryWriter(self.log_root)
self.model = self.model.to(self.device)
epoch = int(self.model.epoch) + 1
it = int(self.model.iteration)
for epoch in range(epoch, epoch + self.num_epoch):
epoch_root = 'epoch_{:02d}'.format(epoch)
if not os.path.exists(os.path.join(self.log_root, epoch_root)):
os.makedirs(os.path.join(self.log_root, epoch_root))
for phase in self.data_loaders.keys():
epoch_loss = 0
if phase == 'train':
self.model.train(True)
else:
self.model.train(False)
running_loss = 0.0
for i, (data, index) in enumerate(self.data_loaders[phase]):
it += 1
# copy input and targets to the device object
inputs = data['input'].to(self.device)
targets = data['target'].to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
if phase == 'train':
loss.backward()
self.optimizer.step()
# print statistics
running_loss += loss.item()
epoch_loss += loss.item()
if (i + 1) % self.log_int == 0:
running_loss_avg = running_loss/self.log_int
print('Phase: ' + phase + ', epoch: {}, batch {}: running loss: {:0.3f}'.
format(self.model.epoch, i + 1, running_loss_avg))
writer.add_scalars('running_loss', {phase: running_loss_avg}, it)
running_loss = 0.0
if phase in ['train', 'val']:
epoch_loss_avg = epoch_loss / self.data_lengths[phase]
print('Phase: ' + phase + ', epoch: {}: epoch loss: {:0.3f}'.
format(epoch, epoch_loss_avg))
writer.add_scalars('epoch_loss', {phase: epoch_loss_avg}, epoch)
writer.add_histogram('input histogram', inputs.cpu().data.numpy()[0, 0].flatten(), epoch)
writer.add_histogram('output histogram', outputs.cpu().data.numpy()[0, 0].flatten(), epoch)
figure_inds = list(range(inputs.shape[0]))
figure_inds = figure_inds if len(figure_inds) < 4 else list(range(4))
fig = Trainer.show_imgs(inputs, outputs, figure_inds)
fig.savefig(os.path.join(self.log_root, epoch_root, phase+'.png'))
writer.add_figure(
'images ' + phase, fig, epoch)
if self.save & (phase == 'train'):
print('Writing model graph...')
writer.add_graph(self.model, inputs)
print('Saving model state...')
self.model.epoch = torch.nn.Parameter(torch.tensor(epoch), requires_grad=False)
self.model.iteration = torch.nn.Parameter(torch.tensor(it), requires_grad=False)
torch.save({
'model_state_dict': self.model.state_dict(),
}, os.path.join(self.log_root, epoch_root, 'model_state_dict'))
torch.save({
'optimizer_state_dict': self.optimizer.state_dict()
}, os.path.join(self.log_root, 'optimizer_state_dict'))
print('Finished training ...')
writer.close()
print('Writer closed ...')
# dictionary of accuracy metrics for tune hyperparameter optimization
return {"val_loss_avg": epoch_loss_avg}
@staticmethod
def copy2cpu(inputs, outputs):
if inputs.is_cuda:
inputs = inputs.cpu()
if outputs.is_cuda:
outputs = outputs.cpu()
return inputs, outputs
@staticmethod
def n1hw_to_n3hw(data):
return data.cpu().repeat(1, 3, 1, 1)
@staticmethod
def show_img(inputs, outputs, idx):
inputs, outputs = Trainer.copy2cpu(inputs, outputs)
fig, axs = plt.subplots(1, 2, figsize=(4, 3))
axs[0].imshow(inputs[idx].data.numpy().squeeze(), cmap='gray')
axs[1].imshow(outputs[idx].data.numpy().squeeze(), cmap='gray')
return fig
@staticmethod
def show_imgs(inputs, outputs, inds):
inputs, outputs = Trainer.copy2cpu(inputs, outputs)
fig, axs = plt.subplots(1, len(inds), figsize=(3*len(inds), 6))
for i, idx in enumerate(inds):
input_ = inputs[idx].data.numpy().squeeze()
output = outputs[idx].data.numpy().squeeze()
if input_.shape != output.shape:
output = | np.tile(output, input_.shape) | numpy.tile |
"""
PSF calculations for cylindrical lenses
see e.g.
Purnapatra, <NAME>, <NAME>.
Determination of electric field at and near the focus of a cylindrical lens for applications in fluorescence microscopy (2013)
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import time
from gputools import OCLArray, OCLProgram
def absPath(myPath):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath)
def _poly_points(N=6):
"""returns the coordinates of a regular polygon on the unit circle"""
ts = np.pi*(.5+2./N*np.arange(N))
return np.stack([np.cos(ts), np.sin(ts)])
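# Illustrative sketch (not part of the published module): for N = 6,
# _poly_points() returns the vertices of a regular hexagon on the unit circle,
# starting at angle pi/2 and spaced by 2*pi/N. These unit directions
# correspond to the k-point formula in the focus_field_lattice() docstring,
# scaled there by arcsin((NA1 + NA2) / (2 * n0)) onto the pupil annulus.
def _demo_hex_points():
    pts = _poly_points(6)  # shape (2, 6): first row cos(ts), second row sin(ts)
    angles = np.degrees(np.arctan2(pts[1], pts[0]))
    return np.round(angles)  # -> [90., 150., -150., -90., -30., 30.]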
def focus_field_lattice(shape=(128, 128, 128),
units=(0.1, 0.1, 0.1),
lam=.5, NA1=.4, NA2=.5,
sigma=.1,
kpoints=6,
return_all_fields=False,
n0=1., n_integration_steps=100):
"""Calculates the focus field for a bessel lattice.
    The pupil function consists of discrete points (kpoints) superimposed on an annulus (NA1 < NA2)
which are smeared out by a 1d gaussian of given sigma creating an array of bessel beams in the
focal plane (see [3]_ ).
Parameters
----------
shape: Nx,Ny,Nz
the shape of the geometry
units: dx,dy,dz
the pixel sizes in microns
lam: float
the wavelength of light used in microns
NA1: float/list
the numerical aperture of the inner ring
NA2: float/list
the numerical aperture of the outer ring
sigma: float
the standard deviation of the gaussian smear function applied to each point on the aperture
(the bigger sigma, the tighter the sheet in y)
kpoints: int/ (2,N) array
defines the set of points on the aperture that create the lattice, can be
- a (2,N) ndarray, such that kpoints[:,i] are the coordinates of the ith point
- a single int, defining points on a regular polygon (e.g. 4 for a square lattice, 6 for a hex lattice)
:math:`k_i = \\arcsin\\frac{NA_1+NA_2}{2 n_0} \\begin{pmatrix} \\cos \\phi_i \\\\ \\sin \\phi_i \\end{pmatrix}\quad, \\phi_i = \\frac{\\pi}{2}+\\frac{2i}{N}`
n0: float
the refractive index of the medium
n_integration_steps: int
number of integration steps to perform
return_all_fields: boolean
if True, returns u,ex,ey,ez where ex/ey/ez are the complex vector field components
Returns
-------
u: ndarray
the intensity of the focus field
(u,ex,ey,ez): list(ndarray)
the intensity of the focus field and the complex field components (if return_all_fields is True)
Example
-------
>>> u = focus_field_lattice((128,128,128), (0.1,0.1,.1), lam=.5, NA1 = .44, NA2 = .55, kpoints = 6)
References
----------
.. [3] Chen et al. Lattice light-sheet microscopy: imaging molecules to embryos at high spatiotemporal resolution. Science 346, (2014).
"""
alpha1 = np.arcsin(1.*NA1/n0)
alpha2 = np.arcsin(1.*NA2/n0)
if | np.isscalar(kpoints) | numpy.isscalar |
import matplotlib, zipfile
matplotlib.use('agg')
import sys, numpy as np, matplotlib.pyplot as plt, os, tools21cm as t2c, matplotlib.gridspec as gridspec
from sklearn.metrics import matthews_corrcoef
from glob import glob
from tensorflow.keras.models import load_model
from tqdm import tqdm
from config.net_config import PredictionConfig
from utils.other_utils import RotateCube
from utils_network.metrics import iou, iou_loss, dice_coef, dice_coef_loss, balanced_cross_entropy, phi_coef
from utils_network.prediction import SegUnet21cmPredict
from myutils.utils import OrderNdimArray
title_a = '\t\t _ _ _ _ _ \n\t\t| | | | \ | | | | \n\t\t| | | | \| | ___| |_ \n\t\t| | | | . ` |/ _ \ __|\n\t\t| |__| | |\ | __/ |_ \n\t\t \____/|_| \_|\___|\__|\n'
title_b = ' _____ _ _ _ ___ __ \n| __ \ | (_) | | |__ \/_ | \n| |__) | __ ___ __| |_ ___| |_ ___ ) || | ___ _ __ ___ \n| ___/ `__/ _ \/ _` | |/ __| __/ __| / / | |/ __| `_ ` _ \ \n| | | | | __/ (_| | | (__| |_\__ \ / /_ | | (__| | | | | |\n|_| |_| \___|\__,_|_|\___|\__|___/ |____||_|\___|_| |_| |_|\n'
print(title_a+'\n'+title_b)
config_file = sys.argv[1]
conf = PredictionConfig(config_file)
PATH_OUT = conf.path_out
PATH_INPUT = conf.path+conf.pred_data
print(' PATH_INPUT = %s' %PATH_INPUT)
if(PATH_INPUT[-3:] == 'zip'):
ZIPFILE = True
PATH_IN_ZIP = PATH_INPUT[PATH_INPUT.rfind('/')+1:-4]+'/'
PATH_UNZIP = PATH_INPUT[:PATH_INPUT.rfind('/')+1]
MAKE_PLOTS = True
# load model
avail_metrics = {'binary_accuracy':'binary_accuracy', 'iou':iou, 'dice_coef':dice_coef, 'iou_loss':iou_loss, 'dice_coef_loss':dice_coef_loss, 'phi_coef':phi_coef, 'mse':'mse', 'mae':'mae', 'binary_crossentropy':'binary_crossentropy', 'balanced_cross_entropy':balanced_cross_entropy}
MODEL_EPOCH = conf.best_epoch
METRICS = [avail_metrics[m] for m in np.append(conf.loss, conf.metrics)]
cb = {func.__name__:func for func in METRICS if not isinstance(func, str)}
model = load_model('%smodel-sem21cm_ep%d.h5' %(PATH_OUT+'checkpoints/', MODEL_EPOCH), custom_objects=cb)
try:
os.makedirs(PATH_OUT+'predictions')
except:
pass
PATH_OUT += 'predictions/pred_tobs1200/'
print(' PATH_OUTPUT = %s' %PATH_OUT)
try:
os.makedirs(PATH_OUT+'data')
os.makedirs(PATH_OUT+'plots')
except:
pass
if(os.path.exists('%sastro_data.txt' %PATH_OUT)):
astr_data = np.loadtxt('%sastro_data.txt' %PATH_OUT, unpack=True)
restarts = astr_data[6:].argmin(axis=1)
if(all(int(np.mean(restarts)) == restarts)):
restart = int(np.mean(restarts))
print(' Restart from idx=%d' %restart)
else:
ValueError(' Restart points does not match.')
phicoef_seg, phicoef_err, phicoef_sp, xn_mask, xn_seg, xn_err, xn_sp, b0_true, b1_true, b2_true, b0_seg, b1_seg, b2_seg, b0_sp, b1_sp, b2_sp = astr_data[6:]
astr_data = astr_data[:6]
else:
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
astr_data = np.loadtxt(myzip.open('%sastro_params.txt' %PATH_IN_ZIP), unpack=True)
else:
astr_data = np.loadtxt('%sastro_params.txt' %PATH_INPUT, unpack=True)
restart = 0
phicoef_seg = np.zeros(astr_data.shape[1])
phicoef_err = np.zeros_like(phicoef_seg)
phicoef_sp = np.zeros_like(phicoef_seg)
xn_mask = np.zeros_like(phicoef_seg)
xn_seg = np.zeros_like(phicoef_seg)
xn_err = np.zeros_like(phicoef_seg)
xn_sp = np.zeros_like(phicoef_sp)
b0_true = np.zeros_like(phicoef_sp)
b1_true = np.zeros_like(phicoef_sp)
b2_true = np.zeros_like(phicoef_sp)
b0_sp = np.zeros_like(phicoef_sp)
b1_sp = np.zeros_like(phicoef_sp)
b2_sp = np.zeros_like(phicoef_sp)
b0_seg = np.zeros_like(phicoef_sp)
b1_seg = np.zeros_like(phicoef_sp)
b2_seg = np.zeros_like(phicoef_sp)
params = {'HII_DIM':128, 'DIM':384, 'BOX_LEN':256}
my_ext = [0, params['BOX_LEN'], 0, params['BOX_LEN']]
zc = (astr_data[1,:] < 7.5) + (astr_data[1,:] > 8.3)
c1 = (astr_data[5,:]<=0.25)*(astr_data[5,:]>=0.15)*zc
c2 = (astr_data[5,:]<=0.55)*(astr_data[5,:]>=0.45)*zc
c3 = (astr_data[5,:]<=0.85)*(astr_data[5,:]>=0.75)*zc
indexes = astr_data[0,:]
new_idx = indexes[c1+c2+c3].astype(int)
#for i in tqdm(range(restart, astr_data.shape[1])):
print(new_idx)
for new_i in tqdm(range(3, new_idx.size)):
i = new_idx[new_i]
z = astr_data[1,i]
zeta = astr_data[2,i]
Rmfp = astr_data[3,i]
Tvir = astr_data[4,i]
xn = astr_data[5,i]
#print('z = %.3f x_n =%.3f zeta = %.3f R_mfp = %.3f T_vir = %.3f' %(z, xn, zeta, Rmfp, Tvir))
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
f = myzip.extract(member='%simage_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
dT3 = t2c.read_cbin(f)
f = myzip.extract(member='%smask_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
mask_xn = t2c.read_cbin(f)
os.system('rm -r %s/' %(PATH_UNZIP+PATH_IN_ZIP))
else:
dT3 = t2c.read_cbin('%simage_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
mask_xn = t2c.read_cbin('%smask_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
# Calculate Betti number
b0_true[i] = t2c.betti0(data=mask_xn)
b1_true[i] = t2c.betti1(data=mask_xn)
b2_true[i] = t2c.betti2(data=mask_xn)
xn_mask[i] = np.mean(mask_xn)
plt.rcParams['font.size'] = 20
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = False
plt.rcParams['ytick.right'] = False
plt.rcParams['axes.linewidth'] = 1.2
ls = 22
# -------- predict with SegUnet 3D --------
    print(' calculating prediction for data i = %d...' %i)
X_tta = SegUnet21cmPredict(unet=model, x=dT3, TTA=True)
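    # TTA returns one prediction per augmented view: the rounded mean gives the
    # binary segmentation, the standard deviation a per-pixel uncertainty map.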
X_seg = np.round(np.mean(X_tta, axis=0))
X_seg_err = np.std(X_tta, axis=0)
# calculate Matthew coef and mean neutral fraction
phicoef_seg[i] = matthews_corrcoef(mask_xn.flatten(), X_seg.flatten())
xn_seg[i] = np.mean(X_seg)
# calculate errors
phicoef_tta = np.zeros(X_tta.shape[0])
xn_tta = np.zeros(X_tta.shape[0])
for k in tqdm(range(len(X_tta))):
xn_tta[k] = np.mean(np.round(X_tta[k]))
phicoef_tta[k] = matthews_corrcoef(mask_xn.flatten(), np.round(X_tta[k]).flatten())
    xn_err[i] = np.std(xn_tta)
"""Power operator."""
import numpy
from ..baseclass import Dist, StochasticallyDependentError
from .. import evaluation
class Pow(Dist):
"""Power operator."""
def __init__(self, left, right):
"""
Constructor.
Args:
left (Dist, numpy.ndarray) : Left hand side.
right (Dist, numpy.ndarray) : Right hand side.
"""
Dist.__init__(self, left=left, right=right)
def _bnd(self, xloc, left, right, cache):
"""
Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).range([-2, 0, 2, 4]))
[[0.5 0.5 0.5 0.5]
[1. 1. 1. 1. ]]
>>> print(chaospy.Pow(2, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[1. 1. 1. 1.]
[2. 2. 2. 2.]]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).range([-2, 0, 2, 4]))
[[0.5 0.5 0.5 0.5]
[1. 1. 1. 1. ]]
>>> print(chaospy.Pow(2, 3).range([-2, 0, 2, 4]))
[[8. 8. 8. 8.]
[8. 8. 8. 8.]]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right, left**right
else:
output = numpy.ones(xloc.shape)
left = left * output
assert numpy.all(left >= 0), "root of negative number"
indices = xloc > 0
output[indices] = numpy.log(xloc[indices])
output[~indices] = -numpy.inf
indices = left != 1
output[indices] /= numpy.log(left[indices])
output = evaluation.evaluate_bound(right, output, cache=cache)
output = left**output
output[:] = (
numpy.where(output[0] < output[1], output[0], output[1]),
numpy.where(output[0] < output[1], output[1], output[0]),
)
return output
output = numpy.zeros(xloc.shape)
right = right + output
indices = right > 0
output[indices] = numpy.abs(xloc[indices])**(1/right[indices])
output[indices] *= numpy.sign(xloc[indices])
output[right == 0] = 1
output[(xloc == 0) & (right < 0)] = numpy.inf
output = evaluation.evaluate_bound(left, output, cache=cache)
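        # For even exponents both +x and -x map onto the same value, so the lower
        # bound collapses to 0 whenever the base's range straddles zero.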
pair = right % 2 == 0
bnd_ = numpy.empty(output.shape)
bnd_[0] = numpy.where(pair*(output[0]*output[1] < 0), 0, output[0])
bnd_[0] = numpy.where(pair*(output[0]*output[1] > 0), \
numpy.min(numpy.abs(output), 0), bnd_[0])**right
bnd_[1] = numpy.where(pair, numpy.max(numpy.abs(output), 0),
output[1])**right
bnd_[0], bnd_[1] = (
numpy.where(bnd_[0] < bnd_[1], bnd_[0], bnd_[1]),
numpy.where(bnd_[0] < bnd_[1], bnd_[1], bnd_[0]),
)
return bnd_
def _cdf(self, xloc, left, right, cache):
"""
Cumulative distribution function.
Example:
>>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.5 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.70710678 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.33333333 0.75 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5849625 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.26303441 0.67807191 1. ]
>>> print(chaospy.Pow(2, 3).fwd([7, 8, 9]))
[0. 1. 1.]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.inf
else:
assert numpy.all(left > 0), "imaginary result"
y = (numpy.log(numpy.abs(xloc) + 1.*(xloc <= 0)) /
numpy.log(numpy.abs(left)+1.*(left == 1)))
out = evaluation.evaluate_forward(right, y)
out = numpy.where(xloc <= 0, 0., out)
return out
y = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) != -1
out1, out2 = (
evaluation.evaluate_forward(left, y, cache=cache),
evaluation.evaluate_forward(left, -y, cache=cache),
)
out = numpy.where(right < 0, 1-out1, out1-pairs*out2)
return out
def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out
def _pdf(self, xloc, left, right, cache):
"""
Probability density function.
Example:
>>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5]))
[0. 1. 0. 0.]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5]))
[0. 0.70710678 0. 0. ]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).pdf([0.4, 0.6, 0.8, 1.2]))
[0. 2.77777778 1.5625 0. ]
>>> print(chaospy.Pow(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.96179669 0. ]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).pdf([0.4, 0.6, 0.8, 1.2]))
[0. 2.40449173 1.8033688 0. ]
>>> print(chaospy.Pow(2, 3).pdf([7, 8, 9]))
[ 0. inf 0.]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.inf
else:
assert numpy.all(left > 0), "imaginary result"
x_ = numpy.where(xloc <= 0, -numpy.inf,
numpy.log(xloc + 1.*(xloc<=0))/numpy.log(left+1.*(left == 1)))
num_ = numpy.log(left+1.*(left == 1))*xloc
num_ = num_ + 1.*(num_==0)
out = evaluation.evaluate_density(right, x_, cache=cache)/num_
return out
x_ = numpy.sign(xloc)*numpy.abs(xloc)**(1./right -1)
xloc = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) == 1
out = evaluation.evaluate_density(left, xloc, cache=cache)
if numpy.any(pairs):
out = out + pairs*evaluation.evaluate_density(left, -xloc, cache=cache)
        out = numpy.sign(right)
import abc
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
import mujoco_py
from scipy.linalg import expm
import robosuite.utils.macros as macros
import robosuite.utils.angle_transformation as at
from robosuite.utils.control_utils import *
import robosuite.utils.transform_utils as T
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.interpolate import interp1d
class Controller(object, metaclass=abc.ABCMeta):
"""
General controller interface.
Requires reference to mujoco sim object, eef_name of specific robot, relevant joint_indexes to that robot, and
whether an initial_joint is used for nullspace torques or not
Args:
sim (MjSim): Simulator instance this controller will pull robot state updates from
eef_name (str): Name of controlled robot arm's end effector (from robot XML)
joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:
:`'joints'`: list of indexes to relevant robot joints
:`'qpos'`: list of indexes to relevant robot joint positions
:`'qvel'`: list of indexes to relevant robot joint velocities
actuator_range (2-tuple of array of float): 2-Tuple (low, high) representing the robot joint actuator range
"""
def __init__(self,
sim,
eef_name,
joint_indexes,
actuator_range,
plotting,
collect_data,
simulation_total_time,
):
# Actuator range
self.actuator_min = actuator_range[0]
self.actuator_max = actuator_range[1]
# Attributes for scaling / clipping inputs to outputs
self.action_scale = None
self.action_input_transform = None
self.action_output_transform = None
# Private property attributes
self.control_dim = None
self.output_min = None
self.output_max = None
self.input_min = None
self.input_max = None
# mujoco simulator state
self.sim = sim
self.model_timestep = macros.SIMULATION_TIMESTEP
self.eef_name = eef_name
self.joint_index = joint_indexes["joints"]
self.qpos_index = joint_indexes["qpos"]
self.qvel_index = joint_indexes["qvel"]
# robot states
self.ee_pos = None
self.ee_ori_mat = None
self.ee_pos_vel = None
self.ee_ori_vel = None
self.joint_pos = None
self.joint_vel = None
# dynamics and kinematics
self.J_pos = None
self.J_ori = None
self.J_full = None
self.mass_matrix = None
self.interaction_forces = None
self.interaction_forces_vec = []
self.PD_force_command = []
self.desired_frame_FT_vec = []
self.desired_frame_imp_position_vec = []
self.desired_frame_imp_ori_vec = []
self.desired_frame_imp_vel_vec = []
self.desired_frame_imp_ang_vel_vec = []
# Joint dimension
self.joint_dim = len(joint_indexes["joints"])
# Torques being outputted by the controller
self.torques = None
# Update flag to prevent redundant update calls
self.new_update = True
# Move forward one timestep to propagate updates before taking first update
self.sim.forward()
# Initialize controller by updating internal state and setting the initial joint, pos, and ori
self.update()
self.initial_joint = self.joint_pos
self.initial_ee_pos = self.ee_pos
self.initial_ee_ori_mat = self.ee_ori_mat
# minimum jerk specification - EC
self.initial_position = self.initial_ee_pos
self.final_position = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id("hole_middle_cylinder")])
self.final_position = [self.initial_position[0], self.initial_position[1]-0.2, self.initial_position[2]]
self.initial_orientation = self.initial_ee_ori_mat
# self.final_orientation = np.array([[1, 0, 0],
# [0, 0, 1],
# [0, -1, 0]]) # peg horizantal
self.final_orientation = np.array([[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]) # peg vertical (pointing down)
self.euler_initial_orientation = R.from_matrix(self.initial_orientation).as_euler('xyz', degrees=False)
self.euler_final_orientation = R.from_matrix(self.final_orientation).as_euler('xyz', degrees=False)
indexes_for_correction = np.abs(self.euler_final_orientation - self.euler_initial_orientation) > np.pi
correction = np.sign(self.euler_final_orientation) * (2 * np.pi) * indexes_for_correction
self.euler_final_orientation = self.euler_final_orientation - correction
self.simulation_total_time = simulation_total_time # from main
# EC - Run further definition and class variables
self._specify_constants()
# EC - this is the vector for the impedance equations
self.X_m = np.zeros((12, 1))
self.is_contact = False # becomes true when the peg hits the table
self.contact_time = 0.0
self.first_contact = True
self.Delta_T = 0.002
self.f_0 = np.array([0, 0, 0, 0, 0, 0])
self.K = 5000
self.M = 5
Wn = np.sqrt(self.K / self.M)
zeta = 0.707
# zeta = 1
self.C = 2 * self.M * zeta * Wn
# C = 0
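        # Diagonal (axis-decoupled) gains: the second-order impedance dynamics
        # M*x_dd + C*x_d + K*x = F are well damped with zeta = 0.707.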
self.K_imp = self.K * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
self.C_imp = self.C * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
self.M_imp = self.M * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
# define if you want to plot some data
self.collect_data = collect_data
self.plotting = plotting
def impedance_computations(self):
# EC - compute next impedance Xm(n+1) and Vm(n+1) in world base frame.
# state space formulation
# X=[xm;thm;xm_d;thm_d] U=[F_int;M_int]
M_inv = np.linalg.pinv(self.M_imp)
A_1 = np.concatenate((np.zeros([6, 6], dtype=int), np.identity(6)), axis=1)
A_2 = np.concatenate((np.dot(-M_inv, self.K_imp), np.dot(-M_inv, self.C_imp)), axis=1)
A = np.concatenate((A_1, A_2), axis=0)
B_1 = np.zeros([6, 6], dtype=int)
B_2 = M_inv
B = np.concatenate((B_1, B_2), axis=0)
# discrete state space A, B matrices interaction_forces
A_d = expm(A * self.Delta_T)
B_d = np.dot(np.dot(np.linalg.pinv(A), (A_d - np.identity(A_d.shape[0]))), B)
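        # Zero-order-hold discretization: A_d = exp(A*dt), B_d = A^-1 (A_d - I) B.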
# convert the forces and torques to the desired frame
Rotation_world_to_desired = R.from_euler("xyz", self.min_jerk_orientation, degrees=False).as_matrix()
Rotation_desired_to_world = Rotation_world_to_desired.T
F_d = Rotation_desired_to_world @ self.interaction_forces[:3]
M_d = Rotation_desired_to_world @ self.interaction_forces[3:6]
f_0 = np.concatenate((Rotation_desired_to_world @ self.f_0[:3],
Rotation_desired_to_world @ self.f_0[3:6]), axis=0)
U = (f_0 + np.concatenate((F_d, M_d), axis=0)).reshape(6, 1)
# only for graphs!
if self.collect_data:
self.desired_frame_FT_vec.append(np.array(U))
self.desired_frame_imp_position_vec.append(np.array((self.X_m[:3]).reshape(3, )))
self.desired_frame_imp_ori_vec.append(np.array((self.X_m[3:6]).reshape(3, )))
self.desired_frame_imp_vel_vec.append(np.array((self.X_m[6:9]).reshape(3, )))
self.desired_frame_imp_ang_vel_vec.append(np.array((self.X_m[9:12]).reshape(3, )))
# discrete state solution X(k+1)=Ad*X(k)+Bd*U(k)
X_m_next = np.dot(A_d, self.X_m.reshape(12, 1)) + np.dot(B_d, U)
self.X_m = deepcopy(X_m_next)
return
def _min_jerk(self):
"""
EC
Compute the value of position velocity and acceleration in a minimum jerk trajectory
"""
t = self.time
x_traj = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.X_init
y_traj = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.Y_init
z_traj = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.Z_init
self.min_jerk_position = np.array([x_traj, y_traj, z_traj])
# velocities
vx = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vy = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vz = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
self.min_jerk_velocity = np.array([vx, vy, vz])
# acceleration
ax = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
ay = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
az = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
self.min_jerk_acceleration = np.array([ax, ay, az])
# euler xyz representation
alfa = (self.euler_final_orientation[0] - self.euler_initial_orientation[0]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[0]
beta = (self.euler_final_orientation[1] - self.euler_initial_orientation[1]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[1]
gamma = (self.euler_final_orientation[2] - self.euler_initial_orientation[2]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[2]
alfa_dot = (self.euler_final_orientation[0] - self.euler_initial_orientation[0]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
beta_dot = (self.euler_final_orientation[1] - self.euler_initial_orientation[1]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
gamma_dot = (self.euler_final_orientation[2] - self.euler_initial_orientation[2]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
self.min_jerk_orientation = np.array([alfa, beta, gamma])
self.min_jerk_orientation_dot = np.array([alfa_dot, beta_dot, gamma_dot])
R_world_to_body = R.from_euler('xyz', self.min_jerk_orientation, degrees=False).as_matrix()
# w = T*V -- the angular velocity
self.min_jerk_ang_vel = R_world_to_body @ (T.T_mat(self.min_jerk_orientation) @
self.min_jerk_orientation_dot.T)
return
def _specify_constants(self):
"""
EC
Assign constants in class variables
"""
self.X_init = self.initial_position[0]
self.Y_init = self.initial_position[1]
self.Z_init = self.initial_position[2]
self.X_final = self.final_position[0]
self.Y_final = self.final_position[1]
self.Z_final = self.final_position[2]
self.min_jerk_position = None
self.min_jerk_velocity = None
self.min_jerk_acceleration = None
self.min_jerk_orientation = None
self.min_jerk_orientation_dot = None
self.min_jerk_ang_vel = None
self.min_jerk_ang_acc = None
self.min_jerk_position_vec = []
self.min_jerk_velocity_vec = []
self.min_jerk_acceleration_vec = []
self.min_jerk_orientation_vec = []
self.min_jerk_orientation_dot_vec = []
self.min_jerk_angle_velocity_vec = []
self.tfinal = 2 # this is for the minimum jerk
self.time = 0.0
self.time_vec = []
self.real_position = None
self.real_velocity = None
self.real_orientation = None
self.real_angle_velocity = None
self.real_position_vec = []
self.real_velocity_vec = []
self.real_orientation_vec = []
self.real_angle_velocity_vec = []
self.impedance_orientation = []
self.impedance_position_vec = []
self.impedance_velocity_vec = []
self.impedance_acceleration_vec = []
self.impedance_orientation_vec = []
self.impedance_angle_velocity_vec = []
@abc.abstractmethod
def run_controller(self):
"""
Abstract method that should be implemented in all subclass controllers, and should convert a given action
into torques (pre gravity compensation) to be executed on the robot.
Additionally, resets the self.new_update flag so that the next self.update call will occur
"""
self.new_update = True
def scale_action(self, action):
"""
Clips @action to be within self.input_min and self.input_max, and then re-scale the values to be within
the range self.output_min and self.output_max
Args:
action (Iterable): Actions to scale
Returns:
np.array: Re-scaled action
"""
if self.action_scale is None:
self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min)
self.action_output_transform = (self.output_max + self.output_min) / 2.0
self.action_input_transform = (self.input_max + self.input_min) / 2.0
action = np.clip(action, self.input_min, self.input_max)
transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform
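        # Example (hypothetical ranges): with input range [-1, 1] and output range
        # [-0.05, 0.05], an action of 0.5 is mapped to 0.025.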
return transformed_action
def update(self, force=False):
"""
Updates the state of the robot arm, including end effector pose / orientation / velocity, joint pos/vel,
jacobian, and mass matrix. By default, since this is a non-negligible computation, multiple redundant calls
will be ignored via the self.new_update attribute flag. However, if the @force flag is set, the update will
occur regardless of that state of self.new_update. This base class method of @run_controller resets the
self.new_update flag
Args:
force (bool): Whether to force an update to occur or not
"""
# Only run update if self.new_update or force flag is set
# if self.new_update or force:
self.sim.forward()
self.time = self.sim.data.time
self.peg_edge = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id("peg_site")])
self.ee_pos = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id(self.eef_name)])
self.ee_ori_mat = np.array(self.sim.data.site_xmat[self.sim.model.site_name2id(self.eef_name)].reshape([3, 3]))
self.ee_pos_vel = np.array(self.sim.data.site_xvelp[self.sim.model.site_name2id(self.eef_name)])
self.ee_ori_vel = np.array(self.sim.data.site_xvelr[self.sim.model.site_name2id(self.eef_name)])
self.joint_pos = np.array(self.sim.data.qpos[self.qpos_index])
self.joint_vel = np.array(self.sim.data.qvel[self.qvel_index])
self.J_pos = np.array(self.sim.data.get_site_jacp(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_ori = np.array(self.sim.data.get_site_jacr(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_full = np.array(np.vstack([self.J_pos, self.J_ori]))
mass_matrix = np.ndarray(shape=(len(self.sim.data.qvel) ** 2,), dtype=np.float64, order='C')
mujoco_py.cymj._mj_fullM(self.sim.model, mass_matrix, self.sim.data.qM)
mass_matrix = np.reshape(mass_matrix, (len(self.sim.data.qvel), len(self.sim.data.qvel)))
self.mass_matrix = mass_matrix[self.qvel_index, :][:, self.qvel_index]
# EC - force readings
# the forces needs to be transform to the world base frame
# the minus sign is because the measured forces are the forces that the robot apply on the environment
forces_world = np.dot(self.ee_ori_mat, -self.sim.data.sensordata[:3])
torques_world = np.dot(self.ee_ori_mat, -self.sim.data.sensordata[3:6])
self.interaction_forces = np.concatenate((forces_world, torques_world), axis=0)
# Clear self.new_update
self.new_update = False
def update_base_pose(self, base_pos, base_ori):
"""
Optional function to implement in subclass controllers that will take in @base_pos and @base_ori and update
internal configuration to account for changes in the respective states. Useful for controllers e.g. IK, which
is based on pybullet and requires knowledge of simulator state deviations between pybullet and mujoco
Args:
base_pos (3-tuple): x,y,z position of robot base in mujoco world coordinates
            base_ori (4-tuple): x,y,z,w orientation of robot base in mujoco world coordinates
"""
pass
def update_initial_joints(self, initial_joints):
"""
Updates the internal attribute self.initial_joints. This is useful for updating changes in controller-specific
behavior, such as with OSC where self.initial_joints is used for determine nullspace actions
This function can also be extended by subclassed controllers for additional controller-specific updates
Args:
initial_joints (Iterable): Array of joint position values to update the initial joints
"""
        self.initial_joint = np.array(initial_joints)
# Copyright 2020 The Caer Authors. All Rights Reserved.
#
# Licensed under the MIT License (see LICENSE);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at <https://opensource.org/licenses/MIT>
#
# ==============================================================================
#pylint: disable=bare-except
import numpy as np
from ._split import train_test_split
from .path import listdir
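# Thin pass-throughs to the corresponding numpy routines, presumably kept so that
# callers can stay within the caer namespace.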
def median(arr, axis=None):
return np.median(arr, axis=axis)
def npmean(arr):
return np.mean(arr)
def array(obj, dtype=None, order='K'):
return np.array(obj, dtype=dtype, order=order)
def to_array(obj, dtype=None, order='K'):
return np.array(obj, dtype=dtype, order=order)
def asarray(obj, dtype=None, order=None):
return np.asarray(obj, dtype=dtype, order=order)
def load(filename, allow_pickle=False):
    return np.load(filename, allow_pickle=allow_pickle)
# python -m unittest tests/test_ml_training.py
import copy
import numpy as np
import pandas as pd
import os
import shutil
import unittest
from collections import OrderedDict
from subroutines.exceptions import AlgorithmError, create_generator
from subroutines.train import (
make_separate_subclass_splits, bootstrap_data, make_feat_importance_plots,
check_arguments, RunML
)
class TestClass(unittest.TestCase):
def test_make_separate_subclass_splits(self):
"""
Tests make_separate_subclass_splits in train.py
"""
print('Testing make_separate_subclass_splits')
exp_input_dict = {
1: [['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'],
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
2: [np.array([['A', 'B', 'C', 'D'], ['B', 'A', 'D', 'C'], ['C', 'A', 'D', 'B']], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
3: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', np.nan, 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
4: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
[['A', 'C'], ['B', 'D']]],
5: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([[np.nan, 'C'], ['B', 'D']], dtype=object)],
6: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'A']], dtype=object)],
7: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
8: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D'], ['E', 'F']], dtype=object)],
9: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)]
}
for num in exp_input_dict.keys():
subclasses = exp_input_dict[num][0]
subclass_splits = exp_input_dict[num][1]
if num == 1:
with self.assertRaises(TypeError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a (1D) '
'array of subclass values'
)
elif num == 2:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a 1D array'
)
elif num == 3:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in '
'"subclasses" array'
)
elif num == 4:
with self.assertRaises(TypeError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclass_splits" to be a '
'(2D) array of subclass values'
)
elif num == 5:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in '
'"subclass_splits" array'
)
elif num == 6:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Repeated subclass labels detected '
'in "subclass_splits"'
)
elif num == 7:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Subclass E is found in '
'"subclasses" but not "subclass_splits"'
)
elif num == 8:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Subclass F is found in '
'"subclass_splits" but not "subclasses"'
)
elif num == 9:
exp_split = (sub_list for sub_list in
[np.array([0, 2, 5, 7, 8, 9]),
np.array([1, 3, 4, 6, 10, 11])])
act_split = make_separate_subclass_splits(subclasses, subclass_splits)
for i, split_1 in enumerate(list(exp_split)):
for j, split_2 in enumerate(list(act_split)):
if i == j:
np.testing.assert_equal(split_1, split_2)
def test_bootstrap_data(self):
"""
Tests bootstrap_data in train.py
"""
print('Testing bootstrap_data')
exp_input_dict = {
1: [[[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]],
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], True],
2: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
['1', '2', '3'], True],
3: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array([['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']]),
['1', '2', '3'], True],
4: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']),
['1', '2', '3'], True],
5: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
np.array(['1', '2', '3']), True],
6: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3', '4'], True],
7: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], 1.0],
8: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], False],
9: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], True]
}
for num in exp_input_dict.keys():
x = exp_input_dict[num][0]
y = exp_input_dict[num][1]
features = exp_input_dict[num][2]
scale = exp_input_dict[num][3]
if num == 1:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of x'
' values'
)
if num == 2:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of y'
' values'
)
if num == 3:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a 1D array of y '
'values'
)
if num == 4:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Different numbers of rows in '
'arrays "x" and "y"'
)
if num == 5:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "features" to be a list'
)
if num == 6:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect entries in "features" list '
'to correspond to the columns in "x"'
)
if num == 7:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "scale" to be a Boolean '
'value (either True or False)'
)
if num == 8:
exp_out_x = pd.DataFrame(
np.array([[1.0, 1.5, 1.2],
[3.4, 2.5, 1.4],
[4.6, 2.3, 2.1],
[1.8, 1.1, 0.6],
[0.7, 0.9, 0.7],
[4.1, 3.3, 2.6],
[4.0, 4.0, 3.1],
[1.0, 1.5, 1.2],
[3.4, 2.5, 1.4],
[4.1, 3.3, 2.6]]),
index=None, columns=features
)
exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f']
act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True)
pd.testing.assert_frame_equal(exp_out_x, act_out_x)
self.assertEqual(exp_out_y, act_out_y)
if num == 9:
exp_out_x = pd.DataFrame(
np.array([[-0.83478261, -0.5625, -0.15686275],
[0., 0.0625, 0.],
[0.4173913, -0.0625, 0.54901961],
[-0.55652174, -0.8125, -0.62745098],
[-0.93913043, -0.9375, -0.54901961],
[0.24347826, 0.5625, 0.94117647],
[0.20869565, 1., 1.33333333],
[-0.83478261, -0.5625, -0.15686275],
[0., 0.0625, 0.],
[0.24347826, 0.5625, 0.94117647]]),
index=None, columns=features
)
exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f']
act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True)
pd.testing.assert_frame_equal(exp_out_x, act_out_x)
self.assertEqual(exp_out_y, act_out_y)
def test_make_feat_importance_plots(self):
"""
Tests make_feat_importance_plots in train.py
"""
print('Testing make_feat_importance_plots')
input_feat_importances = {
'Feature_1': [7.8, 8.7, 0.1, 8.1, 0.4],
'Feature_2': [6.4, 0.1, 0.6, 8.3, 5.2],
'Feature_3': [7.1, 8.4, 0.0, 9.3, 2.5],
'Feature_4': [3.4, 2.1, 1.6, 5.6, 9.4],
'Feature_5': [8.5, 3.4, 6.6, 6.4, 9.0],
'Feature_6': [3.5, 4.3, 8.9, 2.3, 4.1],
'Feature_7': [6.5, 8.4, 2.1, 3.2, 7.8],
'Feature_8': [8.2, 4.7, 4.3, 1.0, 4.3],
'Feature_9': [8.2, 5.6, 5.0, 0.8, 0.9],
'Feature_10': [1.9, 4.0, 0.5, 6.0, 7.8]
}
input_results_dir = 'tests/Temp_output'
input_plt_name = 'PlaceHolder'
for num in range(1, 7):
if num == 1:
with self.assertRaises(FileNotFoundError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'Directory {} does not exist'.format(input_results_dir)
)
elif num == 2:
os.mkdir(input_results_dir)
with open('{}/{}_feat_importance_percentiles.svg'.format(
input_results_dir, input_plt_name
), 'w') as f:
f.write('PlaceHolder')
with self.assertRaises(FileExistsError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'File {}/{}_feat_importance_percentiles.svg already exists '
'- please rename this file so it is not overwritten by '
'running this function'.format(input_results_dir, input_plt_name)
)
shutil.rmtree(input_results_dir)
elif num == 3:
os.mkdir(input_results_dir)
with open('{}/{}_feat_importance_all_data.svg'.format(
input_results_dir, input_plt_name
), 'w') as f:
f.write('PlaceHolder')
with self.assertRaises(FileExistsError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'File {}/{}_feat_importance_all_data.svg already exists - '
'please rename this file so it is not overwritten by '
'running this function'.format(input_results_dir, input_plt_name)
)
shutil.rmtree(input_results_dir)
elif num == 4:
os.mkdir(input_results_dir)
with self.assertRaises(TypeError) as message:
make_feat_importance_plots(
pd.DataFrame({}), input_results_dir, input_plt_name, True
)
self.assertEqual(
str(message.exception),
'Expect "feature_importances" to be a dictionary of '
'importance scores'
)
shutil.rmtree(input_results_dir)
elif num == 5:
os.mkdir(input_results_dir)
with self.assertRaises(TypeError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir, 1.0, True
)
self.assertEqual(
str(message.exception),
'Expect "plt_name" to a string to append to the start of '
'the names of the saved plots'
)
shutil.rmtree(input_results_dir)
elif num == 6:
os.mkdir(input_results_dir)
exp_importance_df = pd.DataFrame({
'Feature': ['Feature_1', 'Feature_3', 'Feature_5', 'Feature_7',
'Feature_2', 'Feature_9', 'Feature_8', 'Feature_6',
'Feature_10', 'Feature_4'],
'Score': [7.8, 7.1, 6.6, 6.5, 5.2, 5.0, 4.3, 4.1, 4.0, 3.4],
'Lower conf limit': [0.13, 0.25, 3.7, 2.21, 0.15, 0.81,
1.33, 2.42, 0.64, 1.65],
'Upper conf limit': [8.64, 9.21, 8.95, 8.34, 8.11, 7.94,
7.85, 8.44, 7.62, 9.02]
})
exp_cols = [
'Feature_1', 'Feature_2', 'Feature_3', 'Feature_4', 'Feature_5',
'Feature_6', 'Feature_7', 'Feature_8', 'Feature_9', 'Feature_10'
]
exp_cols_all = [
'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1',
'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2',
'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3',
'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4',
'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5',
'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6',
'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7',
'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8',
'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9',
'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10'
]
exp_all_vals = [
7.8, 8.7, 0.1, 8.1, 0.4, 6.4, 0.1, 0.6, 8.3, 5.2, 7.1, 8.4,
0.0, 9.3, 2.5, 3.4, 2.1, 1.6, 5.6, 9.4, 8.5, 3.4, 6.6, 6.4,
9.0, 3.5, 4.3, 8.9, 2.3, 4.1, 6.5, 8.4, 2.1, 3.2, 7.8, 8.2,
4.7, 4.3, 1.0, 4.3, 8.2, 5.6, 5.0, 0.8, 0.9, 1.9, 4.0, 0.5,
6.0, 7.8]
exp_median_vals = [7.8, 5.2, 7.1, 3.4, 6.6, 4.1, 6.5, 4.3, 5.0, 4.0]
exp_lower_conf_limit_vals = [
0.13, 0.15, 0.25, 1.65, 3.7, 2.42, 2.21, 1.33, 0.81, 0.64
]
exp_upper_conf_limit_vals = [
8.64, 8.11, 9.21, 9.02, 8.95, 8.44, 8.34, 7.85, 7.94, 7.62
]
(
act_importance_df, act_cols, act_cols_all, act_all_vals,
act_median_vals, act_lower_conf_limit_vals,
act_upper_conf_limit_vals
) = make_feat_importance_plots(
input_feat_importances, input_results_dir, input_plt_name,
True
)
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df)
self.assertEqual(exp_cols, act_cols)
self.assertEqual(exp_cols_all, act_cols_all)
np.testing.assert_almost_equal(exp_all_vals, act_all_vals, 7)
np.testing.assert_almost_equal(
exp_median_vals, act_median_vals, 7
)
np.testing.assert_almost_equal(
exp_lower_conf_limit_vals, act_lower_conf_limit_vals, 7
)
np.testing.assert_almost_equal(
exp_upper_conf_limit_vals, act_upper_conf_limit_vals, 7
)
shutil.rmtree(input_results_dir)
def test_check_arguments(self):
"""
Tests check_arguments in train.py
"""
print('Testing check_arguments')
# Sets "recognised" parameter values that will not raise an exception
x_train = np.array([])
y_train = np.array([])
train_groups = np.array([])
x_test = np.array([])
y_test = np.array([])
selected_features = []
splits = [(y_train, np.array([]))]
const_split = True
resampling_method = 'no_balancing'
n_components_pca = None
run = 'randomsearch'
fixed_params = {}
tuned_params = {}
train_scoring_metric = 'accuracy'
test_scoring_funcs = {}
n_iter = None
cv_folds_inner_loop = 5
cv_folds_outer_loop = 5
draw_conf_mat = True
plt_name = ''
# "Recognised" parameter values should not raise an exception
output_str = check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(output_str, 'All checks passed')
# "Unrecognised" parameter values should raise an exception
# Tests x_train type
x_train_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train_str, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "x_train" to be a numpy array of '
'training data fluorescence readings'
)
# Tests y_train type
y_train_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train_str, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_train" to be a numpy array of '
'training data class labels'
)
# Tests train_groups type
train_groups_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups_str, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "train_groups" to be a numpy array '
'of training data subclass labels'
)
# Tests x_test type
x_test_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_str, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "x_test" to be a numpy array of '
'test data fluorescence readings'
)
# Tests y_test type
y_test_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test_str,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_test" to be a numpy array of '
'test data class labels'
)
# Tests y_train is a 1D array
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([[2, 2], [2, 2], [2, 2], [2, 2]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups, x_test,
y_test, selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_train" to be a 1D array'
)
# Tests mismatch in x_train and y_train shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2, 2])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups, x_test,
y_test, selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_train" and "y_train"'
)
# Tests train_groups is a 1D array
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([[3], [3], [3], [3]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test, y_test, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "train_groups" to be a 1D array'
)
# Tests mismatch in x_train and train_groups shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test, y_test, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_train" and "train_groups"'
)
# Tests y_test is a 1D array
x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]])
y_test_array = np.array([[5, 5], [5, 5], [5, 5], [5, 5]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_array,
y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_test" to be a 1D array'
)
# Tests mismatch in x_test and y_test shape
x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]])
y_test_array = np.array([5, 5, 5, 5, 5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_array,
y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_test" and "y_test"'
)
# Tests mismatch in x_train and x_test shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4, 4], [4, 4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of features incorporated '
'in the training and test data'
)
# Tests no NaN in x_train
x_train_array = np.array([[1, np.nan], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "x_train" data'
)
# Tests no non-numeric entries in x_train
x_train_array = np.array([[1, 1], [1, 'X'], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Non-numeric value(s) in "x_train" - expect'
' all values in "x_train" to be integers / floats'
)
# Tests no NaN in y_train
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, np.nan, 2])
        train_groups_array = np.array([3, 3, 3, 3])
import numpy as np
from numba import jit, prange
from scipy.fftpack import fft2, next_fast_len
from dautil.util import zero_padding
from tail.numba_wrap import fftfreq
from tail.util import fill_nan, norm_fft, normalize_row
@jit(nopython=True, nogil=True, parallel=True)
def _bin_psd2(pixel_size, l_max, mask):
'''identical to ``_bin_psd2_cross`` except
that mask1 == mask2
'''
N = mask.shape[0]
freq = fftfreq(N, pixel_size)
n = l_max + 1
psd_1d = np.zeros(n)
hit = np.zeros(n, dtype=np.int64)
pi_2 = np.pi * 2.
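    # Flat-sky conversion: multipole l = 2*pi*|k|, with k from fftfreq
    # (assumes pixel_size is given in radians).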
for i in prange(N):
freq_i = freq[i]
for j in range(N):
freq_j = freq[j]
l = int(round(pi_2 * np.sqrt(freq_i * freq_i + freq_j * freq_j)))
idx = l if l < l_max else l_max
hit[idx] += 1
# psd_2d
mask_ij = mask[i, j]
real = mask_ij.real
imag = mask_ij.imag
psd_1d[idx] += real * real + imag * imag
psd_1d = psd_1d[:-1]
hit = hit[:-1]
for i in range(l_max):
hit_ = hit[i]
psd_1d[i] = psd_1d[i] / hit_ if hit_ > 0 else np.nan
fill_nan(psd_1d)
return psd_1d
@jit(nopython=True, nogil=True, parallel=True)
def _bin_psd2_cross(pixel_size, l_max, mask1, mask2):
'''bins 2d fft to 1d integers
'''
N = mask1.shape[0]
freq = fftfreq(N, pixel_size)
n = l_max + 1
psd_1d = np.zeros(n)
hit = np.zeros(n, dtype=np.int64)
pi_2 = np.pi * 2.
for i in prange(N):
freq_i = freq[i]
for j in range(N):
freq_j = freq[j]
l = int(round(pi_2 * np.sqrt(freq_i * freq_i + freq_j * freq_j)))
idx = l if l < l_max else l_max
hit[idx] += 1
# psd_2d
mask1_ij = mask1[i, j]
mask2_ij = mask2[i, j]
psd_1d[idx] += mask1_ij.real * mask2_ij.real + mask1_ij.imag * mask2_ij.imag
psd_1d = psd_1d[:-1]
hit = hit[:-1]
for i in range(l_max):
hit_ = hit[i]
psd_1d[i] = psd_1d[i] / hit_ if hit_ > 0 else np.nan
fill_nan(psd_1d)
return psd_1d
def _get_W(l_max, pixel_size, mask1, mask2=None, l_min=1):
'''if ``mask2 is None``, get auto-psd of ``mask1``,
else cross-psd of ``mask1`` and ``mask2``.
return the 1d-spectrum, binned to integers up to (but not include) ``l_max``
'''
def _get_fft(mask, n_x):
mask = zero_padding(mask, (n_x, n_x))
return fft2(mask) * norm_fft(mask)
n_x = max(int(round(np.pi / (pixel_size * l_min))), mask1.shape[0])
n_x = next_fast_len(n_x)
mask1_fft = _get_fft(mask1, n_x)
mask2_fft = None if mask2 is None else _get_fft(mask2, n_x)
W = _bin_psd2(pixel_size, l_max, mask1_fft) if mask2_fft is None else \
_bin_psd2_cross(pixel_size, l_max, mask1_fft, mask2_fft)
return W
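# Usage sketch (hypothetical values): for a mask sampled at 1-arcmin pixels,
#   W = _get_W(l_max=3000, pixel_size=np.radians(1./60.), mask1=apod_mask)
# returns the azimuthally binned 1D power spectrum of the mask at integer multipoles.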
@jit(nopython=True, nogil=True)
def _J_t(k1, k2, k3):
'''See Eq. A10 from MASTER paper
it actually returns J_t * pi / 2 because overall scale doesn't matter
'''
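    # By Heron's formula, temp = 16 * (triangle area)^2 for sides (k1, k2, k3);
    # it is non-positive when the three moduli cannot form a triangle.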
k1_2 = k1 * k1
k2_2 = k2 * k2
k3_2 = k3 * k3
temp = 2 * (k1_2 * k2_2 + k2_2 * k3_2 + k3_2 * k1_2) - k1_2 * k1_2 - k2_2 * k2_2 - k3_2 * k3_2
# factor of 2 / pi ignored
# return 2. / (np.pi * np.sqrt(temp)) if temp > 0 else 0.
return 1. / np.sqrt(temp) if temp > 0 else 0.
@jit(nopython=True, nogil=True)
def _get_alpha(k1, k2, k3):
    '''return the angle in [0, pi] opposite side k1 in the
    triangle with sides k1, k2, k3 (law of cosines)
'''
return np.arccos((k2 * k2 + k3 * k3 - k1 * k1) / (2 * k2 * k3))
def _get_J_p(Mtype, pure='hybrid'):
'''supported cases:
('EEEE', 'hybrid'),
('BBBB', 'hybrid'),
('TETE', 'hybrid'),
('TBTB', 'hybrid'),
('EBEB', 'hybrid'),
('EBEB', 'pseudo')
To include other cases, port them from commit 70fba3c.
'''
@jit(nopython=True, nogil=True)
def tete(k1, k2, k3):
alpha3 = _get_alpha(k3, k1, k2)
return np.cos(2. * alpha3)
@jit(nopython=True, nogil=True)
def eeee(k1, k2, k3):
alpha3 = _get_alpha(k3, k1, k2)
temp = np.cos(2. * alpha3)
return temp * temp
@jit(nopython=True, nogil=True)
def ebeb_pseudo(k1, k2, k3):
alpha3 = _get_alpha(k3, k1, k2)
        return np.cos(4. * alpha3)
""" script_bootstrap_pca.py - bootstrap uncertainties on PCA coefficients
<NAME>, University of Notre Dame & STScI, <EMAIL>, 2019-01-16
"""
import numpy as np
from astropy.table import Table
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from astropy.stats import bootstrap
import matplotlib.pyplot as plt
import seaborn as sns
# TODO move to util.py
################
# copied from PCASignificance.py
################
Z_CUT = 0.2
HR_CUT = 0.7
# import data for PCA analysis
## HR
HR = pd.read_csv('data/Campbell_local.tsv', sep='\\t',
usecols=['SNID', 'redshift', 'hr', 'err_mu'], index_col='SNID')
HR.rename(columns={'err_mu': 'hr uncert'}, inplace=True)
HR = HR[HR['redshift'] < Z_CUT]
HR = HR[HR['hr'] < HR_CUT]
# print('Hubble Residual:')
# print(HR.describe())
## SALT2 parameters (x_1 & c)
t = Table.read('data/SDSS_Photometric_SNe_Ia.fits')
salt = t['CID', 'Z', 'X1', 'X1_ERR', 'COLOR', 'COLOR_ERR'].to_pandas()
salt.columns = salt.columns.str.lower()
salt.rename(columns={'cid': 'SNID', 'z': 'redshift'}, inplace=True)
salt.set_index('SNID', inplace=True)
# print('\nSALT2 parameters:')
# print(salt.describe())
## stellar mass
galaxy = pd.read_csv('resources/kcorrect_stellarmass.csv',
usecols=['GAL', 'redshift', 'stellarmass'], index_col='GAL')
galaxy.rename(columns={'redshift': 'gal redshift', 'stellarmass': 'stellar mass'}, inplace=True)
# print('\nGalaxy Stellar Mass:')
# print(galaxy.describe())
## age
age = pd.read_csv('resources/ages_campbell.tsv', sep='\\t', skiprows=[1],
                  usecols=['# sn id', 'age'], dtype={'age': np.float64, '# sn id': int})
age.rename(columns={'# sn id': 'SNID'}, inplace=True)
age.set_index('SNID', inplace=True)
# print('\nLocal Envir. Ages:')
# print(age.describe())
# combine into on array
data = pd.concat([HR, salt, galaxy, age], axis=1)
data.dropna(inplace=True)
data['stellar mass'] = np.log10(data['stellar mass'])
# print('\nAnalysis Data:')
# print(data.describe())
################
#end copied from PCASignificance.py
################
# PCA preprocessing
pca = PCA(n_components=4)
FEATURES = ['x1', 'color', 'stellar mass', 'age']
#x = data.loc[:, FEATURES].values
scaled_data = StandardScaler().fit_transform(data.loc[:, FEATURES].values)
# run PCA
#principal_components = pca.fit_transform(x)
pca.fit(scaled_data)
original_components = pca.components_
print(original_components)
# Perform bootstrap
def bootfunc(x):
"""Stats to be performed on each bootstrap re-sample.
This function performs PCA, gets PC1, then converts to same
handedness as on the original data set.
"""
pc1 = pca.fit(x).components_[0]
if original_components[0].dot(pc1) < 0:
pc1 = -pc1
return pc1
# bootresult = bootstrap(scaled_data, 3)
# print('test:', bootresult.shape, scaled_data.shape,
# bootresult[0].shape, bootfunc(scaled_data).shape)
bootresult = bootstrap(scaled_data, 100000, bootfunc=bootfunc)
print(bootresult[:5])
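# A percentile confidence interval on each PC1 loading could be read off the
# bootstrap distribution, e.g. np.percentile(bootresult, [16, 84], axis=0)
# (illustration only, not part of the original analysis).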
# convert so all bootstrap results are using the same handed coordinate system
# if pca.components_[0].dot()
np.savetxt('resources/pca_bootstrap.csv', bootresult, delimiter=',')
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
# In[2]:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
# Take each frame
_, frame = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
lower_green = np.array([36, 25, 25])
upper_green = | np.array([70, 255,255]) | numpy.array |
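# A minimal, self-contained sketch of how the loop above is typically finished
# (build the mask, overlay it on the frame, display, and exit cleanly). This is
# an assumption about the intended continuation, not the original code.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_green = np.array([36, 25, 25])
    upper_green = np.array([70, 255, 255])
    # keep only pixels whose HSV values fall inside the green range
    mask = cv2.inRange(hsv, lower_green, upper_green)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    if cv2.waitKey(5) & 0xFF == 27:  # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()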
"""Provide the payoff settings to be used in the Sampling Paradigm.
main file: sp.py
For more information, see also the following keys within the
"task-sp_events.json" file: action_type, action, outcome
"""
import itertools
import numpy as np
def get_payoff_settings(ev_diff):
"""Provide a set of possible payoff distributions.
For two payoff distributions with two outcomes each, provide settings in
the form of an array, where each row is a setting for the two payoff
distributions and the meaning of the columns is as follows: outcome 1.1,
outcome 1.2, probability 1.1, probability 1.2, outcome 2.1, outcome 2.2,
probability 2.1, probability 2.2.
Parameters
----------
ev_diff : float
"Expected value difference". The difference in expected value between
the two payoff distributions that we want for each setting.
Returns
-------
payoff_settings : ndarray, shape (n, 8)
Subset of all possible payoff distribution settings.
"""
# Define the numbers we are working with for the probabilities of the
# outcomes, and their magnitudes.
initial_probs = np.arange(0.1, 1, 0.1)
initial_mags = np.arange(1, 10)
# Get all possible settings for a single distribution of two outcomes
# ===================================================================
    # For a single distribution, we have two possible outcomes. Take k unordered
# outcomes from n possibilities; the number is given by binomial
# coefficient:
# np.math.factorial(n) / (np.math.factorial(k)*np.math.factorial(n-k))
k = 2
single_mags = np.array(list(itertools.combinations(initial_mags, k)))
# Now map each possible magnitude combination to all probability
# combinations. Example: magnitudes [1,2] can be obtained with
# probabilities [0.1, 0.9] or with probabilities [0.9, 0.1], ...
single_probs = np.array(list(zip(initial_probs, 1 - initial_probs)))
mags = np.repeat(single_mags,
repeats=len(single_probs),
axis=0)
probs = np.tile(single_probs, reps=(len(single_mags), 1))
# single payoff distribution array: each row is a possible setting of
# dimensions: magnitude1, magnitude2, probability1, probability2
single_distr = np.concatenate((mags, probs), axis=1)
# Get all possible settings for two distributions
# ===============================================
# Get all possible combinations for two payoff distributions
    # (36*9)**2 ... i.e., 36 magnitudes*9 probabilities
# to the power of two
two_distrs = np.empty(((36*9)**2, 8))
two_distrs[:] = np.nan
for i in range(len(single_distr)):
__ = np.roll(single_distr, shift=i, axis=0)
data = list(zip(__, single_distr))
two_distrs[i*len(single_distr):(1+i)*len(single_distr),
:] = np.array(data).reshape(-1, 8)
# Select a subset of distributions from all those that are possible
# =================================================================
# Calculate the expected value of each payoff distribution
# then select payoff distribution settings based on difference
# between EVs ... for example, only equal payoff distribution settings
# Or where the difference is >0, but <1
evs = list()
for row in two_distrs:
ev1 = row[0]*row[2] + row[1]*row[3]
ev2 = row[4]*row[6] + row[5]*row[7]
evs.append(np.abs(ev1-ev2))
# sanity check, then use as array (round to 14 decimals to avoid weird
# floating point arithmetic)
assert not np.isnan(two_distrs).all()
evs = np.round(np.array(evs), 14)
# Now we make use of the expected value difference that was set as a
# parameter to the function call, to determine which subset of possible
# payoff distribution setttings we want.
ev_payoff_settings = two_distrs[np.where(evs == ev_diff)]
ev_payoff_settings = np.round(ev_payoff_settings, 14)
    # Take a subset of payoff distributions: only if we have 4 distinct outcomes
# =======================================================================
payoff_settings = None
for row in ev_payoff_settings:
if len(np.unique(row[[0, 1, 4, 5]])) == 4:
if payoff_settings is None:
payoff_settings = | np.expand_dims(row, axis=0) | numpy.expand_dims |
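# A small, self-contained check of the settings layout described in the
# docstring above (outcome 1.1, outcome 1.2, prob 1.1, prob 1.2, outcome 2.1,
# outcome 2.2, prob 2.1, prob 2.2). The example row below is made up for
# illustration only; it is not output of the (truncated) function.
import numpy as np

example_setting = np.array([1., 9., 0.5, 0.5, 3., 6., 0.6, 0.4])
ev1 = example_setting[0] * example_setting[2] + example_setting[1] * example_setting[3]
ev2 = example_setting[4] * example_setting[6] + example_setting[5] * example_setting[7]
print('EV of distribution 1:', ev1)                        # 5.0
print('EV of distribution 2:', ev2)                        # 4.2
print('EV difference       :', round(abs(ev1 - ev2), 14))  # 0.8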
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import warnings
from itertools import combinations_with_replacement as combinations_w_r
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import boxcox, nanpercentile, nanmedian
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'PowerTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
'power_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray or None, shape (n_features,)
Per feature relative scaling of the data. Equal to ``None`` when
``with_std=False``.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray or None, shape (n_features,)
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray or None, shape (n_features,)
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_samples_seen_ : int or array, shape (n_features,)
The number of samples processed by the estimator for each feature.
        If there are no missing samples, the ``n_samples_seen`` will be an
integer, otherwise it will be an array.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, <NAME>., <NAME>, and <NAME>. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
# if n_samples_seen_ is an integer (i.e. no missing values), we need to
# transform it to a NumPy array of shape (n_features,) required by
# incr_mean_variance_axis and _incremental_variance_axis
if (hasattr(self, 'n_samples_seen_') and
isinstance(self.n_samples_seen_, (int, np.integer))):
self.n_samples_seen_ = np.repeat(self.n_samples_seen_,
X.shape[1]).astype(np.int64)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
sparse_constructor = (sparse.csr_matrix
if X.format == 'csr' else sparse.csc_matrix)
counts_nan = sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr),
shape=X.shape).sum(axis=0).A.ravel()
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = (X.shape[0] -
counts_nan).astype(np.int64)
if self.with_std:
# First pass
if not hasattr(self, 'scale_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
if hasattr(self, 'scale_'):
self.n_samples_seen_ += X.shape[0] - counts_nan
else:
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)
# First pass
if not hasattr(self, 'scale_'):
self.mean_ = .0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler(copy=True)
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the ``transform`` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler(copy=True, quantile_range=(25.0, 75.0), with_centering=True,
with_scaling=True)
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = check_array(X, accept_sparse='csc', copy=self.copy, estimator=self,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
self.center_ = nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(nanpercentile(column_data,
self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X, accept_sparse=True).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
The data to transform, row by row.
Sparse input should preferably be in CSC format.
Returns
-------
XP : np.ndarray or CSC sparse matrix, shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES, accept_sparse='csc')
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
if sparse.isspmatrix(X):
columns = []
for comb in combinations:
if comb:
out_col = 1
for col_idx in comb:
out_col = X[:, col_idx].multiply(out_col)
columns.append(out_col)
else:
columns.append(sparse.csc_matrix(np.ones((X.shape[0], 1))))
XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
else:
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
for i, comb in enumerate(combinations):
XP[:, i] = X[:, comb].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering, for instance. The dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer(copy=True, norm='l2')
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Binarizer
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = Binarizer().fit(X) # fit does nothing.
>>> transformer
Binarizer(copy=True, threshold=0.0)
>>> transformer.transform(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
def __init__(self):
# Needed for backported inspect.signature compatibility with PyPy
pass
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
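        # The update below centers the (possibly rectangular) prediction
        # kernel: subtract the column means of the fit-time kernel
        # (``K_fit_rows_``) and the row means of K (``K_pred_cols``), then
        # add back the overall mean of the fit-time kernel (``K_fit_all_``).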
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
    original values. Feature values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
        Quantiles of reference.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = .5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col[isfinite_mask] = np.interp(X_col_finite,
self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
        # we only accept a positive sparse matrix when ignore_implicit_zeros
        # is False, whether we are called from fit or from transform.
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
if (not accept_sparse_negative and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts'
' non-negative sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
# check that the dimension of X are adequate with the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
The cumulative density function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
        Axis along which the quantile transformation is applied. If 0,
        transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
    copy : boolean, optional, (default=False)
        If False, try to avoid a copy and perform the transformation inplace
        (if the input is already a numpy array). Note that this default
        differs from the ``copy=True`` default of ``QuantileTransformer``.
    Returns
    -------
    Xt : ndarray or sparse matrix, shape (n_samples, n_features)
        The projected data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
class PowerTransformer(BaseEstimator, TransformerMixin):
"""Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, PowerTransformer supports the Box-Cox transform and the
    Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
    supports both positive and negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
method : str, (default='yeo-johnson')
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
standardize : boolean, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : boolean, optional, default=True
Set to False to perform inplace computation during transformation.
Attributes
----------
lambdas_ : array of float, shape (n_features,)
The parameters of the power transformation for the selected features.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import PowerTransformer
>>> pt = PowerTransformer()
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(pt.fit(data))
PowerTransformer(copy=True, method='yeo-johnson', standardize=True)
>>> print(pt.lambdas_)
[1.38668178e+00 5.93926346e-09]
>>> print(pt.transform(data))
[[-1.31616039 -0.70710678]
[ 0.20998268 -0.70710678]
[ 1.1061777 1.41421356]]
See also
--------
power_transform : Equivalent function without the estimator API.
QuantileTransformer : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
    .. [1] I.-K. Yeo and R.A. Johnson, "A new family of power transformations
           to improve normality or symmetry." Biometrika, 87(4), pp.954-959,
           (2000).
    .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
           of the Royal Statistical Society B, 26, 211-252 (1964).
"""
def __init__(self, method='yeo-johnson', standardize=True, copy=True):
self.method = method
self.standardize = standardize
self.copy = copy
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : Ignored
Returns
-------
self : object
"""
self._fit(X, y=y, force_transform=False)
return self
def fit_transform(self, X, y=None):
return self._fit(X, y, force_transform=True)
def _fit(self, X, y=None, force_transform=False):
X = self._check_input(X, check_positive=True, check_method=True)
if not self.copy and not force_transform: # if call from fit()
X = X.copy() # force copy so that fit does not change X inplace
optim_function = {'box-cox': self._box_cox_optimize,
'yeo-johnson': self._yeo_johnson_optimize
}[self.method]
self.lambdas_ = []
for col in X.T:
with np.errstate(invalid='ignore'): # hide NaN warnings
lmbda = optim_function(col)
self.lambdas_.append(lmbda)
self.lambdas_ = np.array(self.lambdas_)
if self.standardize or force_transform:
transform_function = {'box-cox': boxcox,
'yeo-johnson': self._yeo_johnson_transform
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
return X
def transform(self, X):
"""Apply the power transform to each feature using the fitted lambdas.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data to be transformed using a power transformation.
Returns
-------
X_trans : array-like, shape (n_samples, n_features)
The transformed data.
"""
check_is_fitted(self, 'lambdas_')
X = self._check_input(X, check_positive=True, check_shape=True)
transform_function = {'box-cox': boxcox,
'yeo-johnson': self._yeo_johnson_transform
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
X = self._scaler.transform(X)
return X
def inverse_transform(self, X):
"""Apply the inverse power transformation using the fitted lambdas.
The inverse of the Box-Cox transformation is given by::
if lambda == 0:
X = exp(X_trans)
else:
X = (X_trans * lambda + 1) ** (1 / lambda)
The inverse of the Yeo-Johnson transformation is given by::
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
Parameters
----------
X : array-like, shape (n_samples, n_features)
The transformed data.
Returns
-------
X : array-like, shape (n_samples, n_features)
The original data
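        Examples
        --------
        A round-trip sketch (the fitted lambdas depend on the data):
        >>> import numpy as np
        >>> from sklearn.preprocessing import PowerTransformer
        >>> pt = PowerTransformer()
        >>> data = np.array([[1.0], [2.0], [3.0]])
        >>> np.allclose(pt.inverse_transform(pt.fit_transform(data)), data)
        True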
"""
check_is_fitted(self, 'lambdas_')
X = self._check_input(X, check_shape=True)
if self.standardize:
X = self._scaler.inverse_transform(X)
inv_fun = {'box-cox': self._box_cox_inverse_tranform,
'yeo-johnson': self._yeo_johnson_inverse_transform
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'): # hide NaN warnings
X[:, i] = inv_fun(X[:, i], lmbda)
return X
def _box_cox_inverse_tranform(self, x, lmbda):
"""Return inverse-transformed input x following Box-Cox inverse
transform with parameter lambda.
"""
if lmbda == 0:
x_inv = np.exp(x)
else:
x_inv = (x * lmbda + 1) ** (1 / lmbda)
return x_inv
def _yeo_johnson_inverse_transform(self, x, lmbda):
"""Return inverse-transformed input x following Yeo-Johnson inverse
transform with parameter lambda.
Notes
-----
We're comparing lmbda to 1e-19 instead of strict equality to 0. See
scipy/special/_boxcox.pxd for a rationale behind this
"""
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
        if lmbda < 1e-19:
            x_inv[pos] = np.exp(x[pos]) - 1
        else:  # lmbda != 0
            x_inv[pos] = (x[pos] * lmbda + 1) ** (1 / lmbda) - 1
        # when x < 0
        if lmbda < 2 - 1e-19:
            x_inv[~pos] = 1 - (-(2 - lmbda) * x[~pos] + 1) ** (1 / (2 - lmbda))
        else:  # lmbda == 2
            x_inv[~pos] = 1 - np.exp(-x[~pos])
        return x_inv
from optparse import OptionParser
from time import time
import numpy as np
import os
import random
import torch
import crypten
import crypten.mpc as mpc
import crypten.communicator as comm
import sys
############## Import modules ##############
sys.path.append("../../")
from modules.AL.dataloader import GenericDataset
from modules.AL.pt_utils import test_torch, test_torch_ensemble
from modules.AL.models.AlexNet2d import AlexNet2d as AlexNet
from modules.AL.utils import make_directory_if_not_exists
from modules import ucr_loader, utils
crypten.init()
torch.set_num_threads(1)
def train_crpyten(options, model, trainX, trainY, n_classes):
trainX_enc = crypten.cryptensor(trainX, src=0)
# One-hot encode labels
label_eye = torch.eye(n_classes)
trainY_one = label_eye[trainY]
perm = np.random.permutation(trainX_enc.shape[0])
shuffled_trainX_enc = trainX_enc[perm]
shuffled_trainY_enc = trainY_one[perm]
criterion = crypten.nn.CrossEntropyLoss()
model.train()
timer_start = time()
for i in range(int(trainX_enc.shape[0]/options.batch_size)):
x = shuffled_trainX_enc[i *
options.batch_size:(i+1)*options.batch_size]
y = shuffled_trainY_enc[i *
options.batch_size:(i+1)*options.batch_size]
# Forward pass
outputs = model(x)
loss = criterion(outputs, y)
# Backward and optimize
model.zero_grad()
loss.backward()
model.update_parameters(options.initial_lr)
overall_time = time() - timer_start
return overall_time
def eval_crpyten(options, model, valX, valY, n_classes):
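    # Pad the validation set with all-zero rows so that the fixed-size batch
    # loop below covers every sample; predictions for the padded ("dummy")
    # rows are discarded again inside the loop.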
dummy_size = (options.batch_size -
(valX.shape[0] % options.batch_size)) % options.batch_size
valXd = np.concatenate([valX, np.zeros(
(dummy_size, *valX.shape[1:]))], axis=0) if dummy_size > 0 else valX
valYd = np.concatenate([valY, np.zeros(dummy_size)],
axis=0) if dummy_size > 0 else valY
valX_enc = crypten.cryptensor(valXd, src=0)
# One-hot encode labels
label_eye = torch.eye(n_classes)
valY_one = label_eye[valYd]
with torch.no_grad():
model.eval()
timer_start = time()
for i in range(int(valX_enc.shape[0]/options.batch_size)):
x = valX_enc[i*options.batch_size:(i+1)*options.batch_size]
y = valY_one[i*options.batch_size:(i+1)*options.batch_size]
# Forward pass
outputs = model(x)
if dummy_size > 0 and (i+1) == int(valX_enc.shape[0]/options.batch_size):
outputs = outputs[:-dummy_size]
y = y[:-dummy_size]
overall_time = time() - timer_start
return overall_time
def train_pytorch(options, model, train_loader, use_gpu=True):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=options.initial_lr)
device = torch.device('cuda' if torch.cuda.is_available() and use_gpu else 'cpu')
# Train the model
model.to(device)
model.train()
timer_start = time()
for step, (x, y) in enumerate(train_loader):
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# Forward pass
outputs = model(x.float())
loss = criterion(outputs, y)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
overall_time = time() - timer_start
return overall_time
def eval_pytorch(options, model, val_loader, use_gpu=True):
device = torch.device('cuda' if torch.cuda.is_available() and use_gpu else 'cpu')
model.to(device)
with torch.no_grad():
model.eval()
timer_start = time()
for step, (x, y) in enumerate(val_loader):
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
# Forward pass
outputs = model(x.float())
overall_time = time() - timer_start
return overall_time
def process(options):
np.random.seed(options.seed)
torch.manual_seed(0)
DIR_RESULTS = '../../../results/'
# get all datasets
dataset_dict = ucr_loader.get_datasets(options.root_path, prefix='**/')
# retrieve data
dataset_name = options.dataset_name
trainX, trainY, testX, testY = ucr_loader.load_data(
dataset_dict[dataset_name])
# preprocess data
trainX, trainY, testX, testY = ucr_loader.preprocess_data(
trainX, trainY, testX, testY, normalize=options.normalize, standardize=options.standardize)
# additional preprocessing
trainX, trainY, valX, valY = utils.perform_datasplit(
trainX, trainY, test_split=options.validation_split)
n_classes = len(np.unique(trainY))
if 1: # options.only_batch:
trainX = trainX[:options.batch_size]
trainY = trainY[:options.batch_size]
valX = valX[:options.batch_size]
valY = valY[:options.batch_size]
# Shapes
print('TrainX:', trainX.shape)
print('ValX:', valX.shape)
print('TestX:', testX.shape)
print('Classes:', n_classes)
# Convert to channels first
trainX = torch.tensor(np.expand_dims(trainX.transpose(0, 2, 1), axis=-1))
trainY = torch.tensor(trainY)
valX = torch.tensor(np.expand_dims(valX.transpose(0, 2, 1), axis=-1))
valY = torch.tensor(valY)
# get data loader
train_dataset = GenericDataset(x=trainX, y=trainY)
val_dataset = GenericDataset(x=valX, y=valY)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=options.batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=options.batch_size)
# create plain
model_plain = AlexNet(in_width=trainX.shape[2],
in_channels=trainX.shape[1], num_classes=n_classes)
model = crypten.nn.from_pytorch(
model_plain, torch.empty(1, *trainX.shape[1:]))
model.encrypt()
times = np.zeros((6, options.trys))
for i in range(options.trys):
print('Process run: %s / %s' % (i+1, options.trys))
print('CrypTen...', end='')
times[0, i] = train_crpyten(options=options, model=model,
trainX=trainX, trainY=trainY, n_classes=n_classes)
times[1, i] = eval_crpyten(options=options, model=model,
valX=valX, valY=valY, n_classes=n_classes)
print('PytorchCPU...', end='')
times[2, i] = train_pytorch(options=options, model=model_plain,
train_loader=train_loader, use_gpu=False)
times[3, i] = eval_pytorch(options=options, model=model_plain,
val_loader=val_loader, use_gpu=False)
print('PytorchGPU...', end='')
times[4, i] = train_pytorch(options=options, model=model_plain,
train_loader=train_loader, use_gpu=True)
times[5, i] = eval_pytorch(options=options, model=model_plain,
val_loader=val_loader, use_gpu=True)
print('Finished')
times *= 1000 # milli seconds
model_directory = 'CrypTen_AlexNet_' + dataset_name
experiment_directory = 'BS_%s_Trys_%s' % (
options.batch_size, options.trys)
filename = 'report.txt'
res_save_path = os.path.join(
DIR_RESULTS, model_directory, experiment_directory)
make_directory_if_not_exists(res_save_path)
res_save_path = os.path.join(res_save_path, filename)
approaches = ['TrainCrypten', 'InferCrypten', 'TrainPytorchCPU', 'InferPytorchCPU', 'TrainPytorchGPU', 'InferPytorchGPU']
s = 'Times in milliseconds\n'
for vals, name in zip(times, approaches):
        s += '%s: %s | Std: %s | Min: %s | Max: %s\n' % (
            name, np.average(vals), np.std(vals), np.min(vals), np.max(vals))
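    # Sketch only (assumption): the original tail of process() is not shown
    # above; a plausible way to finish is to persist the report built in `s`
    # at the path prepared earlier.
    with open(res_save_path, 'w') as f:
        f.write(s)
    print(s)


# Hypothetical entry point: an illustrative sketch, not the original main
# block. Option names mirror the attributes accessed throughout the script;
# every default value below is an assumption.
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("--root_path", dest="root_path", type="string",
                      default="../../../data/UCRArchive_2018")
    parser.add_option("--dataset_name", dest="dataset_name", type="string",
                      default="ElectricDevices")
    parser.add_option("--validation_split", dest="validation_split",
                      type="float", default=0.3)
    parser.add_option("--normalize", dest="normalize",
                      action="store_true", default=False)
    parser.add_option("--standardize", dest="standardize",
                      action="store_true", default=False)
    parser.add_option("--batch_size", dest="batch_size", type="int",
                      default=32)
    parser.add_option("--initial_lr", dest="initial_lr", type="float",
                      default=0.01)
    parser.add_option("--trys", dest="trys", type="int", default=5)
    parser.add_option("--seed", dest="seed", type="int", default=0)
    options, _ = parser.parse_args()
    process(options)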
#!/usr/bin/env python3
import numpy as np
from . import sequence
__all__ = [
"iter_kmers",
"kmer_counts",
"kmer_positions",
"kmer_frequency",
]
def iter_kmers(array, k=3):
"""Generate a sequence of kmer vectors.
Parameters
----------
array : ndarray, int
Array of integers encoding alleles.
k : int, optional
Size of kmers.
Yields
------
kmer : ndarray, int, shape (1, )
Integer encoded alleles of kmer sequence.
Notes
-----
Kmer vectors are padded with gap values (`-1`) to
    maintain the same allele positions as the source sequence.
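    Examples
    --------
    A minimal sketch, assuming the gap value `-1` described above:
    >>> import numpy as np
    >>> reads = np.array([[0, 1, 2, 3]])
    >>> [kmer.tolist() for kmer in iter_kmers(reads, k=3)]
    [[0, 1, 2, -1], [-1, 1, 2, 3]]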
"""
n_base = array.shape[-1]
n_windows = n_base - (k - 1)
masks = np.zeros((n_windows, n_base), dtype=bool)
for i in range(n_windows):
masks[i][i : i + k] = True
for read in array.reshape(-1, n_base):
for mask in masks:
if np.any(sequence.is_gap(read[mask])):
pass
else:
kmer = np.zeros((n_base), dtype=array.dtype) - 1
kmer[mask] = read[mask]
yield kmer
def kmer_counts(array, k=3):
"""Generate an array of kmer vectors with counts of each kmers occurance.
Parameters
----------
array : ndarray, int
Array of integers encoding alleles.
k : int, optional
Size of kmers.
Returns
-------
kmers : ndarray, int
Integer encoded alleles of kmer sequences.
counts : ndarray, int
Counts of each kmer.
Notes
-----
Kmer vectors are padded with gap values (`-1`) to
    maintain the same allele positions as the source sequence.
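    Examples
    --------
    A minimal sketch with two identical reads (gap value `-1`):
    >>> import numpy as np
    >>> reads = np.array([[0, 1, 2], [0, 1, 2]])
    >>> kmers, counts = kmer_counts(reads, k=3)
    >>> kmers.tolist(), counts.tolist()
    ([[0, 1, 2]], [2])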
"""
kmer = None # handle case of no kmers
kmers_dict = {}
counts_dict = {}
for kmer in iter_kmers(array, k=k):
string = kmer.tobytes()
if string not in kmers_dict:
kmers_dict[string] = kmer
counts_dict[string] = 1
else:
counts_dict[string] += 1
if kmer is None: # handle case of no kmers
return np.array([], dtype=array.dtype), np.array([], dtype=int)
n_base = len(kmer)
n_kmer = len(kmers_dict)
kmers = np.empty((n_kmer, n_base), dtype=array.dtype)
counts = np.empty(n_kmer, dtype=int)
for i, (string, kmer) in enumerate(kmers_dict.items()):
kmers[i] = kmer
counts[i] = counts_dict[string]
return kmers, counts
def kmer_positions(kmers, end=False):
"""Identify base positions of each kmer.
Parameters
----------
kmers : ndarray, int
Integer encoded alleles of kmer sequences.
    end : {False, 'start', 'stop'}, optional
        Optionally report only the 'start' or 'stop' position
        of each kmer (default: False, i.e. report every position).
Returns
-------
    positions : ndarray, int
        Positions of each base within each kmer (or only the start/stop
        position of each kmer when `end` is given).
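    Examples
    --------
    A minimal sketch (kmers padded with the gap value `-1`):
    >>> import numpy as np
    >>> kmers = np.array([[0, 1, 2, -1], [-1, 1, 2, 3]])
    >>> kmer_positions(kmers).tolist()
    [[0, 1, 2], [1, 2, 3]]
    >>> kmer_positions(kmers, end='start').tolist()
    [0, 1]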
"""
assert end in {False, "start", "stop"}
is_coding = ~sequence.is_gap(kmers)
# detect k
k = np.sum(is_coding, axis=-1)
assert np.all(k[0] == k)
k = k[0]
if end == "start":
return np.where(is_coding)[1][0::k]
elif end == "stop":
return np.where(is_coding)[1][k - 1 :: k]
else:
return np.where(is_coding)[1].reshape(-1, k)
def kmer_frequency(kmers, counts):
"""Calculate the frequency of each kmer among kmers that
    overlap its positional interval.
Parameters
----------
kmers : ndarray, int
Integer encoded alleles of kmer sequences.
counts : ndarray, int
Counts of each kmer.
Returns
-------
frequencies : ndarray, float
Local frequency of each kmer.
"""
is_coding = ~sequence.is_gap(kmers)
# detect k
k = np.sum(is_coding, axis=-1)
    assert np.all(k[0] == k)
import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# indexing with a list produces subfields
# the align flag should be preserved
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
#
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (a PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier):
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
#Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def iter_struct_object_dtypes():
"""
    Iterates over a few complex dtypes and object patterns which
fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
"""These tests cover various uses of complicated structured types which
include objects and thus require reference counting.
"""
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
pytest.param(np.empty, None,
# None is probably used for too many things
marks=pytest.mark.skip("unreliable due to python's behaviour")),
(np.ones, 1),
(np.zeros, 0)])
def test_structured_object_create_delete(self, dt, pat, count, singleton,
creation_func, creation_obj):
"""Structured object reference counting in creation and deletion"""
# The test assumes that 0, 1, and None are singletons.
gc.collect()
before = sys.getrefcount(creation_obj)
arr = creation_func(3, dt)
now = sys.getrefcount(creation_obj)
assert now - before == count * 3
del arr
now = sys.getrefcount(creation_obj)
assert now == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_item_setting(self, dt, pat, count, singleton):
"""Structured object reference counting for simple item setting"""
one = 1
gc.collect()
before = sys.getrefcount(singleton)
arr = np.array([pat] * 3, dt)
assert sys.getrefcount(singleton) - before == count * 3
# Fill with `1` and check that it was replaced correctly:
before2 = sys.getrefcount(one)
arr[...] = one
after2 = sys.getrefcount(one)
assert after2 - before2 == count * 3
del arr
gc.collect()
assert sys.getrefcount(one) == before2
assert sys.getrefcount(singleton) == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(
['shape', 'index', 'items_changed'],
[((3,), ([0, 2],), 2),
((3, 2), ([0, 2], slice(None)), 4),
((3, 2), ([0, 2], [1]), 2),
((3,), ([True, False, True]), 2)])
def test_structured_object_indexing(self, shape, index, items_changed,
dt, pat, count, singleton):
"""Structured object reference counting for advanced indexing."""
zero = 0
one = 1
arr = np.zeros(shape, dt)
gc.collect()
before_zero = sys.getrefcount(zero)
before_one = sys.getrefcount(one)
# Test item getting:
part = arr[index]
after_zero = sys.getrefcount(zero)
assert after_zero - before_zero == count * items_changed
del part
# Test item setting:
arr[index] = one
gc.collect()
after_zero = sys.getrefcount(zero)
after_one = sys.getrefcount(one)
assert before_zero - after_zero == count * items_changed
assert after_one - before_one == count * items_changed
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
"""Structured object reference counting for specialized functions.
The older functions such as take and repeat use different code paths
than item setting (at the time of writing).
"""
indices = [0, 1]
arr = np.array([pat] * 3, dt)
gc.collect()
before = sys.getrefcount(singleton)
res = arr.take(indices)
after = sys.getrefcount(singleton)
assert after - before == count * 2
new = res.repeat(10)
gc.collect()
after_repeat = sys.getrefcount(singleton)
assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
"""
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
'offsets':[0, 4]}, (2, 3))])
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
'offsets':[4]}, (2, 3))])
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[...] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
# Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
def test_list_recursion(self):
l = list()
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
def test_dict_recursion(self):
d = dict(names=['self'], formats=[None], offsets=[0])
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
assert_raises(TypeError, np.dtype, int, metadata='datum')
assert_raises(TypeError, np.dtype, int, metadata=1)
assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
assert_(d['a'].metadata == {'datum': 1})
def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['r','b'],"
" 'formats':['u1','u1'],"
" 'offsets':[0,2],"
" 'titles':['Red pixel','Blue pixel'],"
" 'itemsize':3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names':['r','b'], "
"'formats':['u1','u1'], "
"'offsets':[0,2], "
"'titles':['Red pixel','Blue pixel'], "
"'itemsize':4})")
def test_repr_structured_datetime(self):
dt = | np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')]) | numpy.dtype |
"""Test management of parameters."""
import numpy as np
import pandas as pd
import pytest
from psifr import fr
from cymr import parameters
@pytest.fixture()
def param_def_simple():
param = parameters.Parameters()
param.set_sublayers(f=['task'], c=['task'])
weights = {(('task', 'item'), ('task', 'item')): 'loc'}
param.set_weights('fc', weights)
param.set_weights('cf', weights)
return param
def test_param_simple(param_def_simple):
param_def = param_def_simple
assert param_def.fixed == {}
assert param_def.free == {}
assert param_def.dependent == {}
assert param_def.dynamic == {}
assert param_def.sublayers == {'f': ['task'], 'c': ['task']}
assert param_def.weights['fc'] == {(('task', 'item'), ('task', 'item')): 'loc'}
assert param_def.weights['cf'] == {(('task', 'item'), ('task', 'item')): 'loc'}
assert param_def.sublayer_param == {}
@pytest.fixture()
def param_def():
"""Parameter definitions."""
param = parameters.Parameters()
# options
param.set_options(scope='list')
# general parameter management
param.set_fixed(a=1, b=2)
param.set_fixed({'c': 3})
param.set_dependent(d='2 + mean([a, b])')
param.set_dynamic('study', e='distract / c')
param.set_free(f=[0, 1])
# network definition
param.set_sublayers(f=['task'], c=['loc', 'cat'])
weights = {
(('task', 'item'), ('loc', 'item')): 'loc',
(('task', 'item'), ('cat', 'item')): 'cat',
}
param.set_weights('fc', weights)
param.set_weights('cf', weights)
param.set_weights('ff', {('task', 'item'): 'loc + cat'})
# sublayer-varying parameters
param.set_sublayer_param('c', 'loc', {'B_enc': 'B_enc_loc'})
param.set_sublayer_param('c', 'cat', {'B_enc': 'B_enc_cat'})
return param
def test_param(param_def):
"""Test that parameter definitions are correct."""
assert param_def.options == {'scope': 'list'}
assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}
assert param_def.free == {'f': [0, 1]}
assert param_def.dependent == {'d': '2 + mean([a, b])'}
assert param_def.dynamic == {'study': {'e': 'distract / c'}}
assert param_def.sublayers == {'f': ['task'], 'c': ['loc', 'cat']}
assert param_def.weights['fc'] == {
(('task', 'item'), ('loc', 'item')): 'loc',
(('task', 'item'), ('cat', 'item')): 'cat',
}
assert param_def.weights['cf'] == {
(('task', 'item'), ('loc', 'item')): 'loc',
(('task', 'item'), ('cat', 'item')): 'cat',
}
assert param_def.weights['ff'] == {('task', 'item'): 'loc + cat'}
assert param_def.sublayer_param['c'] == {
'loc': {'B_enc': 'B_enc_loc'},
'cat': {'B_enc': 'B_enc_cat'},
}
@pytest.fixture()
def data():
"""Base test DataFrame."""
data = pd.DataFrame(
{
'subject': [
1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
],
'list': [
1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2
],
'trial_type': [
'study', 'study', 'study', 'recall', 'recall', 'recall',
'study', 'study', 'study', 'recall', 'recall', 'recall',
],
'position': [
1, 2, 3, 1, 2, 3,
1, 2, 3, 1, 2, 3,
],
'item': [
'absence', 'hollow', 'pupil', 'hollow', 'pupil', 'empty',
'fountain', 'piano', 'pillow', 'pillow', 'fountain', 'pillow',
],
'item_index': [
0, 1, 2, 1, 2, np.nan,
3, 4, 5, 5, 3, 5,
],
'task': [
1, 2, 1, 2, 1, np.nan,
1, 2, 1, 1, 1, 1,
],
'distract': [
1, 2, 3, np.nan, np.nan, np.nan,
3, 2, 1, np.nan, np.nan, np.nan,
],
}
)
return data
@pytest.fixture()
def split_data(data):
"""Data split into study and recall."""
merged = fr.merge_free_recall(data, study_keys=['distract'])
split = {
'study': fr.split_lists(merged, 'study', ['input', 'distract']),
'recall': fr.split_lists(merged, 'recall', ['input']),
}
return split
@pytest.fixture()
def patterns():
cat = np.zeros((24, 3))
cat[:8, 0] = 1
cat[8:16, 1] = 1
cat[16:, 2] = 1
sim_cat = np.zeros((24, 24))
sim_cat[:8, :8] = 1
sim_cat[8:16, 8:16] = 1
sim_cat[16:, 16:] = 1
patterns = {
'vector': {
'loc': np.eye(24),
'cat': cat,
},
'similarity': {
'loc': np.eye(24),
'cat': sim_cat,
},
}
return patterns
def test_set_dependent():
param = {'Lfc': 0.7}
dependent = {'Dfc': '1 - Lfc'}
updated = parameters.set_dependent(param, dependent)
expected = {'Lfc': 0.7, 'Dfc': 0.3}
np.testing.assert_allclose(updated['Dfc'], expected['Dfc'])
def test_set_dynamic(data):
param = {'B_distract': 0.2}
dynamic = {'study': {'B_enc': 'distract * B_distract'}}
study_data = fr.filter_data(data, 1, 1, 'study')
study = fr.split_lists(study_data, 'raw', ['distract'])
updated = parameters.set_dynamic(param, study, dynamic['study'])
expected = {'B_distract': 0.2, 'B_enc': [np.array([0.2, 0.4, 0.6])]}
np.testing.assert_allclose(updated['B_enc'][0], expected['B_enc'][0])
def test_dependent(param_def):
"""Test evaluation of dependent parameters."""
param = {'a': 1, 'b': 2}
param = param_def.eval_dependent(param)
assert param == {'a': 1, 'b': 2, 'd': 3.5}
def test_dynamic(param_def, split_data):
"""Test evaluation of dynamic parameters."""
param = {'c': 2}
param = param_def.eval_dynamic(param, study=split_data['study'])
np.testing.assert_array_equal(param['e'][0], np.array([0.5, 1, 1.5]))
np.testing.assert_array_equal(param['e'][1], np.array([1.5, 1, 0.5]))
def test_get_dynamic(param_def, split_data):
"""Test indexing of dynamic parameters."""
param = {'c': 2}
param = param_def.eval_dynamic(param, study=split_data['study'])
param1 = param_def.get_dynamic(param, 0)
np.testing.assert_array_equal(param1['e'], np.array([0.5, 1, 1.5]))
param2 = param_def.get_dynamic(param, 1)
np.testing.assert_array_equal(param2['e'], np.array([1.5, 1, 0.5]))
"""Orthogonal matching pursuit algorithms
"""
# Author: <NAME>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import array2d, as_float_array
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.arrayfuncs import solve_triangular
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active]
else:
return gamma, indices[:n_active]
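# Minimal usage sketch for _cholesky_omp (illustrative only; the _demo_* helper
# below is not part of this module and is never called): build a small random
# dictionary with unit-norm columns, synthesize a 3-sparse target, and recover
# its support.
def _demo_cholesky_omp():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 10)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))  # columns must have unit norm
    y_demo = np.dot(X_demo[:, [1, 4, 7]], rng.randn(3))
    gamma, idx = _cholesky_omp(X_demo, y_demo, n_nonzero_coefs=3)
    # `idx` holds the indices of the selected atoms, `gamma` their coefficients
    return sorted(idx), gamma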
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = | np.sqrt(1 - v) | numpy.sqrt |
"""
Sampling with or without weights, with or without replacement.
"""
import numpy as np
import math
from .cryptorandom import SHA256
def get_prng(seed=None):
"""Turn seed into a PRNG instance
Parameters
----------
seed : {None, int, object}
If seed is None, return a randomly seeded instance of SHA256.
If seed is an int, return a new SHA256 instance seeded with seed.
If seed is already a PRNG instance, return it.
Otherwise raise ValueError.
Returns
-------
object
"""
if seed is None:
seed = np.random.randint(0, 10**10, dtype=np.int64) # generate an integer
return SHA256(seed)
if isinstance(seed, (int, np.integer)):
return SHA256(seed)
if hasattr(seed, "random") and hasattr(seed, "randint"):
return seed
raise ValueError(f'{seed!r} cannot be used to seed a PRNG')
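# Minimal usage sketch for get_prng (illustrative only; never called): None
# gives a randomly seeded SHA256, an int gives a reproducible stream, and an
# existing PRNG instance is passed through unchanged.
def _demo_get_prng():
    prng_a = get_prng(12345)   # reproducible: same seed -> same stream
    prng_b = get_prng(prng_a)  # an existing PRNG is returned as-is
    assert prng_b is prng_a
    return prng_a.random(3)    # three pseudo-random floats in [0, 1)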
def random_sample(a, size, replace=False, fast=False, p=None, method="sample_by_index", prng=None):
'''
Random sample of size `size` from a population `a` drawn with or without weights,
with or without replacement.
If no weights are provided, the sample is drawn with equal probability of selecting every item.
If weights are provided, len(weights) must equal N.
Sampling methods available are:
* Fisher-Yates: sampling without weights, without replacement
* PIKK: sampling without weights, without replacement
* recursive: sampling without weights, without replacement
* Waterman_R: sampling without weights, without replacement
* Vitter_Z: sampling without weights, without replacement
* sample_by_index: sampling without weights, without replacement
* Exponential: sampling with weights, without replacement
* Elimination: sampling with weights, without replacement
Fisher-Yates, PIKK, sample_by_index, Exponential, and Elimination return ordered samples,
i.e. they are equally likely to return [1, 2] as they are to return [2, 1]. Waterman_R,
Vitter_Z, and recursive aren't guaranteed to randomize the order of items in the sample.
Parameters
----------
a : 1-D array-like or int
If an array or list, a random sample is generated from its elements.
If an int, the random sample is generated as if a were np.arange(a)
size : int
Sample size: the number of items to draw.
replace : boolean, optional
Whether the sample is with or without replacement.
Default False.
fast : boolean, optional
Whether to speed up sampling by not sampling with random order.
Default False.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all entries in a.
method : string
Which sampling function?
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
samples : single item or ndarray
The generated random samples
'''
prng = get_prng(prng)
if isinstance(a, (list, np.ndarray)):
N = len(a)
a = np.array(a)
elif isinstance(a, int):
N = a
a = np.arange(N)
assert N > 0, "Population size must be positive"
else:
raise ValueError("a must be an integer or array-like")
if p is not None:
assert len(p) == N
if not replace:
assert size <= N
methods = {
"Fisher-Yates" : lambda N, n: fykd_sample(N, n, prng=prng),
"PIKK" : lambda N, n: pikk(N, n, prng=prng),
"recursive" : lambda N, n: recursive_sample(N, n, prng=prng),
"Waterman_R" : lambda N, n: waterman_r(N, n, prng=prng),
"Vitter_Z" : lambda N, n: vitter_z(N, n, prng=prng),
"sample_by_index" : lambda N, n: sample_by_index(N, n, replace=replace, fast=fast, prng=prng),
"Exponential" : lambda n, p: exponential_sample(n, p, prng=prng),
"Elimination" : lambda n, p: elimination_sample(n, p, replace=replace, prng=prng)
}
if replace is False and p is None:
try:
sam = np.array(methods[method](N, size), dtype=int) - 1  # shift to 0 indexing
except ValueError:
print("Sampling method is incompatible with the inputs")
elif replace is True and method in ['Fisher-Yates', 'PIKK', 'recursive',
'Waterman_R', 'Vitter_Z']:
raise ValueError("Method is meant for sampling without replacement")
elif replace is True and method in ['sample_by_index']:
try:
sam = np.array(methods[method](N, size), dtype=int) - 1  # shift to 0 indexing
except ValueError:
print("Sampling method is incompatible with the inputs")
else:
try:
sam = np.array(methods[method](size, p), dtype=int) - 1
except ValueError:
print("Sampling method is incompatible with the inputs")
return a[sam]
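# Minimal usage sketch for random_sample (illustrative only; never called):
# an equal-probability draw and a weighted draw, both without replacement,
# from a population of 20 items. The weights used here are arbitrary.
def _demo_random_sample():
    simple = random_sample(20, 5, method="sample_by_index", prng=101)
    weights = np.arange(1, 21, dtype=float)  # heavier items are more likely
    weighted = random_sample(20, 5, p=weights, method="Elimination", prng=101)
    return simple, weighted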
def random_allocation(a, sizes, replace=False, fast=False, p=None, method="sample_by_index", prng=None):
'''
Random samples of sizes `sizes` from a population `a` drawn with or without replacement.
Parameters
----------
a : 1-D array-like or int
If an array or list, a random sample is generated from its elements.
If an int, the random sample is generated as if a were np.arange(a)
sizes : 1-D array-like
sizes of samples to return
replace : boolean, optional
Whether the sampling is with or without replacement.
Default False.
fast : boolean, optional
Whether to speed up sampling by not sampling with random order.
Default False.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all entries in a.
method : string
Which sampling function?
Default sample_by_index
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
samples : list of lists
The generated random samples
'''
if isinstance(a, (list, np.ndarray)):
N = len(a)
a = np.array(a)
indices = np.arange(N)
elif isinstance(a, int):
N = a
a = np.arange(N)
indices = np.arange(N)
assert N > 0, "Population size must be positive"
# raise error if without replacement and sample sizes greater than population size
if not replace and np.sum(sizes) > N:
raise ValueError('sample sizes greater than population size')
samples = [0] * len(sizes)
# sort sizes from smallest to largest
sizes.sort()
# get random samples for all the groups except the largest one
for i in range(len(sizes) - 1):
sam = random_sample(list(indices), sizes[i], replace, fast, p, method, prng)
samples[i] = a[sam]
if not replace:
indices = set(indices) - set(sam)
# get the sample for the largest group
if not replace and N == np.sum(sizes):
sam = list(indices)
else:
sam = random_sample(list(indices), sizes[-1], replace, fast, p, method, prng)
samples[-1] = a[sam]
return samples
def random_permutation(a, method="Fisher-Yates", prng=None):
'''
Construct a random permutation (re-ordering) of a population `a`.
The algorithms available are:
* Fisher-Yates: a shuffling algorithm
* random_sort: generate random floats and sort
* permute_by_index: sample integer indices without replacement
Parameters
----------
a : 1-D array-like or int
If an array or list, a random permutation is generated from its elements.
If an int, the random permutation is generated as if a were np.arange(a)
method : string
Which sampling function?
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
samples : single item or ndarray
The generated random samples
'''
prng = get_prng(prng)
if isinstance(a, (list, np.ndarray)):
N = len(a)
a = np.array(a)
elif isinstance(a, int):
N = a
a = np.arange(N)
assert N > 0, "Population size must be positive"
else:
raise ValueError("a must be an integer or array-like")
methods = {
"Fisher-Yates" : lambda N: fykd_sample(N, N, prng=prng),
"random_sort" : lambda N: pikk(N, N, prng=prng),
"permute_by_index" : lambda N: sample_by_index(N, N, prng=prng),
}
try:
sam = np.array(methods[method](N), dtype=int) - 1  # shift to 0 indexing
except ValueError:
print("Bad permutation algorithm")
return a[sam]
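# Minimal usage sketch for random_permutation (illustrative only; never
# called): shuffle a short list of labels with the Fisher-Yates algorithm.
def _demo_random_permutation():
    labels = ['a', 'b', 'c', 'd', 'e']
    shuffled = random_permutation(labels, method="Fisher-Yates", prng=7)
    assert sorted(shuffled) == labels  # same items, possibly new order
    return shuffled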
###################### Sampling functions #####################################
def fykd_sample(n, k, prng=None):
'''
Use the Fisher-Yates-Knuth-Durstenfeld (fykd) shuffle to sample k out of 1, ..., n without replacement
Parameters
----------
n : int
Population size
k : int
Desired sample size
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
a = np.array(range(1, n+1))
rand = prng.random(k)
ind = np.array(range(k))
JJ = np.array(ind + rand*(n - ind), dtype=int)
for i in range(k):
J = JJ[i]
a[i], a[J] = a[J], a[i]
return a[:k]
def pikk(n, k, prng=None):
'''
PIKK Algorithm: permute indices and keep k to draw a sample
from 1, ..., n without replacement.
Contrary to what Python does, this assumes indexing starts at 1.
Parameters
----------
n : int
Population size
k : int
Desired sample size
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
return np.argsort(prng.random(n))[0:k] + 1
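# Minimal usage sketch for pikk (illustrative only; never called): the values
# returned are 1-indexed, so a sample of 3 from a population of 10 lies in
# {1, ..., 10}.
def _demo_pikk():
    sam = pikk(10, 3, prng=42)
    assert len(sam) == 3 and all(1 <= s <= 10 for s in sam)
    return sam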
def recursive_sample(n, k, prng=None):
'''
Recursive sampling algorithm from Cormen et al
Draw a sample of size k from 1, ..., n without replacement
Note that if k is larger than the default recursion limit of 1000,
this function will throw an error. You can change the recursion
depth using `sys.setrecursionlimit()`.
Parameters
----------
n : int
Population size
k : int
Desired sample size
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
if k == 0:
return np.empty(0, dtype=int)
else:
S = recursive_sample(n-1, k-1, prng=prng)
i = prng.randint(1, n+1)
if i in S:
S = np.append(S, [n])
else:
S = np.append(S, [i])
return S
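# Minimal usage sketch for recursive_sample (illustrative only; never called):
# the recursion depth grows with k, so raise the interpreter limit before
# drawing large samples, as the docstring above notes.
def _demo_recursive_sample():
    import sys
    sys.setrecursionlimit(max(sys.getrecursionlimit(), 5000))
    return recursive_sample(10000, 2000, prng=3)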
def waterman_r(n, k, prng=None):
'''
Waterman's Algorithm R for reservoir SRSs
Draw a sample of size k from 1, ..., n without replacement
Parameters
----------
n : int
Population size
k : int
Desired sample size
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
S = np.array(range(1, k+1)) # fill the reservoir
for t in range(k+1, n+1):
i = prng.randint(1, t+1)
if i <= k:
S[i-1] = t
return S
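# Minimal usage sketch for waterman_r (illustrative only; never called):
# reservoir sampling keeps a fixed-size sample while scanning 1, ..., n once.
def _demo_waterman_r():
    reservoir = waterman_r(10**4, 5, prng=2020)
    assert len(reservoir) == 5
    return reservoir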
def vitter_z(n, k, prng=None):
'''
Vitter's Algorithm Z for reservoir SRSs (Vitter 1985).
Draw a sample of size k from 1, ..., n without replacement
Parameters
----------
n : int
Population size
k : int
Desired sample size
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
def Algorithm_X(n, t):
V = prng.random()
s = 0
numer = math.factorial(t+s+1-n)/math.factorial(t-n)
denom = math.factorial(t+s+1)/math.factorial(t)
frac = numer/denom
while frac > V:
s += 1
numer = (t+s+1-n)*numer
denom = (t+s+1)*denom
frac = numer/denom
return s
def f(x, t):
numer = math.factorial(t-k+x)/math.factorial(t-k-1)
denom = math.factorial(t+x+1)/math.factorial(t)
return numer/denom * k/(t-k)
def g(x, t):
assert x >= 0
return k/(t+x) * (t/(t+x))**k
def h(x, t):
assert x >= 0
return k/(t+1) * ((t-k+1)/(t+x-k+1))**(k+1)
def c(t):
return (t+1)/(t-k+1)
sam = np.array(range(1, k+1)) # fill the reservoir
t = k
while t < n:
# Determine how many unseen records, nu, to skip
if t <= 22*k: # the choice of 22 is taken from Vitter's 1985 ACM paper
nu = Algorithm_X(k, t)
else:
var = -2
U = 2
while U > var:
V = prng.random()
X = t*(V**(-1/k) - 1)
U = prng.random()
if U <= h(np.floor(X), t)/(c(t)*g(X, t)):
break
var = f(np.floor(X), t)/(c(t)*g(X, t))
nu = np.floor(X)
if t+nu < n:
# Make the next record a candidate, replacing one at random
i = prng.randint(0, k)
sam[i] = int(t+nu+1)
t = t+nu+1
return sam
def sample_by_index(n, k, replace=False, fast=False, prng=None):
'''
Select indices uniformly at random to
draw a sample of size k from 1, ..., n without replacement
Parameters
----------
n : int
Population size
k : int
Desired sample size
replace : boolean, optional
Whether the sample is with or without replacement.
Default False.
fast : boolean, optional
Whether to speed up sampling by not sampling with random order.
Default False.
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled, in random order if not fast
'''
# raise error if without replacement and sample size greater than population size
if not replace and k > n:
raise ValueError('sample size greater than population size')
prng = get_prng(prng)
Pop = list(range(1, n + 1))
# check if with replacement
if replace:
w = prng.randint(1, n + 1, size = k)
S = [Pop[i] for i in (w - 1)]
else:
if fast:
num_sample = min(k, n - k)
else:
num_sample = k
# initialize sample
S = []
# sample min of k and n-k indices
for i in range(num_sample):
w = prng.randint(1, n - i + 1)
S.append(Pop[w - 1])
lastvalue = Pop.pop()
if w < (n - i):
Pop[w - 1] = lastvalue # Move last population item to the wth position
if n - k < k and fast:
S = list(set(range(1, n + 1)) - set(S))
return np.array(S)
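# Minimal usage sketch for sample_by_index (illustrative only; never called):
# without replacement the draws are distinct; with replacement repeats may
# occur.
def _demo_sample_by_index():
    no_repl = sample_by_index(10, 4, replace=False, prng=11)
    assert len(set(no_repl)) == 4
    with_repl = sample_by_index(10, 4, replace=True, prng=11)
    return no_repl, with_repl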
def elimination_sample(k, p, replace=True, prng=None):
'''
Weighted random sample of size k from 1, ..., n drawn with or without replacement.
The algorithm is inefficient but transparent.
Walker's alias method is more efficient.
Parameters
----------
k : int
Desired sample size
p : 1-D array-like, optional
The probabilities associated with each value in 1, ... n.
replace : boolean, optional
Whether the sample is with or without replacement.
Default True.
prng : {None, int, object}
If prng is None, return a randomly seeded instance of SHA256.
If prng is an int, return a new SHA256 instance seeded with seed.
If prng is already a PRNG instance, return it.
Returns
-------
list of items sampled
'''
prng = get_prng(prng)
weights = np.array(p).astype(float) # ensure the weights are floats
if any(weights < 0):
raise ValueError('negative item weight')
else:
n = len(weights)
if replace:
wc = np.cumsum(weights)/np.sum(weights) # normalize the weights
sam = prng.random(size=k)
return wc.searchsorted(sam)+1
else:
if k > n:
raise ValueError('sample size larger than population in '
'sample without replacement')
elif k == n:
return np.array(range(1, k + 1))  # all items selected; keep the 1-based indexing used elsewhere
else:
weights_left = np.copy(weights)
indices_left = list(range(n))
sam = np.full(k, -1)
for i in range(k):
# normalize remaining weights
wc = | np.cumsum(weights_left) | numpy.cumsum |
#!/usr/bin/env python3
#
# Calculates the mean and std of temperatures used in midpoint reports
#
import base
import numpy as np
import matplotlib.pyplot as pl
DEBUG = False
def tasks():
"""
Returns a list of the tasks in this file.
"""
return [
MidpointTemperatures(),
]
class MidpointTemperatures(base.Task):
def __init__(self):
super(MidpointTemperatures, self).__init__('midpoint_temperatures')
self._set_data_subdir('midpointswt')
def gather(self, q):
# Query db
tmin, tmax, tmid = [], [], []
with base.connect() as con:
c = con.cursor()
for row in c.execute(q):
tmin.append(row['tmin'])
tmax.append(row['tmax'])
tmid.append(0.5 * (row['tmin'] + row['tmax']))
# Create list of tmin, tmax, tmid
tmin = np.array(tmin)
tmax = np.array(tmax)
tmid = np.array(tmid)
print('TMid, mean: ' + str(np.mean(tmid)))
print('TMid, std : ' + str(np.std(tmid)))
print('2Sigma range: ' + str(4*np.std(tmid)))
print('Min, max: ' + str(np.min(tmin)) + ' ' + str(np.max(tmax)))
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
import ExpUtils as util
class MFNN:
def __init__(self, config, TF_GPU_USAGE=0.25):
self.config = config
self.SynData = self.config['SynData']
self.dim = self.SynData.dim
self.M = self.SynData.Nfid
self.MfData = self.SynData.data
self.encode = self.config['feature']
# Train/Test Input/Output holders
self.tf_Xtrain_list = []
self.tf_ytrain_list = []
self.tf_Xtest_list = []
self.tf_ytest_list = []
for m in range(self.M):
self.tf_Xtrain_list.append(tf.compat.v1.placeholder(util.tf_type, [None, self.dim]))
self.tf_ytrain_list.append(tf.compat.v1.placeholder(util.tf_type, [None, 1]))
self.tf_Xtest_list.append(tf.compat.v1.placeholder(util.tf_type, [None, self.dim]))
self.tf_ytest_list.append(tf.compat.v1.placeholder(util.tf_type, [None, 1]))
# Linear Mapping Weights
self.tf_Wvar_Chol_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
scale = 1.0  # initialize with smaller values when there are numerical errors
Lm = tf.linalg.band_part(scale*tf.Variable(tf.eye(Km+1), dtype=util.tf_type), -1, 0)
self.tf_Wvar_Chol_list.append(Lm)
#
self.tf_W_list = []
self.tf_Wm_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
dist_noise = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros([Km+1]), scale_diag=tf.ones([Km+1]))
Wm = tf.Variable(tf.random.truncated_normal([Km+1,1]), dtype=util.tf_type)
self.tf_Wm_list.append(Wm)
self.tf_W_list.append(Wm+self.tf_Wvar_Chol_list[m]@tf.reshape(dist_noise.sample(),[-1,1]))
# noise prior
self.tf_log_gam_a = tf.Variable(-10, dtype=util.tf_type)
self.tf_log_gam_b = tf.Variable(-10, dtype=util.tf_type)
self.noise_gam_prior = tfp.distributions.Gamma(
tf.exp(self.tf_log_gam_a), tf.exp(self.tf_log_gam_b)
)
# noise observations
self.tf_tau_list = []
for m in range(self.M):
logtau_m = tf.Variable(0.0, dtype=util.tf_type)
self.tf_tau_list.append(tf.exp(logtau_m))
# initialize NN
self.mf_encode_list = self.init_feature_encode(self.encode)
# concatenate NN with linear projection
self.mf_outputs, self.mf_aug_features = self.init_mf_outputs(self.tf_Xtrain_list, self.tf_W_list)
self.mf_pred_outputs, self.mf_pred_aug_features = self.init_mf_outputs(self.tf_Xtest_list, self.tf_W_list)
self.expect_llh = self.eval_expect_llh()
self.KL = self.eval_divergence()
# negative evidence lower bound
self.nelbo = -(self.expect_llh - self.KL)
self.optimizer = tf.compat.v1.train.AdamOptimizer(self.config['learning_rate'])
self.minimizer = self.optimizer.minimize(self.nelbo)
self.Xquery = tf.Variable(tf.random.uniform(minval=self.SynData.lb, maxval=self.SynData.ub, shape=[1,self.dim]), dtype=util.tf_type)
self.tf_Ws_list = []
for m in range(self.M):
Km = self.encode['Klist'][m]
self.tf_Ws_list.append(tf.compat.v1.placeholder(util.tf_type, [Km+1, 1]))
self.ws_fstar, self.ws_aug_feature = self.mf_output(self.Xquery, self.M-1, self.tf_Ws_list)
self.nfstar = -tf.squeeze(self.ws_fstar)
self.nfstar_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.nfstar,
method='L-BFGS-B',
var_to_bounds={self.Xquery: [self.SynData.lb, self.SynData.ub]},
var_list=[self.Xquery],
options={'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'eps':self.config['Fstar']['rate'],
'ftol' : 1.0 * np.finfo(float).eps},)
# finding inference maximum
self.Xinfer = tf.Variable(tf.random.uniform(minval=self.SynData.lb, maxval=self.SynData.ub, shape=[1,self.dim]), dtype=util.tf_type)
self.infer_star, self.infer_aug_feature = self.mf_output(self.Xinfer, self.M-1, self.tf_W_list)
self.neg_infer_maximum = -tf.squeeze(self.infer_star)
self.neg_infer_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.neg_infer_maximum,
method='L-BFGS-B',
var_to_bounds={self.Xinfer: [self.SynData.lb, self.SynData.ub]},
var_list=[self.Xinfer],
options={'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'eps':self.config['Infer']['rate'],
'ftol' : 1.0 * np.finfo(float).eps},)
gpu_options =tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=TF_GPU_USAGE)
self.sess = tf.compat.v1.Session(
config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
gpu_options=gpu_options,
)
)
self.sess.run(tf.compat.v1.global_variables_initializer())
def init_feature_encode(self, encode):
"""Initialize the feature encoding(NN) weights and biases"""
feature_encode_list = []
for m in range(self.M):
if m == 0:
layers = [self.dim] + encode['hlayers'][m] + [encode['Klist'][m]]
else:
layers = [self.dim+1] + encode['hlayers'][m] + [encode['Klist'][m]]
# end if
nn = util.EncodeNN(layers, init=encode['init'], activation=encode['activation'])
feature_encode_list.append(nn)
# end for
return feature_encode_list
def mf_output(self, X, m, Wlist):
# base fidelity
feature = self.mf_encode_list[0].forward(X)
augment_feature = tf.pad(feature, tf.constant([[0,0],[0,1]]), constant_values=1.0)
output = tf.matmul(augment_feature, Wlist[0])
for l in range(1, m+1):
augment_input = tf.concat([output, X], axis=1)
feature = self.mf_encode_list[l].forward(augment_input)
augment_feature = tf.pad(feature, tf.constant([[0,0],[0,1]]), constant_values=1.0)
output = tf.matmul(augment_feature, Wlist[l])
# end for
return output, augment_feature
def init_mf_outputs(self, Xlist, Wlist):
outputs = []
features = []
for m in range(self.M):
output, feature = self.mf_output(Xlist[m], m, Wlist)
outputs.append(output)
features.append(feature)
return outputs, features
def eval_divergence(self):
expect = []
for m in range(self.M):
Km = self.encode['Klist'][m]
Lm = self.tf_Wvar_Chol_list[m]
mu = self.tf_W_list[m]
log_det_Lm = -0.5*tf.reduce_sum(tf.math.log(tf.square(tf.linalg.diag_part(Lm))))
log_expect_m = -0.5*(Km+1)*tf.math.log(2*np.pi) -\
0.5*(tf.linalg.trace(tf.matmul(Lm, tf.transpose(Lm))) + tf.reduce_sum(mu*mu))
expect.append(log_det_Lm - log_expect_m)
return tf.add_n(expect)
def eval_expect_llh(self):
expect = []
Nlist = self.config['SynData'].Ntrain_list
for m in range(self.M):
Nm = Nlist[m]
phi_m = self.mf_aug_features[m]
mu_m = self.tf_W_list[m]
Lm = self.tf_Wvar_Chol_list[m]
tau_m = self.tf_tau_list[m]
ym = self.tf_ytrain_list[m]
LmLmT = tf.matmul(Lm, tf.transpose(Lm))
mumuT = tf.matmul(mu_m, tf.transpose(mu_m))
tr_phi_Lm_mu = tf.linalg.trace(tf.transpose(phi_m) @ phi_m @ (LmLmT + mumuT))
ym_phi_mu = tf.squeeze(ym*ym - 2*ym*tf.matmul(phi_m, mu_m))
expect_m = 0.5*Nm*tf.math.log(tau_m) - 0.5*tau_m*(tf.reduce_sum(ym_phi_mu) + tr_phi_Lm_mu) +\
self.noise_gam_prior.log_prob(tau_m)
expect.append(expect_m)
# end for
return tf.add_n(expect)
def train(self):
hist_train_err = []
hist_test_err = []
fdict = {}
for m in range(self.M):
Dm = self.MfData[m]
fdict[self.tf_Xtrain_list[m]] = Dm['Xtrain']
fdict[self.tf_ytrain_list[m]] = Dm['ytrain']
fdict[self.tf_Xtest_list[m]] = Dm['Xtest']
for it in tqdm(range(self.config['epochs'] + 1)):
self.sess.run(self.minimizer, feed_dict = fdict)
if it % 100 == 0:
nelbo = self.sess.run(self.nelbo, feed_dict=fdict)
mf_pred = self.sess.run(self.mf_pred_outputs, feed_dict=fdict)
mf_pred_train = self.sess.run(self.mf_outputs, feed_dict=fdict)
if self.config['verbose']:
print('it %d: nelbo = %.5f' % (it, nelbo))
for m in range(self.M):
pred_m = mf_pred[m]
pred_m_train = mf_pred_train[m]
ground_ytest = self.MfData[m]['ytest']
ground_ytrain = self.MfData[m]['ytrain']
err_test = np.sqrt(np.mean(np.square(pred_m - ground_ytest)))
err_train = np.sqrt(np.mean(np.square(pred_m_train - ground_ytrain)))
hist_train_err.append(err_train)
hist_test_err.append(err_test)
if self.config['verbose'] or it == self.config['epochs']:
print(' - fid %d: train_nrmse = %.5f, test_nrmse = %.5f' % (m, err_train, err_test))
Fstar, Xstar = self.collect_fstar()
infer_opt, infer_optser = self.eval_infer_opt()
return Fstar, infer_optser, self.sess
def collect_fstar(self):
Wpost = []
Lpost = []
for m in range(self.M):
Wpost.append(self.sess.run(self.tf_W_list[m]))
Lpost.append(self.sess.run(self.tf_Wvar_Chol_list[m]))
Fstar = []
Xstar = []
for s in range(self.config['Fstar']['Ns']):
fdict = {}
for m in range(self.M):
Ws = np.random.multivariate_normal(np.squeeze(Wpost[m]), np.matmul(Lpost[m], Lpost[m].T))
# Mobility as a function of volume fraction
import matplotlib.pyplot as plt
import numpy as np
import pystokes, os, sys
# Parameters
a, eta, dim = 1.0, 1.0/6, 3
Np, Nb, Nm = 1, 1, 8
ta =(4*np.pi/3)**(1.0/3)
L = ta/np.arange(0.01, 0.4, 0.01)
# Memory allocation
v = np.zeros(dim*Np)
r = np.zeros(dim*Np)
F = np.zeros(dim*Np)
vv = np.zeros(np.size(L))
phi = np.zeros(np.size(L) )
mu=1.0/(6*np.pi*eta*a)
print(r'\phi', ' ', r'\mu')
for i in range(np.size(L)):
v = v*0
F = F*0
r[0], r[1], r[2] = 0.0, 0.0, 0.0
ff = pystokes.forceFields.Forces(Np)
ff.sedimentation(F, g=-1)
pRbm = pystokes.periodic.Rbm(a, Np, eta, L[i])
pRbm.mobilityTT(v, r, F, Nb, Nm)
phi[i] = (4*np.pi*a**3)/(3*L[i]**3)
mu00 = mu*F[2]
vv[i] = v[2]/mu00
print (phi[i], ' ', vv[i])
slope, intercept = | np.polyfit(phi**(1.0/3), vv, 1) | numpy.polyfit |
import Htool
import numpy as np
import mpi4py
# Custom generator
class Generator(Htool.IMatrix):
def __init__(self,points_target,points_source):
super().__init__(points_target.shape[1],points_source.shape[1])
self.points_target=points_target
self.points_source=points_source
def get_coef(self, i , j):
return 1.0 / (1e-5 + np.linalg.norm(self.points_target[:,i] - self.points_source[:,j]))
def build_submatrix(self, J , K, mat):
for j in range(0,len(J)):
for k in range(0,len(K)):
mat[j,k] = 1.0 / (1.e-5 + np.linalg.norm(self.points_target[:,J[j]] - self.points_source[:,K[k]]))
def mat_vec(self,x):
y = np.zeros(self.nb_rows())
for i in range(0,self.nb_rows()):
for j in range(0,self.nb_cols()):
y[i]+=self.get_coef(i,j)*x[j]
return y
def mat_mat(self,X):
Y = np.zeros((self.nb_rows(), X.shape[1]))
for i in range(0,self.nb_rows()):
for j in range(0,X.shape[1]):
for k in range(0,self.nb_cols()):
Y[i,j]+=self.get_coef(i, k)*X[k,j]
return Y
# Htool parameters
eta = 10
epsilon = 1e-3
minclustersize = 10
# Random geometry
NbRows = 500
NbCols = 500
np.random.seed(0)
points_target=np.zeros((2,NbRows))
sizeworld = mpi4py.MPI.COMM_WORLD.Get_size()
local_size=int(NbRows/sizeworld)
MasterOffset=np.zeros((2,sizeworld))
for i in range(0,sizeworld-1):
MasterOffset[0,i]=i*local_size
MasterOffset[1,i]=local_size
points_target[0,i*local_size:(i+1)*local_size] = i
points_target[0,(sizeworld-1)*local_size:] = sizeworld-1
MasterOffset[0,sizeworld-1]=(sizeworld-1)*local_size
MasterOffset[1,sizeworld-1]=NbRows-(sizeworld-1)*local_size
points_target[1,:] = np.random.random(NbRows)
# Cluster target
cluster_target = Htool.PCARegularClustering(2)
cluster_target.set_minclustersize(minclustersize)
cluster_target.build(NbRows,points_target,MasterOffset,2)
if NbRows==NbCols:
points_source=points_target
cluster_source=cluster_target
else:
points_source=np.zeros((2,NbCols))
points_source[0,:] = np.random.random(NbCols)
points_source[1,:] = np.random.random(NbCols)
cluster_source = None
# Build H matrix
generator = Generator(points_target,points_source)
HMatrix_test = Htool.HMatrix(cluster_target,cluster_source,epsilon,eta)
HMatrix_test.build(generator,points_target,points_source)
# Test matrix vector product
x = np.random.rand(NbCols)
y_1 = HMatrix_test*x
y_2 = generator.mat_vec(x)
print( | np.linalg.norm(y_1-y_2) | numpy.linalg.norm |
import os
import numpy as np
from nipype.interfaces.base import SimpleInterface, TraitedSpec, traits, File
from nipype.utils.filemanip import fname_presuffix
class ClipInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True, desc="Input imaging file")
out_file = File(desc="Output file name")
minimum = traits.Float(-np.inf, usedefault=True,
desc="Values under minimum are set to minimum")
maximum = traits.Float(np.inf, usedefault=True,
desc="Values over maximum are set to maximum")
class ClipOutputSpec(TraitedSpec):
out_file = File(desc="Output file name")
class Clip(SimpleInterface):
""" Simple clipping interface that clips values to specified minimum/maximum
If no values are outside the bounds, nothing is done and the in_file is passed
as the out_file without copying.
"""
input_spec = ClipInputSpec
output_spec = ClipOutputSpec
def _run_interface(self, runtime):
import nibabel as nb
img = nb.load(self.inputs.in_file)
data = img.get_fdata()
out_file = self.inputs.out_file
if out_file:
out_file = os.path.join(runtime.cwd, out_file)
if np.any((data < self.inputs.minimum) | (data > self.inputs.maximum)):
if not out_file:
out_file = fname_presuffix(self.inputs.in_file, suffix="_clipped",
newpath=runtime.cwd)
| np.clip(data, self.inputs.minimum, self.inputs.maximum, out=data) | numpy.clip |
"""Functions library used to calculate phase diagrams for the "Robustness of
Majorana bound states in the short junction limit" paper by
<NAME>, <NAME>, and <NAME>
arXiv:1609.00637, to be published in PRB."""
# 1. Standard library imports
from itertools import product
import subprocess
from types import SimpleNamespace
# 2. External package imports
from discretizer import Discretizer, momentum_operators
import holoviews as hv
import ipyparallel
import kwant
import numpy as np
import scipy.sparse.linalg as sla
from scipy.constants import hbar, m_e, eV, physical_constants
from scipy.linalg import expm
from scipy.optimize import minimize_scalar
from sympy.physics.quantum import TensorProduct as kr
import sympy
# 3. Internal imports
from wraparound import wraparound
sx, sy, sz = [sympy.physics.matrices.msigma(i) for i in range(1, 4)]
s0 = sympy.eye(2)
class SimpleNamespace(SimpleNamespace):
"""Updates types.SimpleNamespace to have a .update() method.
Useful for parallel calculation."""
def update(self, **kwargs):
self.__dict__.update(kwargs)
return self
def get_git_revision_hash():
"""Get the git hash to save with data to ensure reproducibility."""
git_output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
return git_output.decode("utf-8").replace('\n', '')
# Parameters taken from arXiv:1204.2792
# All constant parameters, mostly fundamental constants, in a SimpleNamespace.
constants = SimpleNamespace(
m=0.015 * m_e, # effective mass in kg
g=50, # Lande factor
hbar=hbar,
m_e=m_e,
eV=eV,
meV=eV * 1e-3)
constants.t = (constants.hbar ** 2 / (2 * constants.m)) * (1e18 / constants.meV) # meV * nm^2
constants.mu_B = physical_constants['Bohr magneton'][0] / constants.meV
constants.delta_2d = constants.hbar**2 * np.pi**2 / (8 * (100e-9)**2 * constants.m) / constants.meV
constants.unit_B = 2 * constants.delta_2d / (constants.g * constants.mu_B)
# Dimensions used in holoviews objects.
d = SimpleNamespace(B=hv.Dimension('$B$', unit='T'),
mu=hv.Dimension('$\mu$', unit='meV'),
gap=hv.Dimension(('gap', r'$E_\mathrm{gap}/\Delta$')),
E=hv.Dimension('$E$', unit='meV'),
k=hv.Dimension('$k_x$'),
xi_inv=hv.Dimension(r'$\xi^-1$', unit=r'nm$^-1$'),
xi=hv.Dimension(r'$\xi$', unit=r'nm'))
def make_params(alpha=20,
B_x=0,
B_y=0,
B_z=0,
mu=0,
mu_sc=0,
mu_sm=0,
mu_B=constants.mu_B,
t=constants.t,
g=constants.g,
orbital=False,
**kwargs):
"""Function that creates a namespace with parameters.
Parameters:
-----------
alpha : float
Spin-orbit coupling strength in units of meV*nm.
B_x, B_y, B_z : float
The magnetic field strength in the x, y and z direction in units of Tesla.
Delta : float
The superconducting gap in units of meV.
mu : float
The chemical potential in units of meV.
mu_sm, mu_sc : float
The chemical potential in the SM and SC, in units of meV.
mu_B : float
Bohr magneton in meV/T.
t : float
Hopping parameter in meV * nm^2.
g : float
Lande g factor.
orbital : bool
Switches the orbital effects on and off.
Returns:
--------
p : SimpleNamespace object
A simple container that is used to store Hamiltonian parameters.
"""
return SimpleNamespace(t=t,
g=g,
mu_B=mu_B,
alpha=alpha,
B_x=B_x,
B_y=B_y,
B_z=B_z,
mu=mu,
mu_sc=mu_sc,
mu_sm=mu_sm,
orbital=orbital,
**kwargs)
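# Minimal usage sketch for make_params (illustrative only; the values below
# are arbitrary and the helper is never called): a parameter set with a field
# along the wire and orbital effects switched on. Delta is passed through
# **kwargs and is only needed for the BdG (holes=True) Hamiltonian.
def _example_params():
    return make_params(alpha=20, B_x=0.25, mu_sm=0.5, mu_sc=10,
                       Delta=0.25, orbital=True)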
def trs(m):
"""Apply time reversal symmetry to a column vector or matrix m.
The time reversal symmetry is given by the operator i * sigma_y * K, with K
complex conjugation and sigma_y acting on the spin degree of freedom.
Parameters:
-----------
m : numpy array
The vector or matrix to which TRS is applied.
Returns:
--------
m_reversed : numpy array
The vector TRS * m as a NumPy array.
Notes:
------
Implementation inspired by kwant.rmt.
"""
permutation = np.arange(m.shape[0])
sign = 2 * (permutation % 2) - 1
permutation -= sign
return sign.reshape(-1, 1) * m.conj()[permutation]
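# Minimal check of the antiunitary character of trs (illustrative only; never
# called): applying it twice to a spin-1/2 spinor returns minus the original
# vector, i.e. T^2 = -1, as expected for a time reversal operator of the form
# i * sigma_y * K.
def _demo_trs():
    psi = np.array([[1.0 + 2.0j], [0.5 - 1.0j]])
    assert np.allclose(trs(trs(psi)), -psi)
    return trs(psi)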
class TRIInfiniteSystem(kwant.builder.InfiniteSystem):
def __init__(self, lead, trs):
"""A lead with time reversal invariant modes."""
self.__dict__ = lead.__dict__
self.trs = trs
def modes(self, energy=0, args=()):
prop_modes, stab_modes = \
super(TRIInfiniteSystem, self).modes(energy=energy, args=args)
n = stab_modes.nmodes
stab_modes.vecs[:, n:(2*n)] = self.trs(stab_modes.vecs[:, :n])
stab_modes.vecslmbdainv[:, n:(2*n)] = \
self.trs(stab_modes.vecslmbdainv[:, :n])
prop_modes.wave_functions[:, n:] = \
self.trs(prop_modes.wave_functions[:, :n])
return prop_modes, stab_modes
def discretized_hamiltonian(a, dim, holes=False):
"""Discretizes a Hamiltonian.
Parameters:
-----------
a : int
Lattice constant in nm.
dim : int
Dimension of system, 2D or 3D.
holes : bool
If False, Hamiltonian will only be in spin-space,
if True also in particle-hole space (BdG Hamiltonian),
used for calculating Majorana decay length.
"""
if dim not in [2, 3]: raise(NotImplementedError)
k_x, k_y, k_z = momentum_operators
t, B_x, B_y, B_z, mu_B, mu, mu_sm, mu_sc, alpha, g, V, Delta = sympy.symbols(
't B_x B_y B_z mu_B mu mu_sm mu_sc alpha g V Delta', real=True)
k = sympy.sqrt(k_x**2 + k_y**2 + (k_z**2 if dim==3 else 0))
if not holes:
ham = ((t * k**2 - mu) * s0 +
alpha * (k_y * sx - k_x * sy) +
0.5 * g * mu_B * (B_x * sx + B_y * sy + B_z * sz))
else:
ham = ((t * k**2 - mu) * kr(s0, sz) +
alpha * (k_y * kr(sx, sz) - k_x * kr(sy, sz)) +
0.5 * g * mu_B * (B_x * kr(sx, s0) + B_y * kr(sy, s0)) +
Delta * kr(s0, sx))
args = dict(lattice_constant=a, discrete_coordinates=set(['x', 'y', 'z'][:dim]))
tb_sm = Discretizer(ham.subs(mu, mu_sm).subs(Delta, 0), **args)
tb_sc = Discretizer(ham.subs([(g, 0), (mu, mu_sc), (alpha, 0), (k_x, 0), (k_z, 0)]), **args)
return tb_sm, tb_sc
def peierls(val, ind, a, c=constants):
"""Peierls substitution, takes hopping functions.
See usage in NS_infinite_2D_3D()"""
def phase(s1, s2, p):
A_site = [0, 0, p.B_x * s1.pos[1]][ind] * a * 1e-18 * c.eV / c.hbar
return np.exp(-1j * A_site)
def with_phase(s1, s2, p):
hop = val(s1, s2, p).astype('complex128')
phi = phase(s1, s2, p)
if p.orbital:
if hop.shape[0] == 2:
hop *= phi
elif hop.shape[0] == 4:
hop *= np.array([phi, phi.conj(), phi, phi.conj()], dtype='complex128')
return hop
return with_phase
def NS_infinite_2D_3D(a=10, W=100, H=100, dim=3, normal_lead=False, sc_lead=True, holes=False):
"""Makes a square shaped wire.
Parameters:
-----------
a : int
Lattice constant in nm.
W : int
Width of system in nm.
H : int
Height of system in nm (ignored if dim=2).
dim : int
Dimension of system, 2D or 3D.
normal_lead : bool
Attaches a SM lead to the sytem, used for
calculating transmission.
sc_lead : bool
Attaches a SC lead to the sytem.
holes : bool
If False, Hamiltonian will only be in spin-space,
if True also in particle-hole space, used for calculating
Majorana decay length.
Returns:
--------
syst : kwant.builder.(In)finiteSystem object
The finalized (in)finite system.
"""
tb_sm, tb_sc = discretized_hamiltonian(a, dim, holes)
lat = tb_sm.lattice
syst = kwant.Builder(kwant.TranslationalSymmetry((a, 0, 0)[:dim]))
lead_sc = kwant.Builder(kwant.TranslationalSymmetry((a, 0, 0)[:dim], (0, -a, 0)[:dim]))
lead_sm = kwant.Builder(kwant.TranslationalSymmetry((a, 0, 0)[:dim], (0, -a, 0)[:dim]))
if dim == 2:
def shape_func_sm(W, H):
def shape(pos):
(x, y) = pos
return 0 < y <= W
return (shape, (0, W/2))
def shape_func_sc(H):
def shape(pos):
(x, y) = pos
return y <= 0
return (shape, (0, 0))
elif dim == 3:
def shape_func_sm(W, H):
def shape(pos):
(x, y, z) = pos
return 0 < y <= W and -H/2 < z <= H/2
return (shape, (0, W, 0))
def shape_func_sc(H):
def shape(pos):
(x, y, z) = pos
return y <= 0 and -H/2 < z <= H/2
return (shape, (0, 0, 0))
shape_sm = shape_func_sm(W, H)
shape_sc = shape_func_sc(H)
syst[lat.shape(*shape_sm)] = tb_sm.onsite
lead_sc[lat.shape(*shape_sc)] = tb_sc.onsite
lead_sm[lat.shape(*shape_sc)] = tb_sm.onsite
for hop, val in tb_sm.hoppings.items():
ind = np.argmax(hop.delta)
syst[hop] = peierls(val, ind, a)
lead_sm[hop] = val
for hop, val in tb_sc.hoppings.items():
lead_sc[hop] = val
syst = wraparound(syst)
if sc_lead:
syst.attach_lead(wraparound(lead_sc, keep=1))
if normal_lead:
syst.attach_lead(wraparound(lead_sm, keep=1).reversed())
fsyst = syst.finalized()
fsyst.leads = [TRIInfiniteSystem(lead, trs) for lead in fsyst.leads]
return fsyst
def energy_operator(syst, p, k_x):
"""Returns the operator of Eq. (11) of paper.
Parameters:
-----------
syst : kwant.builder.InfiniteSystem object
The finalized system.
p : types.SimpleNamespace object
A simple container that is used to store Hamiltonian parameters.
k_x : float
Momentum for which the energies are calculated.
Returns:
--------
operator : numpy array
Operator in Eq. (11)."""
smat_min = kwant.smatrix(syst, args=[p, -k_x]).data
smat_plus = kwant.smatrix(syst, args=[p, +k_x]).data
smat_prod = smat_plus.T.conj() @ smat_min.T
return 0.5 * np.eye(smat_prod.shape[0]) - 0.25 * (smat_prod + smat_prod.T.conj())
def energies_over_delta(syst, p, k_x):
"""Same as energy_operator(), but returns the
square-root of the eigenvalues"""
operator = energy_operator(syst, p, k_x)
return np.sqrt(np.linalg.eigvalsh(operator))
def find_gap(syst, p, num=201):
"""Find the mimimum in energy in a range of momenta in one third
of the Brillioun zone.
Parameters:
-----------
syst : kwant.builder.InfiniteSystem object
The finalized system.
p : types.SimpleNamespace object
A simple container that is used to store Hamiltonian parameters.
num : int
Number of momenta, more momenta are needed with orbital effects
in 3D at B_x > 0.5 T.
Returns:
--------
(E, k_x) : tuple of floats
Tuple of minimum energy found at k_x.
"""
ks = np.linspace(-0.001, 1, num)
eigvals = np.array([energies_over_delta(syst, p, k_x) for k_x in ks])
ind_min, _ = np.unravel_index(eigvals.argmin(), eigvals.shape)
if ind_min == 0:
bounds = (ks[0], ks[1])
elif ind_min == num - 1:
bounds = (ks[num - 2], ks[num-1])
else:
bounds = (ks[ind_min - 1], ks[ind_min + 1])
res = minimize_scalar(lambda k_x: energies_over_delta(syst, p, k_x).min(),
bounds=bounds, method='bounded')
k_x = res.x
E = res.fun
return E, k_x
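# Usage sketch (illustrative values, not taken from the paper): scan the induced
# gap E_gap/Delta versus field at fixed chemical potential, mirroring how
# plot_phase() calls find_gap() in parallel. It assumes `p` is the usual
# SimpleNamespace-style parameter container whose update() returns itself.
# syst = NS_infinite_2D_3D(a=10, W=100, H=100, dim=3)
# for B in np.linspace(0, 1, 11):
#     E_gap, k_min = find_gap(syst, p.update(B_x=B, mu_sm=5.0), num=201)
#     print(B, E_gap, k_min)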
def plot_bands(syst, p, ks=None):
"""Plot bandstructure using Eq. (11) of the paper.
Parameters:
-----------
syst : kwant.builder.InfiniteSystem object
The finalized system.
p : types.SimpleNamespace object
A simple container that is used to store Hamiltonian parameters.
ks : numpy array or None
Range of momenta for which the energies are calculated.
Returns:
--------
plot : hv.Path object
Curve of k vs. E_gap/Delta.
"""
if ks is None:
ks = np.linspace(-2, 2, 200)
eigvals = np.array([energies_over_delta(syst, p, k_x) for k_x in ks])
return hv.Path((ks, eigvals), kdims=[d.k, r'$E/\Delta$'])[:, 0:1.1]
def modes(h_cell, h_hop, tol=1e6):
"""Compute the eigendecomposition of a translation operator of a lead.
Adapted from kwant.physics.leads.modes such that it returns the eigenvalues.
Parameters:
----------
h_cell : numpy array, real or complex, shape (N, N) The unit cell
Hamiltonian of the lead unit cell.
h_hop : numpy array, real or complex, shape (N, M)
The hopping matrix from a lead cell to the one on which self-energy
has to be calculated (and any other hopping in the same direction).
tol : float
Numbers and differences are considered zero when they are smaller
than `tol` times the machine precision.
Returns
-------
ev : numpy array
Eigenvalues of the translation operator in the form lambda=exp(i*k).
"""
m = h_hop.shape[1]
n = h_cell.shape[0]
if (h_cell.shape[0] != h_cell.shape[1] or
h_cell.shape[0] != h_hop.shape[0]):
raise ValueError("Incompatible matrix sizes for h_cell and h_hop.")
# Note: np.any(h_hop) returns (at least from numpy 1.6.1 - 1.8-devel)
# False if h_hop is purely imaginary
if not (np.any(h_hop.real) or np.any(h_hop.imag)):
v = np.empty((0, m))
return (kwant.physics.PropagatingModes(np.empty((0, n)), np.empty((0,)),
np.empty((0,))),
kwant.physics.StabilizedModes(np.empty((0, 0)),
np.empty((0, 0)), 0, v))
# Defer most of the calculation to helper routines.
matrices, v, extract = kwant.physics.leads.setup_linsys(
h_cell, h_hop, tol, None)
ev = kwant.physics.leads.unified_eigenproblem(*(matrices + (tol,)))[0]
return ev
def slowest_evan_mode(syst, p, a, c=constants, return_ev=False):
"""Find the slowest decaying (evanescent) mode.
It uses an adapted version of the function kwant.physics.leads.modes,
in such a way that it returns the eigenvalues of the translation operator
    (lambda = e^{ik}). The imaginary part of the wavevector k is what makes
    the mode decay; the inverse of this Im(k) is the size of a Majorana bound
    state. The eigenvalue whose norm is closest to one gives the slowest
    decaying mode, also called the decay length.
Parameters:
-----------
syst : kwant.builder.InfiniteSystem object
The finalized system.
p : types.SimpleNamespace object
A simple container that is used to store Hamiltonian parameters.
c : types.SimpleNamespace object
A namespace container with all constant (fundamental) parameters used.
Returns:
--------
majorana_length : float
The length of the Majorana.
"""
def H(k_x):
ham = kwant.solvers.default.hidden_instance._make_linear_sys
return ham(syst, [0], args=[p, k_x], realspace=True)[0].lhs.todense()
h = (H(0) + H(np.pi)) / 2
t_plus_t_ = (H(0) - H(np.pi)) / 2
t_min_t_ = (H(np.pi/2) - H(-np.pi/2)) / 2j
t = (t_plus_t_ + t_min_t_) / 2
ev = modes(h, t)
norm = ev * ev.conj()
idx = np.abs(norm - 1).argmin()
if return_ev:
return ev[idx]
majorana_length = np.abs(a / np.log(ev[idx]).real)
return majorana_length
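# Usage sketch (illustrative, mirrors plot_decay_lengths()): the system must be
# built in particle-hole space (holes=True); the returned decay length is in nm
# because the lattice constant `a` is given in nm.
# syst = NS_infinite_2D_3D(a=10, W=100, H=100, dim=3, holes=True)
# xi = slowest_evan_mode(syst, p.update(B_x=0.5, mu_sm=5.0, mu_sc=5.0), a=10)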
def plot_phase(dview, lview, p, W, H, dim, num_k=201, fname=None, Bs=None, mus=None, a=10, async_parallel=True):
"""Calculates a phase diagram of bandgap sizes in (B, mu) space in parallel.
Parameters:
-----------
dview : DirectView ipyparallel object
client = ipyparallel.Client(); dview = client[:]
    lview : LoadBalancedView ipyparallel object
        lview = client.load_balanced_view()
p : shortjunction.SimpleNamespace object
Container with all parameters for Hamiltonian
W : int
Width of system in nm.
H : int
Height of system in nm (ignored if dim=2).
dim : int
Dimension of system, 2D or 3D.
num_k : int
Number of momenta on which the bandstructure is calculated.
Bs : numpy array or list
Range of values of magnetic field on which the bandgap is calculated.
mus : numpy array or list
Range of values of chemical potentials on which the bandgap is calculated.
a : int
Discretization constant in nm.
async_parallel : bool
        If True it uses lview.map_async; if False it uses a scatter/gather
        formalism, which is faster for very short jobs.
Returns:
--------
plot : hv.Image
Holoviews Image of the phase diagram. The raw data can be accessed
via plot.data.
Notes:
------
WARNING: This is the opposite behaviour of plot_decay_lengths()
    The parameter `mu_sc` is set to a fixed value; if you want to set it to
    the same value as `mu_sm`, change
p.update(B_x=x[0], mu_sm=x[1]) ---> p.update(B_x=x[0], mu_sm=x[1], mu_sc=x[1]).
"""
syst_str = 'syst = NS_infinite_2D_3D(a={}, W={}, H={}, dim={})'.format(a, W, H, dim)
dview.execute(syst_str, block=True)
dview['p'] = p
dview['num_k'] = num_k
if Bs is None:
Bs = np.linspace(0, 1, 50)
if mus is None:
mus = np.linspace(0.1, 15, 50)
vals = list(product(Bs, mus))
if async_parallel:
systs = [ipyparallel.Reference('syst')] * len(vals)
Es = lview.map_async(lambda x, sys: find_gap(sys, p.update(B_x=x[0], mu_sm=x[1]), num_k),
vals, systs)
Es.wait_interactive()
Es = Es.result()
result = np.array(Es).reshape(len(Bs), len(mus), -1)
else:
dview.scatter('xs', vals, block=True)
dview.execute('Es = [find_gap(syst, p.update(B_x=x[0], mu_sm=x[1]), num_k) for x in xs]',
block=True)
Es = dview.gather('Es', block=True)
result = np.array(Es).reshape(len(Bs), len(mus), -1)
gaps = result[:, :, 0]
k_xs = result[:, :, 1]
bounds = (Bs.min(), mus.min(), Bs.max(), mus.max())
kwargs = {'kdims': [d.B, d.mu],
'vdims': [d.gap],
'bounds': bounds,
'label': 'Band gap'}
plot = hv.Image(np.rot90(gaps), **kwargs)
plot.cdims.update(dict(p=p, k_xs=k_xs, Bs=Bs, mus=mus, W=W, H=H, dim=dim,
constants=constants, num_k=num_k,
git_hash=get_git_revision_hash()))
return plot
def plot_decay_lengths(dview, lview, p, W, H, dim, fname=None, Bs=None, mus=None, a=10, async_parallel=False):
"""Calculates a phase diagram of Majorana decay lengths (nm)
in (B, mu) space.
Parameters:
-----------
dview : DirectView ipyparallel object
client = ipyparallel.Client(); dview = client[:]
    lview : LoadBalancedView ipyparallel object
        lview = client.load_balanced_view()
p : shortjunction.SimpleNamespace object
Container with all parameters for Hamiltonian
W : int
Width of system in nm.
H : int
Height of system in nm (ignored if dim=2).
dim : int
Dimension of system, 2D or 3D.
Bs : numpy array or list
Range of values of magnetic field on which the bandgap is calculated.
mus : numpy array or list
Range of values of chemical potentials on which the bandgap is calculated.
a : int
Discretization constant in nm.
async_parallel : bool
        If True it uses lview.map_async; if False it uses a scatter/gather
        formalism, which is faster for very short jobs.
Returns:
--------
plot : hv.Image
Holoviews Image of the phase diagram. The raw data can be accessed
via plot.data.
Notes:
------
WARNING: This is the opposite behaviour of plot_phase()
    The parameter `mu_sc` is set equal to `mu_sm`; if you want to set it to
    a fixed value, change
p.update(B_x=x[0], mu_sm=x[1], mu_sc=x[1]) ---> p.update(B_x=x[0], mu_sm=x[1]).
"""
syst_str = 'syst = NS_infinite_2D_3D(a={}, W={}, H={}, dim={}, holes=True)'.format(a, W, H, dim)
dview.execute(syst_str, block=True)
dview['p'] = p
dview['a'] = a
if Bs is None:
Bs = np.linspace(0, 2, 50)
if mus is None:
mus = np.linspace(0.1, 15, 50)
vals = list(product(Bs, mus))
if async_parallel:
systs = [ipyparallel.Reference('syst')] * len(vals)
decay_lengths = lview.map_async(lambda x, sys:
slowest_evan_mode(sys, p.update(B_x=x[0], mu_sm=x[1], mu_sc=x[1]), a), vals, systs)
decay_lengths.wait_interactive()
result = np.array(decay_lengths.result()).reshape(len(Bs), len(mus))
else:
dview.scatter('xs', vals, block=True)
dview.execute("""decay_lengths = [slowest_evan_mode(syst,
p.update(B_x=x[0], mu_sm=x[1], mu_sc=x[1]), a) for x in xs]""")
decay_lengths = dview.gather('decay_lengths', block=True)
result = np.array(decay_lengths).reshape(len(Bs), len(mus))
bounds = (Bs.min(), mus.min(), Bs.max(), mus.max())
kwargs = {'kdims': [d.B, d.mu],
'vdims': [d.xi],
'bounds': bounds,
'label': 'Decay length'}
plot = hv.Image(np.rot90(result), **kwargs)
plot.cdims.update(dict(p=p, Bs=Bs, mus=mus, W=W, H=H, dim=dim, constants=constants,
git_hash=get_git_revision_hash()))
return plot
def Ez_to_B(Ez, constants=constants):
"""Converts from Zeeman energy to magnetic field"""
return 2 * Ez / (constants.g * constants.mu_B)
def B_to_Ez(B, constants=constants):
"""Converts from magnetic field to Zeeman energy"""
return 0.5 * constants.g * constants.mu_B * B
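# Worked example (the numbers are an illustrative assumption, not necessarily
# the values stored in `constants`): with g = 50 and mu_B = 5.788e-5 eV/T,
# B_to_Ez(1.0) = 0.5 * 50 * 5.788e-5 eV ~ 1.45 meV, and Ez_to_B(B_to_Ez(1.0))
# recovers 1.0 T.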
def sparse_eigs(ham, n_eigs, n_vec_lanczos, sigma=0):
"""Compute eigenenergies using MUMPS as a sparse solver.
Parameters:
----------
ham : coo_matrix
        The Hamiltonian of the system in sparse representation.
n_eigs : int
The number of energy eigenvalues to be returned.
n_vec_lanczos : int
Number of Lanczos vectors used by the sparse solver.
sigma : float
        Parameter used by the shift-invert method. See the
        documentation of scipy.sparse.linalg.eigs.
Returns:
--------
A list containing the sorted energy levels. Only positive
energies are returned.
"""
class LuInv(sla.LinearOperator):
def __init__(self, A):
inst = kwant.linalg.mumps.MUMPSContext()
inst.factor(A, ordering='metis')
self.solve = inst.solve
try:
super(LuInv, self).__init__(shape=A.shape, dtype=A.dtype,
matvec=self._matvec)
except TypeError:
super(LuInv, self).__init__(shape=A.shape, dtype=A.dtype)
def _matvec(self, x):
return self.solve(x.astype(self.dtype))
ev, evecs = sla.eigs(ham, k=n_eigs,
OPinv=LuInv(ham), sigma=sigma, ncv=n_vec_lanczos)
energies = list(ev.real)
return energies
def NS_finite(a, L, W, W_sc):
"""Makes two-dimensional NS junction, normal part
of width W connected to the superconducting part of
width W_sc.
Parameters:
-----------
a: int
Lattice constant in units of nm.
L: int
Length of the wire and superconductor in x-direction.
W: int
Width of the normal section of the wire in units of a.
W_sc: int
Width of the superconducting section of the wire in units of a.
Returns:
--------
syst: kwant.builder.FiniteSystem object
        Finalized tight-binding system.
"""
lat = kwant.lattice.square(a)
syst = kwant.Builder()
def onsite_sm(site, p):
return (4 * p.t / a**2 - p.mu) * np.kron(s0, sz) + p.Ez * np.kron(sx, s0)
def hopx_sm(site1, site2, p):
return -p.t / a**2 * np.kron(s0, sz) - 0.5j * p.alpha / a * np.kron(sy, sz)
def hopy_sm(site1, site2, p):
return -p.t / a**2 * np.kron(s0, sz) + 0.5j * p.alpha / a * np.kron(sx, sz)
def onsite_sc(site, p):
return ((2 * p.t / a**2 + 2 * p.tpar / a**2 - p.mu) * np.kron(s0, sz) +
p.delta * np.kron(s0, sx))
def hopx_sc(site1, site2, p):
return -p.tpar / a**2 * np.kron(s0, sz)
def hopy_sc(site1, site2, p):
return -p.t / a**2 * np.kron(s0, sz)
# Onsite energies
syst[(lat(i, j) for i in range(L) for j in range(W+1))] = onsite_sm
syst[(lat(i, j) for i in range(L) for j in range(W+1, W+W_sc))] = onsite_sc
# Hopping energies
syst[((lat(i, j), lat(i+1, j)) for i in range(L-1) for j in range(W+1))] = hopx_sm
syst[((lat(i, j), lat(i+1, j)) for i in range(L-1) for j in range(W+1, W+W_sc))] = hopx_sc
syst[((lat(i, j), lat(i, j+1)) for i in range(L) for j in range(W))] = hopy_sm
syst[((lat(i, j), lat(i, j+1)) for i in range(L) for j in range(W, W+W_sc-1))] = hopy_sc
return syst.finalized()
def NS_infinite(a, L):
"""Makes two-dimensional NS junction, with a 2D semi-infinite
superconducting lead connected to the normal part with finite
length and infinite in the direction parallel to the interface.
Parameters:
-----------
a : int
Lattice constant in units of nm.
L : int
        Width of the normal part in units of nm.
Returns:
--------
syst: kwant.builder.FiniteSystem object
        Finalized tight-binding system.
"""
sx, sy, sz = [np.array(sympy.physics.matrices.msigma(i)).astype(np.complex)
for i in range(1, 4)]
s0 = np.eye(2)
lat = kwant.lattice.square(a)
def onsite(site, p):
return ((4 * p.t / a**2 - p.mu) * s0 + p.Ez * sx)
def hopx(site1, site2, p):
return -p.t / a**2 * s0 - 0.5j * p.alpha / a * sy
def hopy(site1, site2, p):
return -p.t / a**2 * s0 + 0.5j * p.alpha / a * sx
def lead_onsite(site, p):
return (2 * p.t / a**2 + 2 * p.tpar / a**2 - p.mu) * s0
def lead_hopx(site1, site2, p):
return -p.tpar / a**2 * s0
def lead_hopy(site1, site2, p):
return -p.t / a**2 * s0
def shape_sm(pos):
(x, y) = pos
return 0 < y <= L
def shape_sc(pos):
(x, y) = pos
return y >= 0
# SM part
sym_sm = kwant.TranslationalSymmetry((a, 0))
syst = kwant.Builder(sym_sm)
syst[lat.shape(shape_sm, (0, L / 2))] = onsite
syst[kwant.HoppingKind((1, 0), lat)] = hopx
syst[kwant.HoppingKind((0, 1), lat)] = hopy
# SC lead
lead_sym = kwant.TranslationalSymmetry((a, 0), (0, a))
lead = kwant.Builder(lead_sym)
lead[lat.shape(shape_sc, (0, 0))] = lead_onsite
lead[kwant.HoppingKind((1, 0), lat)] = lead_hopx
lead[kwant.HoppingKind((0, 1), lat)] = lead_hopy
syst = wraparound(syst)
syst.attach_lead(wraparound(lead, keep=1))
syst = syst.finalized()
syst.leads = [TRIInfiniteSystem(lead, trs) for lead in syst.leads]
return syst
def SNS_infinite(a, L, W_sm):
"""Makes two-dimensional SNS junction with orbital effect in N part
Parameters:
-----------
a : int
Lattice constant in units of nm.
L : int
        Width of the superconducting part in units of sites.
W_sm : int
Width of the normal (semi-conducting) parts in units of sites.
Returns:
--------
syst: kwant.builder.FiniteSystem object
        Finalized tight-binding system.
"""
sx, sy, sz = [np.array(sympy.physics.matrices.msigma(i)).astype(np.complex)
for i in range(1, 4)]
s0 = np.eye(2)
lat = kwant.lattice.square(a)
def onsite_sm(site, p):
return (4 * p.t / a**2 - p.mu) * np.kron(s0, sz) + B_to_Ez(p.B) * np.kron(sx, s0)
def onsite_sc(site, p):
return ((2 * p.t / a**2 + 2 * p.tpar / a**2 - p.mu) * np.kron(s0, sz) +
p.delta * np.kron(s0, sx))
def hopx_sm(site1, site2, p):
y1 = site1.tag[0]
y2 = site2.tag[0]
phi = 0.25 * np.pi * a**2 * p.D**2 * constants.eV * 1e-18 * p.B / constants.hbar / W_sm
exp_phi = expm(1j * phi * np.kron(s0, sz))
return (-p.t / a**2 * exp_phi @ np.kron(s0, sz) +
                0.5j * exp_phi @ np.kron(sx, sz) * np.sin((y1 + y2) / p.D)
# -*- coding: UTF-8 -*-
import numpy
"""
================================================================================
Logistic regression
================================================================================
General workflow of logistic regression:
1. Collect data: any method.
2. Prepare data: numeric values are required (distances are computed); structured data works best.
3. Analyze data: any method.
4. Train the algorithm: most of the time is spent here, finding the optimal regression coefficients.
5. Test the algorithm: once training is done, classification is fast.
6. Use the algorithm: convert the input into structured numeric values, apply the regression coefficients, and read off the class.
================================================================================
Pros: computationally cheap, easy to understand and implement.
Cons: prone to underfitting; classification accuracy may be low.
Applicable data types: numeric and nominal.
================================================================================
Optimization algorithms:
1. Gradient ascent:
   To find the maximum of a function, move along its gradient direction: w := w + α∇wf(w)
   Steps:
       initialize every regression coefficient to 1
       repeat R times:
           compute the gradient over the whole data set
           update the coefficient vector using alpha * gradient
       return the regression coefficients
================================================================================
"""
"""
Sigmoid function: sigmoid(x) = 1.0/(1+numpy.exp(-x))
Its input is denoted z, where: z = w[0]x[0] + w[1]x[1] + w[2]x[2] + ... + w[n]x[n]
"""
def sigmoid(x):
return 1.0 / (1 + numpy.exp(-x))
"""
Create the data set from dataset.txt
"""
def create_dataset():
dataset = []
labels = []
file_reader = open('dataset.txt')
for line in file_reader.readlines():
items = line.strip().split()
dataset.append([1.0, float(items[0]), float(items[1])])
labels.append(int(items[2]))
return dataset, labels
"""
Gradient ascent
"""
def grad_ascent(dataset, labels):
dataset_matrix = numpy.mat(dataset)
labels_matrix = numpy.mat(labels).transpose()
m, n = numpy.shape(dataset_matrix)
weights = numpy.ones((n, 1))
alpha = 0.001
batch = 500
for i in range(batch):
h = sigmoid(dataset_matrix * weights)
err = (labels_matrix - h)
weights = weights + alpha * dataset_matrix.transpose() * err
return weights
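# Usage sketch (assumes dataset.txt holds whitespace-separated lines with two
# features and a 0/1 label, as read by create_dataset above):
# dataset, labels = create_dataset()
# weights = grad_ascent(dataset, labels)   # 3x1 matrix: bias, w1, w2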
"""
Plot the data points and the best-fit line found by logistic regression
"""
def plot_best_fit(dataset,labels,weights):
import matplotlib.pyplot as plt
dataset = numpy.mat(dataset)
dataset_array = numpy.array(dataset)
    size = numpy.shape(dataset_array)
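    # The original function body is cut off above; what follows is a minimal
    # sketch of the usual completion: scatter the two classes and draw the
    # decision boundary w0 + w1*x1 + w2*x2 = 0. The plotting range (-3, 3) is
    # an assumption about the feature scale.
    xcord1, ycord1, xcord2, ycord2 = [], [], [], []
    for i in range(size[0]):
        if int(labels[i]) == 1:
            xcord1.append(dataset_array[i, 1])
            ycord1.append(dataset_array[i, 2])
        else:
            xcord2.append(dataset_array[i, 1])
            ycord2.append(dataset_array[i, 2])
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    axes.scatter(xcord2, ycord2, s=30, c='green')
    w = numpy.asarray(weights).flatten()
    x = numpy.arange(-3.0, 3.0, 0.1)
    axes.plot(x, (-w[0] - w[1] * x) / w[2])
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()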
#!/usr/bin/env pytest
import numpy as np
import pytest
import siglib as sl
@pytest.mark.parametrize(
"x,frame_length,frame_step,pad,pad_value,expected",
(
        (np.arange(10)
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from scipy.interpolate import interp1d
class patcher(patcher):
def __init__(self, body='./body/body_uketsukejo.png', **options):
super().__init__('受付嬢', body=body, pantie_position=[860, 1671], **options)
self.mask = io.imread('./mask/mask_uketsukejo.png')
def convert(self, image):
pantie = np.array(image)
patch = np.copy(pantie[-140:-5, 546:, :])
patch = skt.resize(patch[::-1, ::-1, :], (230, 65), anti_aliasing=True, mode='reflect')
[pr, pc, d] = patch.shape
pantie[127 - 5:127 - 5 + pr, :pc, :] = np.uint8(patch * 255)
front = pantie[:, :300]
back = pantie[:, 300:-10]
back[380:] = 0
front = front[::-1, ::-1]
back = back[::-1, ::-1]
def mesh_transform(img, arr):
[r, c, d] = img.shape
src_cols = np.linspace(0, c, int(np.sqrt(arr.shape[0])))
            src_rows = np.linspace(0, r, int(np.sqrt(arr.shape[0])))
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider using a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
def click_coords(fig=None, timeout=600.):
if fig is None:
fig = plt.gcf()
xy = []
def onclick(event):
if not event.inaxes:
fig.canvas.stop_event_loop()
else:
xy.append([event.xdata, event.ydata])
print("Gathering coordinates of mouse clicks. Click outside of the axes " \
"when done.")
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.start_event_loop(timeout=timeout)
fig.canvas.mpl_disconnect(cid)
return np.array(xy)
def common_axes(fig, pos=None):
if pos is None:
bigax = fig.add_subplot(111)
else:
bigax = fig.add_axes(pos)
[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]
bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')
bigax.set_zorder(-10)
return bigax
def log_frac(x, frac):
l0, l1 = list(map(np.log10, x))
ld = l1 - l0
l = ld*frac + l0
return 10**l
def log2linear(x, errneg=None, errpos=None):
xl = 10**x
result = [xl]
if errneg is not None:
xn = xl - 10**(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = 10**(x + errpos) - xl
result.append(xp)
return result
def linear2log(x, errneg=None, errpos=None):
xl = np.log10(x)
result = [x]
if errneg is not None:
xn = xl - np.log10(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = np.log10(x + errpos) - xl
result.append(xp)
return result
def step(*args, **kwargs):
edges, values = args[0], args[1]
# deal with potentially gappy 2-column bin specifications
edges = np.asarray(edges)
if edges.ndim == 2:
if np.any(edges[1:,0] < edges[:-1,1]):
raise ValueError('Some bins overlap')
if np.any(edges[1:,0] < edges[:-1,0]):
raise ValueError('Bins must be in increasing order.')
gaps = edges[1:,0] > edges[:-1,1]
edges = np.unique(edges)
if np.any(gaps):
values = np.insert(values, np.nonzero(gaps), np.nan)
edges = mnp.lace(edges[:-1], edges[1:])
values = mnp.lace(values, values)
args = list(args)
args[0], args[1] = edges, values
ax = kwargs.pop('ax', plt.gca())
return ax.plot(*args, **kwargs)
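# Usage sketch: bins may be given as a (possibly gappy) two-column edge array
# with one value per bin; gaps show up as breaks (NaN) in the stepped curve.
# edges = np.array([[0., 1.], [1., 2.], [3., 4.]])
# values = np.array([1.0, 2.0, 1.5])
# step(edges, values)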
def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):
if scale == 'log':
        # work in log space and convert back; the recursive call uses a linear
        # scale (and log-transforms xlbl when given) to avoid infinite recursion
        lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac,
                                  None if xlbl is None else np.log10(xlbl),
                                  scale='linear')
return 10 ** lx, 10 ** ly
if xfrac is not None:
if xfrac == 0:
return x[0], y[0]
if xfrac == 1:
return x[-1], y[-1]
else:
d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
d = np.insert(d, 0, 0)
f = d/d[-1]
xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]
return xp, yp
if xlbl is not None:
return xlbl, np.interp(xlbl, x, y)
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
if coordinate == 'fig':
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
def tight_axis_limits(ax=None, xory='both', margin=0.05):
if ax is None: ax = plt.gca()
def newlim(oldlim):
delta = abs(oldlim[1] - oldlim[0])
pad = delta*margin
if oldlim[1] > oldlim[0]:
return (oldlim[0] - pad, oldlim[1] + pad)
else:
return (oldlim[0] + pad, oldlim[1] - pad)
def newlim_log(oldlim):
loglim = [np.log10(l) for l in oldlim]
newloglim = newlim(loglim)
return (10.0**newloglim[0], 10.0**newloglim[1])
def newlim_either(oldlim,axlim,scale):
        if axlim[1] < axlim[0]: oldlim = oldlim[::-1]
if scale == 'linear':
return newlim(oldlim)
elif scale == 'log':
return newlim_log(oldlim)
elif scale == 'symlog':
            raise NotImplementedError('Past Parke to future Parke, you didn\'t write an implementation for symlog '
'scaled axes.')
if xory == 'x' or xory == 'both':
datalim = ax.dataLim.extents[[0,2]]
axlim = ax.get_xlim()
scale = ax.get_xscale()
ax.set_xlim(newlim_either(datalim,axlim,scale))
if xory == 'y' or xory == 'both':
datalim = ax.dataLim.extents[[1,3]]
axlim = ax.get_ylim()
scale = ax.get_yscale()
ax.set_ylim(newlim_either(datalim,axlim,scale))
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
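# Usage sketch: z must be sampled on a uniform grid and shaped (y.size, x.size).
# x = np.linspace(0, 1, 100)
# y = np.linspace(0, 2, 200)
# z = np.sin(10 * x)[None, :] * np.cos(5 * y)[:, None]
# pcolor_reg(x, y, z, cmap='viridis')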
def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):
if ax is None: ax = plt.gca()
p = ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y, fmt, **kw)
if len(yerr.shape) == 2:
ylo = y - yerr[0,:]
yhi = y + yerr[1,:]
else:
ylo, yhi = y - yerr, y + yerr
if ecolor is None: ecolor = p[0].get_color()
# deal with matplotlib sometimes not showing polygon when it extends beyond plot range
xlim = ax.get_xlim()
inrange = mnp.inranges(x, xlim)
if not np.all(inrange):
n = np.sum(inrange)
yends = np.interp(xlim, x, y)
yloends = np.interp(xlim, x, ylo)
yhiends = np.interp(xlim, x, yhi)
x = np.insert(x[inrange], [0, n], xlim)
y = np.insert(y[inrange], [0, n], yends)
ylo = np.insert(ylo[inrange], [0, n], yloends)
yhi = np.insert(yhi[inrange], [0, n], yhiends)
f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)
return p[0],f
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
    screenwidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
fig= txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
    hc = np.sin(el * np.pi / 180.0)
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Embedding, Dot, Lambda, Conv2D
from keras.layers import MaxPooling2D, Flatten, Concatenate, Dense
from keras.layers import Activation, BatchNormalization, Dropout
def semantic_match(X, Y, A, window):
"""Computing semantic match in direction X -> Y
shape X: (s,n,d), Y: (s,m,d), A: (s, n, m)
"""
# shape Pivot, lower_lim, upper_lim: (s,n,1)
    Pivot = np.expand_dims(np.argmax(A, axis=-1), -1)
"""
Line profile filters for creating synthetic spectra.
"""
import numpy as np
def gaussianR(wvl, wvl0, factor=1000.):
"""
A gaussian filter where the gaussian width is given by `wvl0`/`factor`.
Parameters
-----------
wvl : `~numpy.ndarray`
Wavelength array
wvl0 : `~numpy.float64`
Wavelength filter should be centered on.
factor : `~numpy.float64`
Resolving power
"""
std = wvl0/factor
wvl = np.asarray(wvl)
return np.exp(-0.5*((wvl - wvl0)/std)**2)/(np.sqrt(2.*np.pi)*std)
def gaussian(wvl, wvl0, factor=1.):
"""
A gaussian filter
Parameters
-----------
wvl : `~numpy.ndarray`
Wavelength array
wvl0 : `~numpy.float64`
Wavelength filter should be centered on.
factor : `~numpy.float64`
Gaussian width
integrated value is unity
"""
wvl = np.asarray(wvl, np.float64)
dwvl = wvl - np.roll(wvl, 1)
dwvl[0] = dwvl[1]
return np.exp(-0.5*((wvl - wvl0)/factor)**2)/(np.sqrt(2.*np.pi)*factor)
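# Quick check (sketch): the profile integrates to ~1 when the wavelength grid
# comfortably covers the line, e.g.
# wvl = np.linspace(990., 1010., 2001)
# np.trapz(gaussian(wvl, 1000., factor=0.5), wvl)   # ~1.0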
def boxcar(wvl, wvl0, factor=None):
"""
Box-car filter
Parameters
-----------
wvl : `~numpy.ndarray`
Wavelength array
wvl0 : `~numpy.float64`
Wavelength filter should be centered on.
factor : `~numpy.float64`
Full width of the box-car filter
"""
wvl = np.asarray(wvl, np.float64)
dwvl = wvl - np.roll(wvl, 1)
dwvl[0] = dwvl[1]
one = np.ones_like(wvl)
zed = np.zeros_like(wvl)
if factor is None:
factor = dwvl.min()
if factor < dwvl.min():
raise ValueError('Width must be at least equal to the wavelength step')
good1 = (wvl > wvl0 - factor/2.)
good2 = (wvl < wvl0 + factor/2.)
realgood = np.logical_and(good1, good2)
return np.where(realgood, one, zed)/(factor)
def lorentz(wvl, wvl0, factor=1.):
"""
Lorentz profile filter with the exception that all factors are in wavelength units
rather than frequency as the lorentz profile is usually defined.
Parameters
-----------
wvl : `~numpy.ndarray`
Wavelength array
wvl0 : `~numpy.float64`
Wavelength filter should be centered on.
factor : `~numpy.float64`
Value of the so-called constant gamma
integrated value is unity
the FWHM is 2*gamma
.. math::
L = \\frac{1}{\pi \gamma} \\frac{ \gamma^2}{(\lambda - \lambda_0)^2 + \gamma^2}
"""
    wvl = np.asarray(wvl)
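    # The original return statement is cut off here; the line below is a
    # reconstruction of the normalized Lorentzian given in the docstring,
    # L = (gamma/pi) / ((wvl - wvl0)**2 + gamma**2) with gamma = factor.
    return (factor / np.pi) / ((wvl - wvl0)**2 + factor**2)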
#!/usr/bin/env python3
import numpy as np
from follow_curve import FollowCurve
from utilities.misc import *
from group_move import *
from pios_facility import PiosFacility
from get_init import pios_facility_parameter
import copy
import time
import threading
class Robot():
def __init__(self,x = 0,y = 0,ang = 0,timestamp = 0):
self.pos = np.array([[x],[y],[ang]])
self.timestamp = timestamp
class Cooperate:
def __init__(self):
self.FollowCurve = FollowCurve()
self.isStoped = True
self.quit = False
self.followcircle = False
self.followcurve = False
self.N = 4
self.debug = 0
self.linear_velocity_gain = 0.3
self.angular_velocity_gain = 0.4
self.velocity_magnitude_limit = 0.15
self.angular_velocity_limit = 0.8
self.position_error = 0.03
self.position_epsilon = 0.01
self.rotation_error = 0.125
if self.debug == 1:
initial_conditions = np.array([[0.3, 0.3, 0.1, 0.1], [0.1, 0.3, 0.3, 0.1], [0.0, 0, 0, 0]])
else:
initial_conditions = np.array([])
self.goal = np.array([[2.0,1.7,2.0,1.7], [-0.1,-0.1,0.3,0.3], [0,0,0,0]])
self.facility = PiosFacility(number_of_robots=self.N, show_figure=True, initial_conditions=initial_conditions,
sim_in_real_time=True)
self.facility.robot_id_tf(self.debug)
self.pios_facility_parameter = pios_facility_parameter()
self.controller = create_controller(self,"trigonometry")
def get_circle(self,radius,pos):
path = np.array([[], []])
origin = np.array([pos[0]+radius,pos[1]])
for i in range(31):
theta = np.pi - np.pi/30 * i
temp = np.array([[origin[0] + radius*np.cos(theta)], [origin[1]+radius*np.sin(theta)]])
path = np.append(path, temp, axis=1)
return path
def generate_path_to_goal(self,current_point,goal_point):
paths = []
for i in range(current_point.shape[1]):
path = np.array([[], []])
dist = np.linalg.norm(current_point[0:2,i] - goal_point[0:2,i])
segment = int(dist/0.02)
temp_x = np.linspace(current_point[0,i],goal_point[0,i],segment)
temp_y = np.linspace(current_point[1,i],goal_point[1,i],segment)
path = np.array([temp_x,temp_y])
paths.append(path)
return paths
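    # Usage sketch (illustrative poses): each returned path is a straight line
    # sampled roughly every 2 cm from the current pose to the goal pose.
    # current = np.array([[0.0], [0.0], [0.0]])
    # goal = np.array([[1.0], [0.0], [0.0]])
    # paths = self.generate_path_to_goal(current, goal)   # paths[0].shape == (2, 50)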
def follow_circle(self,n,radius):
if self.debug == 1:
x = self.facility.get_poses()
self.facility.step()
else:
x = self.facility.get_real_position()
paths = []
robots_goal_points = np.array([[],[],[]])
local_points = copy.copy(x)
for i in range(self.N):
path = self.get_circle(radius,x[:,i])
# new_path = self.FollowCurve.path_interpolation(path, n)
goal_point = np.array([[path[0, -2]], [path[1, -2]], [0]])
robots_goal_points = np.append(robots_goal_points,goal_point,axis=1)
paths.append(path)
delta = path[:,1]-path[:,0]
angle = np.arctan2(delta[1],delta[0])
local_points[2,i]=angle
print(x)
print("-------")
print(local_points)
while np.size(at_pose(x, local_points,self.position_error,self.rotation_error)) != self.N and self.isStoped == False:
# Get poses of agents
x = self.facility.get_poses()
dxu = np.zeros((2,self.N))
for i in range(self.N):
ang = x[2,i]-local_points[2,i]
ang = np.arctan2(np.sin(ang),np.cos(ang))
dxu[0][i] = 0.0
dxu[1][i] = -np.sign(ang) * 0.4
if np.linalg.norm(x[2, i] - local_points[2, i]) < self.rotation_error:
local_points[:,i] = x[:,i]
dxu[1][i] = 0.0
# print("Rotating dxu:", dxu)
            # Set the velocities by mapping the single-integrator inputs to unicycle inputs
self.facility.set_velocities(np.arange(self.N), dxu)
# Iterate the simulation
if self.debug == 1:
self.facility.step()
else:
self.facility.step_real()
time.sleep(0.033)
self.facility.get_real_position()
# self.facility.poses_publisher()
if self.quit or self.isStoped:
return
print("Rotation finished")
while np.size(at_position(x[:2, :], robots_goal_points[:2,:],self.position_error)) != self.N and self.isStoped == False:
x = self.facility.get_poses()
current_points = np.delete(x, 2, 0)
temp_points = np.array([[],[],[]])
for i in range(self.N):
current_point = np.reshape(current_points[:,i],(2,1))
next_point, min_dis = self.FollowCurve.get_map_point(paths[i], current_point, 0.1)
angle = np.arctan2(next_point[1]-x[1,i],next_point[0]-x[0,i])
if min_dis < n - 3:
temp_point = np.array([[next_point[0]], [next_point[1]], [angle]])
else:
temp_point = np.reshape(robots_goal_points[:,i],(3,1))
if np.linalg.norm(x[0:2,i] - robots_goal_points[0:2,i]) < self.position_error:
robots_goal_points[:,i] = x[:,i]
temp_points=np.append(temp_points,temp_point,axis=1)
# dxu = self.controller(x, robots_goal_points)
dxu = self.controller(x, temp_points)
# print("dxu: ",dxu)
self.facility.set_velocities(np.arange(self.N), dxu)
if self.debug == 1:
self.facility.step()
else:
self.facility.step_real()
time.sleep(0.033)
self.facility.get_real_position()
print("Translation finished")
self.facility.stop_all()
self.followcircle = False
def formation_follow_curve(self, n):
for i in range(self.goal.shape[1]):
if self.debug == 1:
x = self.facility.get_poses()
print(x)
self.facility.step()
else:
x = self.facility.get_real_position()
dist, angle_to_destination = get_dist_and_angle(x,self.goal[:,i])
robots_goal_x = x[0,:]+ dist*np.cos(angle_to_destination)
robots_goal_y = x[1,:]+ dist*np.sin(angle_to_destination)
robots_goal_z = np.linspace(angle_to_destination,angle_to_destination,self.N)
robots_goal_points = np.array([robots_goal_x,robots_goal_y,robots_goal_z])
print("angle:", angle_to_destination*180/np.pi)
local_points = copy.copy(x)
local_points[2,:] = angle_to_destination
while np.size(at_pose(x, local_points,self.position_error,self.rotation_error)) != self.N and self.isStoped == False:
# Get poses of agents
x = self.facility.get_poses()
dxu = np.zeros((2,self.N))
for i in range(self.N):
ang = x[2,i]-local_points[2,i]
ang = np.arctan2(np.sin(ang),np.cos(ang))
dxu[0][i] = 0.0
dxu[1][i] = -np.sign(ang) * 0.4
if np.linalg.norm(x[2, i] - local_points[2, i]) < self.rotation_error:
local_points[:,i] = x[:,i]
dxu[1][i] = 0.0
# print("Rotating dxu:", dxu)
                # Set the velocities by mapping the single-integrator inputs to unicycle inputs
self.facility.set_velocities(np.arange(self.N), dxu)
# Iterate the simulation
if self.debug == 1:
self.facility.step()
else:
self.facility.step_real()
time.sleep(0.033)
self.facility.get_real_position()
# self.facility.poses_publisher()
if self.quit or self.isStoped:
return
print("Rotation finished")
paths = self.generate_path_to_goal(x,robots_goal_points)
new_paths = []
goal_points = []
for i in range(self.N):
new_path = self.FollowCurve.path_interpolation(paths[i], n)
goal_point = np.array([new_path[0, -2], new_path[1, -2], angle_to_destination])
new_paths.append(new_path)
goal_points.append(goal_point)
if self.debug == 1:
x = self.facility.get_poses()
self.facility.step()
else:
x = self.facility.get_real_position()
while np.size(at_position(x[:2, :], robots_goal_points[:2,:],self.position_error)) != self.N and self.isStoped == False:
# Get the poses of the robots
x = self.facility.get_poses()
current_points = np.delete(x, 2, 0)
temp_points = np.array([[],[],[]])
for i in range(self.N):
current_point = np.reshape(current_points[:,i],(2,1))
next_point, min_dis = self.FollowCurve.get_map_point(new_paths[i], current_point, 0.1)
                    angle = np.arctan2(next_point[1]-x[1,i],next_point[0]-x[0,i])
import os
import pickle
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
import numpy as np
from nnunet.configuration import default_num_threads
from nnunet.preprocessing.preprocessing import GenericPreprocessor
def compute_sdf(img_gt):
"""
compute the signed distance map of binary mask
input: segmentation, shape = (x, y, z)
output: the Signed Distance Map (SDM)
sdf(x) = 0; x in segmentation boundary
-inf|x-y|; x in segmentation
+inf|x-y|; x out of segmentation
normalize sdf to [-1,1]
"""
assert img_gt.min() == 0, "img_gt.min() = " + str(img_gt.min())
img_gt = img_gt.astype(np.uint8)
posmask = img_gt.astype(np.bool)
if posmask.any():
negmask = ~posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode='outer').astype(np.uint8)
sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (posdis - np.min(posdis)) / (
np.max(posdis) - np.min(posdis))
sdf[boundary == 1] = 0
assert np.min(sdf) == -1.0, (np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
assert np.max(sdf) == 1.0, (np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))
return sdf
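# Usage sketch: for a small cubic mask the normalized SDF is negative inside
# the object, positive outside, zero on the outer boundary, with extrema -1/+1.
# mask = np.zeros((32, 32, 32), dtype=np.uint8)
# mask[8:24, 8:24, 8:24] = 1
# sdf = compute_sdf(mask)
# print(sdf.min(), sdf.max())   # -1.0 1.0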
class DTCPreprocessor(GenericPreprocessor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _run_internal(self, target_spacing, case_identifier, output_folder_stage, cropped_output_dir, force_separate_z,
all_classes):
data, seg, properties = self.load_cropped(cropped_output_dir, case_identifier)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.resample_and_normalize(data, target_spacing,
properties, seg, force_separate_z)
try:
lsf_value = compute_sdf(seg)
except AssertionError:
np.savez(os.path.join('/', *output_folder_stage.split('/')[:-1], "debug_output", case_identifier + "_seg.npz"), data=seg)
np.savez(os.path.join('/', *output_folder_stage.split('/')[:-1], "debug_output", case_identifier + "_img.npz"), data=data)
raise AssertionError(case_identifier + " Level Set Function Compute Error.")
all_data = np.vstack((data, lsf_value, seg)).astype(np.float32)
# we need to find out where the classes are and sample some random locations
# let's do 10.000 samples per class
# seed this for reproducibility!
num_samples = 10000
min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too sparse
        rndst = np.random.RandomState(1234)