| prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59) |
|---|---|---|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nobroadcast():
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
np.random.seed(42)
x1_np = np.random.rand(10, 20).astype(np.float32)
x2_np = np.random.rand(10, 20).astype(np.float32)
x1_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)
x2_np_int32 = np.random.randint(0, 100, (10, 20)).astype(np.int32)
output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
output_np = np.minimum(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
output_np = np.maximum(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np > x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Greater()(Tensor(x1_np_int32), Tensor(x2_np_int32))
output_np = x1_np_int32 > x2_np_int32
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np < x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Less()(Tensor(x1_np_int32), Tensor(x2_np_int32))
output_np = x1_np_int32 < x2_np_int32
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
output_np = np.power(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np / x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np * x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np - x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nobroadcast_fp16():
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
np.random.seed(42)
x1_np = np.random.rand(10, 20).astype(np.float16)
x2_np = np.random.rand(10, 20).astype(np.float16)
output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
output_np = np.minimum(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
output_np = np.maximum(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np > x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np < x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
output_np = np.power(x1_np, x2_np)
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np / x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np * x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
output_np = x1_np - x2_np
assert np.allclose(output_ms.asnumpy(), output_np)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_broadcast():
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
np.random.seed(42)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
x2_np = np.random.rand(1, 4, 1, 6).astype(np.float32)
x1_np_int32 = np.random.randint(0, 100, (3, 1, 5, 1)).astype(np.int32)
x2_np_int32 = np.random.randint(0, 100, (3, 1, 5, 1)).astype(np.int32)
output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
output_np = np.minimum(x1_np, x2_np)
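# For reference, the shapes above rely on standard NumPy broadcasting: size-1 axes are
# stretched to match, so (3, 1, 5, 1) combined with (1, 4, 1, 6) yields results of shape
# (3, 4, 5, 6). A minimal NumPy-only sketch of the behavior the broadcast test exercises:
#     a = np.random.rand(3, 1, 5, 1).astype(np.float32)
#     b = np.random.rand(1, 4, 1, 6).astype(np.float32)
#     assert np.minimum(a, b).shape == (3, 4, 5, 6)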
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
import os
import numpy as np
from xcessiv import functions, exceptions
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import pickle
filepath = os.path.join(os.path.dirname(__file__),
'extractmaindataset.py')
class TestHashFile(unittest.TestCase):
def test_hash_file(self):
assert functions.hash_file(filepath) == "1c67f8f573b69a9da2f986e1006ff63a" \
"10fbb70298af45d0293e490b65b34edc"
assert functions.hash_file(filepath) == functions.hash_file(filepath, 2)
class TestImportObjectFromPath(unittest.TestCase):
def test_import_object_from_path(self):
returned_object = functions.import_object_from_path(filepath,
"extract_main_dataset")
assert callable(returned_object)
pickle.loads(pickle.dumps(returned_object)) # make sure pickle works
class TestImportObjectFromStringCode(unittest.TestCase):
def test_import_object_from_string_code(self):
with open(filepath) as f:
returned_object = functions.\
import_object_from_string_code(f.read(), "extract_main_dataset")
assert callable(returned_object)
pickle.loads(pickle.dumps(returned_object)) # make sure pickle works
class TestImportStringCodeAsModule(unittest.TestCase):
def test_import_string_code_as_module(self):
with open(filepath) as f:
module = functions.\
import_string_code_as_module(f.read())
assert callable(module.extract_main_dataset)
assert module.dummy_variable == 2
pickle.loads(pickle.dumps(module.extract_main_dataset)) # make sure pickle works
class TestVerifyDataset(unittest.TestCase):
def test_correct_dataset(self):
X, y = load_digits(return_X_y=True)
verification_dict = functions.verify_dataset(X, y)
assert verification_dict['features_shape'] == (1797,64)
assert verification_dict['labels_shape'] == (1797,)
def test_invalid_assertions(self):
self.assertRaises(exceptions.UserError,
functions.verify_dataset,
[[1, 2, 2], [2, 3, 5]],
[1, 2, 3])
self.assertRaises(exceptions.UserError,
functions.verify_dataset,
[[1, 2, 2], [2, 3, 5]],
[[1, 2, 3]])
self.assertRaises(exceptions.UserError,
functions.verify_dataset,
[[[1, 2, 2]], [[2, 3, 5]]],
[1, 2, 3])
class TestIsValidJSON(unittest.TestCase):
def test_is_valid_json(self):
assert functions.is_valid_json({'x': ['i am serializable', 0.1]})
assert not functions.is_valid_json({'x': RandomForestClassifier()})
class TestMakeSerializable(unittest.TestCase):
def test_make_serializable(self):
assert functions.is_valid_json({'x': ['i am serializable', 0.1]})
assert not functions.is_valid_json({'x': RandomForestClassifier()})
assert functions.make_serializable(
{
'x': ['i am serializable', 0.1],
'y': RandomForestClassifier()
}
) == {'x': ['i am serializable', 0.1]}
class GetSampleDataset(unittest.TestCase):
def setUp(self):
self.dataset_properties = {
'type': 'multiclass',
}
def test_classification_dataset(self):
X, y, split = functions.get_sample_dataset(self.dataset_properties)
assert X.shape == (100, 20)
assert y.shape == (100,)
assert len(np.unique(y)) == 2
self.dataset_properties['n_classes'] = 4
self.dataset_properties['n_informative'] = 18
X, y, split = functions.get_sample_dataset(self.dataset_properties)
assert X.shape == (100, 20)
assert y.shape == (100,)
assert len(np.unique(y)) == 4
self.dataset_properties['n_features'] = 100
X, y, split = functions.get_sample_dataset(self.dataset_properties)
assert X.shape == (100, 100)
assert y.shape == (100,)
assert len(np.unique(y)) == 4
self.dataset_properties['n_samples'] = 24
X, y, split = functions.get_sample_dataset(self.dataset_properties)
assert X.shape == (24, 100)
assert y.shape == (24,)
assert len(np.unique(y)) == 4
def test_iris_dataset(self):
X, y, split = functions.get_sample_dataset({'type': 'iris'})
assert X.shape == (150, 4)
assert y.shape == (150,)
def test_mnist_dataset(self):
X, y, split = functions.get_sample_dataset({'type': 'mnist'})
assert X.shape == (1797, 64)
assert y.shape == (1797,)
def test_breast_cancer_dataset(self):
X, y, split = functions.get_sample_dataset({'type': 'breast_cancer'})
assert X.shape == (569, 30)
assert y.shape == (569,)
def test_boston_housing(self):
X, y, split = functions.get_sample_dataset({'type': 'boston'})
assert X.shape == (506, 13)
assert y.shape == (506,)
def test_diabetes(self):
X, y, split = functions.get_sample_dataset({'type': 'diabetes'})
assert X.shape == (442, 10)
assert y.shape == (442,)
class TestVerifyEstimatorClass(unittest.TestCase):
def setUp(self):
self.source = ''.join([
"from sklearn.metrics import accuracy_score\n",
"import numpy as np\n",
"def metric_generator(y_true, y_probas):\n",
" argmax = np.argmax(y_probas, axis=1)\n",
" return accuracy_score(y_true, argmax)"
])
self.wrong_source = "metric_generator = ''"
self.dataset_properties = {
'type': 'multiclass',
}
def test_verify_estimator_class(self):
np.random.seed(8)
performance_dict, hyperparameters = functions.verify_estimator_class(
RandomForestClassifier(),
'predict_proba',
dict(Accuracy=self.source),
self.dataset_properties
)
assert round(performance_dict['Accuracy'], 3) == 0.8
assert hyperparameters == {
'warm_start': False,
'oob_score': False,
'n_jobs': 1,
'verbose': 0,
'max_leaf_nodes': None,
'bootstrap': True,
'min_samples_leaf': 1,
'n_estimators': 10,
'min_samples_split': 2,
'min_weight_fraction_leaf': 0.0,
'criterion': 'gini',
'random_state': None,
'min_impurity_split': None,
'min_impurity_decrease': 0.0,
'max_features': 'auto',
'max_depth': None,
'class_weight': None
}
def test_non_serializable_parameters(self):
pipeline = Pipeline([('pca', PCA()), ('rf', RandomForestClassifier())])
performance_dict, hyperparameters = functions.verify_estimator_class(
pipeline,
'predict_proba',
dict(Accuracy=self.source),
self.dataset_properties
)
assert functions.is_valid_json(hyperparameters)
def test_assertion_of_invalid_metric_generator(self):
np.random.seed(8)
from __future__ import print_function, division
import numpy as np
import os
import errno
import tensorflow as tf
from tabulate import tabulate
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import QuantileTransformer, RobustScaler
from sklearn.preprocessing import FunctionTransformer
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import math
def min_max_scale(X_train, X_valid, X_test):
scaler = MinMaxScaler(feature_range=(0.0, 1.0))
scaler.fit(X_train)
norm_train = scaler.transform(X_train)
norm_valid = scaler.transform(X_valid) if X_valid is not None else None
norm_test = scaler.transform(X_test) if X_test is not None else None
return norm_train, norm_valid, norm_test
def standard_scale(X_train, X_valid, X_test):
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_valid = scaler.transform(X_valid) if X_valid is not None else None
X_test = scaler.transform(X_test) if X_test is not None else None
return X_train, X_valid, X_test
def interquartile_scale(X_train, X_valid, X_test):
scaler = RobustScaler(quantile_range=(25.0, 75.0))
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_valid = scaler.transform(X_valid) if X_valid is not None else None
X_test = scaler.transform(X_test) if X_test is not None else None
return X_train, X_valid, X_test
def quantile_transform(X_train, X_valid, X_test, columns):
t = QuantileTransformer()
t.fit(X_train[:, columns])
qX_train = t.transform(X_train[:, columns])
qX_valid = t.transform(X_valid[:, columns]) \
if X_valid is not None else None
qX_test = t.transform(X_test[:, columns]) if X_test is not None else None
# Write the transformed columns back into X_train in all cases, not only when validation
# and test sets are provided.
X_train[:, columns] = qX_train
if X_valid is not None:
X_valid[:, columns] = qX_valid
X_test[:, columns] = qX_test
return X_train, X_valid, X_test
else:
return X_train
def augment_quantiled(X_train, X_valid, X_test, columns):
t = QuantileTransformer()
t.fit(X_train[:, columns])
qX_train = t.transform(X_train[:, columns])
qX_valid = t.transform(X_valid[:, columns]) \
if X_valid is not None else None
qX_test = t.transform(X_test[:, columns]) if X_test is not None else None
mX_train, mX_valid, mX_test = min_max_scale(X_train, X_valid, X_test)
X_train = np.concatenate((mX_train, qX_train), axis=1)
if qX_valid is None:
return X_train
else:
X_valid = np.concatenate((mX_valid, qX_valid), axis=1)
X_test = np.concatenate((mX_test, qX_test), axis=1)
return X_train, X_valid, X_test
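# Shape sketch: if X_train is (n, d) and `columns` selects k of the d features, the returned
# matrices have shape (n, d + k): the min-max-scaled originals followed by quantile-transformed
# copies of the selected columns.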
def log_transform(X_train, X_valid, X_test, columns):
t = FunctionTransformer(np.log1p)
part_X_train = t.transform(X_train[:, columns])
part_X_valid = t.transform(X_valid[:, columns])
part_X_test = t.transform(X_test[:, columns])
X_train[:, columns] = part_X_train
X_valid[:, columns] = part_X_valid
X_test[:, columns] = part_X_test
return X_train, X_valid, X_test
def accuracy(predictions, labels):
return 100.0 * np.sum(np.argmax(predictions, 1) ==
np.argmax(labels, 1)) / predictions.shape[0]
def accuracy_binary(predictions, labels):
predicted_class = np.argmax(predictions, 1)
actual_class = np.argmax(labels, 1)
correct1 = np.logical_and(np.greater(predicted_class, 0),
np.greater(actual_class, 0))
correct2 = np.logical_and(predicted_class == 0, actual_class == 0)
return 100.0 * (np.sum(correct1) + np.sum(correct2)) / predictions.shape[0]
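# Small worked example contrasting the two metrics (illustrative values only): with
# predictions = [[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]] and one-hot labels [[0, 0, 1], [1, 0, 0]],
# accuracy() is 50.0 (only the second row's argmax matches), while accuracy_binary() is 100.0,
# since both rows agree on the class-0 vs non-class-0 split.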
def compute_classification_table(predictions, labels):
n_cls = labels.shape[1]
class_table = np.zeros((n_cls, n_cls))
predicted_cls = np.argmax(predictions, 1)
actual_cls = np.argmax(labels, 1)
for (a, p) in zip(actual_cls, predicted_cls):
class_table[a][p] += 1
return class_table
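# The returned table is indexed [actual, predicted]: entry (a, p) counts samples whose true
# class is a and whose predicted class is p, i.e. a standard confusion matrix.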
def compute_classification_table_binary(predictions, labels):
class_table = np.zeros((2, 2))
predicted_class = np.argmax(predictions, 1)
actual_class = np.argmax(labels, 1)
for (a, p) in zip(actual_class, predicted_class):
class_table[int(a > 0)][int(p > 0)] += 1
return class_table
def correct_percentage(matrix, dataset_name='Test'):
"""
:param matrix: map from actual to predicted
:return: precision and recall measurement
"""
epsilon = 1e-26
num_classes = matrix.shape[0]
weights = np.array([np.sum(matrix[i, :]) / np.sum(matrix) for i
in range(num_classes)])
weights = np.reshape(weights, [num_classes, 1])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from analysis.extra_analysis import get_cv_description, colvars
from system_setup import create_cvs
from system_setup.create_stringpaths import cvs_len5path
from system_setup.string_finding.pnas_simulation_loader import *
from utils.helpfunc import *
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import utils
logger = logging.getLogger("density_field")
def plot_2d_field(xvalues,
yvalues,
Vij,
cvx,
cvy,
ngrid,
cmap=plt.cm.Greys,
heatmap=True,
scatter=False):
if heatmap:
xmin = np.amin(xvalues)
xmax = np.amax(xvalues)
ymin = np.amin(yvalues)
ymax = np.amax(yvalues)
X, Y = np.meshgrid(np.linspace(xmin, xmax, ngrid), np.linspace(ymin, ymax, ngrid))
Z = Vij # np.reshape(Vij, X.shape)
Z = Z - Z.min()
# Z[Z>7] = np.nan
# plt.figure(figsize=(10,8))
# Zrange = Z.max() - Z.min()
# levels = np.arange(Z.min(), Z.min() + Zrange / 2, Zrange / ngrid)
im = plt.contourf(
X.T,
Y.T,
Z,
# np.rot90(Z), #FOR plt.imshow()
# levels=levels,
cmap=cmap,
extent=[xmin, xmax, ymin, ymax])
ct = plt.contour(
X.T,
Y.T,
Z,
# levels=levels,
extent=[xmin, xmax, ymin, ymax],
alpha=0.3,
colors=('k',))
# im.cmap.set_under('k')
# im.set_clim(0, Z.max())
cbar = plt.colorbar(im, orientation='vertical')
cbar.set_label(r'$\Delta G$ [kcal/mol]', fontsize=utils.label_fontsize)
cbar.ax.tick_params(labelsize=utils.ticks_labelsize)
# plt.ylabel(cvy.id)
# plt.xlabel(cvx.id)
plt.grid()
# plt.show()
if scatter:
# gaussian normalized by total number of points
xy = np.vstack([xvalues, yvalues])
colors = Vij(xy)
im = plt.scatter(xvalues, yvalues, c=colors, cmap=cmap)
plt.colorbar(im, orientation='vertical')
plt.ylabel(cvy.id)
plt.xlabel(cvx.id)
plt.show()
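# Note the dual use of Vij above: the heatmap branch expects a precomputed (ngrid, ngrid) grid
# of values, while the scatter branch treats Vij as a callable density estimate (e.g. a
# gaussian_kde) evaluated at the sample points.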
def to_free_energy(density, norm=1, delta=1e-7):
"""ConvertsTODO move to separate module"""
return lambda x: -kb * 310.15 * np.log(density(x) / norm + delta)
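# Minimal usage sketch (illustrative; assumes `kb` is Boltzmann's constant supplied by one of
# the star imports above, and that the density is a callable such as scipy's gaussian_kde):
#     density = stats.gaussian_kde(cv_coordinates)        # cv_coordinates: (n_cvs, n_frames)
#     free_energy = to_free_energy(density, norm=np.max(density(cv_coordinates)))
#     values = free_energy(cv_coordinates)                 # Delta G for every frame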
def get_cv_coordinates(simulation_evals, cvs):
"""Put all CV values into a matrix with len(cvs) rows and total number of simulation frames as columns"""
frame_count = 0
for simulation, cv_values in simulation_evals:
frame_count += len(simulation.traj)
cv_coordinates = np.empty((len(cvs), frame_count))
logger.debug("Aggregating all simulations")
for i, cv in enumerate(cvs):
frames_offset = 0
for simulation, cv_values in simulation_evals:
val = cv_values[i]
traj_size = len(val)
cv_coordinates[i, frames_offset:(frames_offset + traj_size)] = val
frames_offset += traj_size
return cv_coordinates
def integrate_cv(V, cv_idx, width=1):
return np.sum(V, axis=cv_idx)
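# Shape sketch: for a 2-D field V of shape (n0, n1), integrate_cv(V, 0) sums out the first CV
# and returns an array of shape (n1,); the `width` argument is not applied to the sum here.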
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field by summing the analytic SIS shear (gamma_t = kappa)
# from each halo at every source position.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
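# For reference, the jackknife estimate used throughout is the standard leave-one-patch-out
# covariance: with zeta_i the statistic recomputed with patch i omitted and zeta_bar the mean
# of the zeta_i,
#     C_jk = (npatch - 1) / npatch * sum_i (zeta_i - zeta_bar) (zeta_i - zeta_bar)^T
# which is, up to implementation details, what estimate_cov('jackknife') returns below.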
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
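# If the cached reference file is missing, build it by brute force: run many independent
# realizations of the field, measure zeta for each, and store the mean and variance as the
# reference values that the patch-based covariance estimates below are compared against.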
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
# The point is the variance, which is still calculated ok, but I would rather have had
# something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
gggp.process(catp, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Finally a set (with all patches) using the GGGCrossCorrelation class.
gggc = treecorr.GGGCrossCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
print('CrossCorrelation:')
gggc.process(catp, catp, catp)
for g in gggc._all:
print(g.ntri.ravel())
print(g.gam0.ravel())
print(g.vargam0.ravel())
np.testing.assert_allclose(g.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam0, ggg.vargam0, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam1, ggg.vargam1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam2, ggg.vargam2, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam3, ggg.vargam3, rtol=0.05 * tol_factor)
fc = lambda gggc: np.concatenate([
[np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)]
for g in gggc._all])
print('jackknife:')
cov = gggc.estimate_cov('jackknife', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggc.estimate_cov('sample', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggc.estimate_cov('marked_bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggc.estimate_cov('bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.3*tol_factor)
# Without func, don't check the accuracy, but make sure it returns something the right shape.
cov = gggc.estimate_cov('jackknife')
assert cov.shape == (48, 48)
@timer
def test_nnn_jk():
# Test jackknife and other covariance estimates for nnn correlations.
if __name__ == '__main__':
# This setup takes about 1200 sec to run.
nhalo = 300
nsource = 2000
npatch = 16
source_factor = 50
rand_factor = 3
tol_factor = 1
elif False:
# This setup takes about 250 sec to run.
nhalo = 200
nsource = 1000
npatch = 16
source_factor = 50
rand_factor = 2
tol_factor = 2
else:
# This setup takes about 44 sec to run.
nhalo = 100
nsource = 500
npatch = 8
source_factor = 30
rand_factor = 1
tol_factor = 3
file_name = 'data/test_nnn_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
rng = np.random.RandomState()
nruns = 1000
all_nnns = []
all_nnnc = []
t0 = time.time()
for run in range(nruns):
t2 = time.time()
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng)
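# Weight each candidate position by kappa**3 so the selected "galaxies" preferentially trace
# the halos, then draw a Poisson-distributed number of them without replacement.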
p = k**3
p /= np.sum(p)
ns = rng.poisson(nsource)
select = rng.choice(range(len(x)), size=ns, replace=False, p=p)
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k))
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rrr.process(rand_cat)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s, _ = ddd.calculateZeta(rrr)
zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd)
print('simple: ',zeta_s.ravel())
print('compensated: ',zeta_c.ravel())
all_nnns.append(zeta_s.ravel())
all_nnnc.append(zeta_c.ravel())
t3 = time.time()
print('time: ',round(t3-t2),round((t3-t0)/60),round((t3-t0)*(nruns/(run+1)-1)/60))
mean_nnns = np.mean(all_nnns, axis=0)
var_nnns = np.var(all_nnns, axis=0)
mean_nnnc = np.mean(all_nnnc, axis=0)
var_nnnc = np.var(all_nnnc, axis=0)
np.savez(file_name, mean_nnns=mean_nnns, var_nnns=var_nnns,
mean_nnnc=mean_nnnc, var_nnnc=var_nnnc)
data = np.load(file_name)
mean_nnns = data['mean_nnns']
var_nnns = data['var_nnns']
mean_nnnc = data['mean_nnnc']
var_nnnc = data['var_nnnc']
print('mean simple = ',mean_nnns)
print('var simple = ',var_nnns)
print('mean compensated = ',mean_nnnc)
print('var compensated = ',var_nnnc)
# Make a random catalog with rand_factor times as many sources, randomly distributed.
rng = np.random.RandomState(1234)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
t0 = time.time()
rrr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
print('RRR:',rrr.tot)
print(rrr.ntri.ravel())
# Make the data catalog
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng=rng)
print('mean k = ',np.mean(k))
print('min,max = ',np.min(k),np.max(k))
p = k**3
p /= np.sum(p)
select = rng.choice(range(len(x)), size=nsource, replace=False, p=p)
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr)
zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd)
print('DDD:',ddd.tot)
print(ddd.ntri.ravel())
print('simple: ')
print(zeta_s1.ravel())
print(var_zeta_s1.ravel())
print('DRR:',drr.tot)
print(drr.ntri.ravel())
print('RDD:',rdd.tot)
print(rdd.ntri.ravel())
print('compensated: ')
print(zeta_c1.ravel())
print(var_zeta_c1.ravel())
# Make the patches with a large random catalog to make sure the patches are uniform area.
big_rx = rng.uniform(0,1000, 100*nsource)
big_ry = rng.uniform(0,1000, 100*nsource)
big_catp = treecorr.Catalog(x=big_rx, y=big_ry, npatch=npatch, rng=rng)
patch_centers = big_catp.patch_centers
# Do the same thing with patches on D, but not yet on R.
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rddp = dddp.copy()
drrp = dddp.copy()
catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers)
print('Patch\tNtot')
for p in catp.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
print('with patches on D:')
dddp.process(catp)
rddp.process(rand_cat, catp)
drrp.process(catp, rand_cat)
# Need to run calculateZeta to get patch-based covariance
with assert_raises(RuntimeError):
dddp.estimate_cov('jackknife')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print('simple: ')
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
# Check the _calculate_xi_from_pairs function. Using all pairs, should get total xi.
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
# None of these are very good without the random catalog using patches.
# I think this is basically just that the approximations used for estimating the area_frac
# to figure out the appropriately altered RRR counts aren't accurate enough when the total
# counts are as low as this. I think (hope) that it should be semi-ok when N is much larger,
# but this is probably saying that for 3pt, using patches for R is even more important than
# for 2pt.
# Of course, it could also be that this is telling me I still have a bug somewhere that I
# haven't managed to find... :(
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.3*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor)
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp)
print('compensated: ')
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=3.8*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
# Now with the random also using patches
# These are a lot better than the above tests. But still not nearly as good as we were able
# to get in 2pt. I'm pretty sure this is just due to the fact that we need to have much
# smaller catalogs to make it feasible to run this in a reasonable amount of time. I don't
# think this is a sign of any bug in the code.
print('with patched random catalog:')
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=patch_centers)
rrrp = rrr.copy()
rrrp.process(rand_catp)
drrp.process(catp, rand_catp)
rddp.process(rand_catp, catp)
print('simple: ')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.7*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.0*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('compensated: ')
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp)
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
# I haven't implemented calculateZeta for the NNNCrossCorrelation class, because I'm not
# actually sure what the right thing to do here is for calculating a single zeta vector.
# Do we do a different one for each of the 6 permutations? Or one overall one?
# So rather than just do something, I'll wait until someone has a coherent use case where
# they want this and can explain exactly what the right thing to compute is.
# So to just exercise the machinery with NNNCrossCorrelation, I'm using a func parameter
# to compute something equivalent to the simple zeta calculation.
dddc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rrrc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
print('CrossCorrelation:')
dddc.process(catp, catp, catp)
rrrc.process(rand_catp, rand_catp, rand_catp)
def cc_zeta(corrs):
d, r = corrs
d1 = d.n1n2n3.copy()
d1._sum(d._all)
r1 = r.n1n2n3.copy()
r1._sum(r._all)
zeta, _ = d1.calculateZeta(r1)
return zeta.ravel()
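# Note: cc_zeta collapses the per-permutation sub-correlations of each NNNCrossCorrelation
# (via n1n2n3.copy() and _sum over _all) into a single NNNCorrelation, so the standard
# calculateZeta machinery can be reused for the covariance estimate.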
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
# Repeat with a 1-2 cross-correlation
print('CrossCorrelation 1-2:')
dddc.process(catp, catp)
rrrc.process(rand_catp, rand_catp)
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
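# For reference, the jackknife covariance being checked here is
#   C_jk = (N-1)/N * sum_i (zeta_(i) - zeta_bar) (zeta_(i) - zeta_bar)^T,
# where zeta_(i) is the statistic recomputed with patch i removed and N is the number
# of patches.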
if __name__ == '__main__':
nhalo = 100
ngal = 500
npatch = 16
rand_factor = 5
else:
nhalo = 100
ngal = 30
npatch = 16
rand_factor = 2
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(ngal, nhalo, rng)
rx = rng.uniform(0,1000, rand_factor*ngal)
ry = rng.uniform(0,1000, rand_factor*ngal)
rand_cat_nopatch = treecorr.Catalog(x=rx, y=ry)
rand_cat = treecorr.Catalog(x=rx, y=ry, npatch=npatch, rng=rng)
patch_centers = rand_cat.patch_centers
cat_nopatch = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch_centers=patch_centers)
print('cat patches = ',np.unique(cat.patch))
print('len = ',cat.nobj, cat.ntot)
assert cat.nobj == ngal
print('Patch\tNtot')
for p in cat.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
# Start with KKK, since relatively simple.
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat_nopatch)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
kkk.process(cat)
np.testing.assert_allclose(kkk.zeta, kkk1.zeta)
kkk_zeta_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat1)
print('zeta = ',kkk1.zeta.ravel())
kkk_zeta_list.append(kkk1.zeta.ravel())
kkk_zeta_list = np.array(kkk_zeta_list)
cov = np.cov(kkk_zeta_list.T, bias=True) * (len(kkk_zeta_list)-1)
varzeta = np.diagonal(np.cov(kkk_zeta_list.T, bias=True)) * (len(kkk_zeta_list)-1)
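# np.cov(..., bias=True) divides by N, so multiplying by (N-1) gives the standard
# jackknife prefactor (N-1)/N applied to the sum of squared deviations.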
print('KKK: treecorr jackknife varzeta = ',kkk.varzeta.ravel())
print('KKK: direct jackknife varzeta = ',varzeta)
np.testing.assert_allclose(kkk.varzeta.ravel(), varzeta)
# Now GGG
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat_nopatch)
ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
ggg.process(cat)
np.testing.assert_allclose(ggg.gam0, ggg1.gam0)
np.testing.assert_allclose(ggg.gam1, ggg1.gam1)
np.testing.assert_allclose(ggg.gam2, ggg1.gam2)
np.testing.assert_allclose(ggg.gam3, ggg1.gam3)
ggg_gam0_list = []
ggg_gam1_list = []
ggg_gam2_list = []
ggg_gam3_list = []
ggg_map3_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat1)
ggg_gam0_list.append(ggg1.gam0.ravel())
ggg_gam1_list.append(ggg1.gam1.ravel())
ggg_gam2_list.append(ggg1.gam2.ravel())
ggg_gam3_list.append(ggg1.gam3.ravel())
ggg_map3_list.append(ggg1.calculateMap3()[0])
ggg_gam0_list = np.array(ggg_gam0_list)
vargam0 = np.diagonal(np.cov(ggg_gam0_list.T, bias=True)) * (len(ggg_gam0_list)-1)
print('GGG: treecorr jackknife vargam0 = ',ggg.vargam0.ravel())
print('GGG: direct jackknife vargam0 = ',vargam0)
np.testing.assert_allclose(ggg.vargam0.ravel(), vargam0)
ggg_gam1_list = np.array(ggg_gam1_list)
vargam1 = np.diagonal(np.cov(ggg_gam1_list.T, bias=True)) * (len(ggg_gam1_list)-1)
print('GGG: treecorr jackknife vargam1 = ',ggg.vargam1.ravel())
print('GGG: direct jackknife vargam1 = ',vargam1)
np.testing.assert_allclose(ggg.vargam1.ravel(), vargam1)
ggg_gam2_list = np.array(ggg_gam2_list)
vargam2 = np.diagonal(np.cov(ggg_gam2_list.T, bias=True)) * (len(ggg_gam2_list)-1)
print('GGG: treecorr jackknife vargam2 = ',ggg.vargam2.ravel())
print('GGG: direct jackknife vargam2 = ',vargam2)
np.testing.assert_allclose(ggg.vargam2.ravel(), vargam2)
ggg_gam3_list = np.array(ggg_gam3_list)
vargam3 = np.diagonal(np.cov(ggg_gam3_list.T, bias=True)) * (len(ggg_gam3_list)-1)
print('GGG: treecorr jackknife vargam3 = ',ggg.vargam3.ravel())
print('GGG: direct jackknife vargam3 = ',vargam3)
np.testing.assert_allclose(ggg.vargam3.ravel(), vargam3)
ggg_map3_list = np.array(ggg_map3_list)
varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1)
covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife',
lambda corrs: corrs[0].calculateMap3()[0])
print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3))
print('GGG: direct jackknife varmap3 = ',varmap3)
np.testing.assert_allclose(np.diagonal(covmap3), varmap3)
# Finally NNN, where we need to use randoms. Both simple and compensated.
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
drr = ddd.copy()
rdd = ddd.copy()
rrr = ddd.copy()
ddd.process(cat)
drr.process(cat, rand_cat)
rdd.process(rand_cat, cat)
rrr.process(rand_cat)
zeta1_list = []
zeta2_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
rand_cat1 = treecorr.Catalog(x=rand_cat.x[rand_cat.patch != i],
y=rand_cat.y[rand_cat.patch != i])
ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
drr1 = ddd1.copy()
rdd1 = ddd1.copy()
rrr1 = ddd1.copy()
ddd1.process(cat1)
drr1.process(cat1, rand_cat1)
rdd1.process(rand_cat1, cat1)
rrr1.process(rand_cat1)
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel())
zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel())
print('simple')
zeta1_list = np.array(zeta1_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr)
varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta1)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta1)
print('compensated')
print(zeta2_list)
zeta2_list = np.array(zeta2_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)
varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta2)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta2)
# Can't do patch calculation with different numbers of patches in rrr, drr, rdd.
rand_cat3 = treecorr.Catalog(x=rx, y=ry, npatch=3)
cat3 = treecorr.Catalog(x=x, y=y, patch_centers=rand_cat3.patch_centers)
rrr3 = rrr.copy()
drr3 = drr.copy()
rdd3 = rdd.copy()
rrr3.process(rand_cat3)
drr3.process(cat3, rand_cat3)
rdd3.process(rand_cat3, cat3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3, drr, rdd)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd)
@timer
def test_finalize_false():
nsource = 80
nhalo = 100
npatch = 16
# Make three independent data sets
rng = np.random.RandomState(8675309)
x_1, y_1, g1_1, g2_1, k_1 = generate_shear_field(nsource, nhalo, rng)
x_2, y_2, g1_2, g2_2, k_2 = generate_shear_field(nsource, nhalo, rng)
x_3, y_3, g1_3, g2_3, k_3 = generate_shear_field(nsource, nhalo, rng)
# Make a single catalog with all three together
cat = treecorr.Catalog(x=np.concatenate([x_1, x_2, x_3]),
y=np.concatenate([y_1, y_2, y_3]),
g1=np.concatenate([g1_1, g1_2, g1_3]),
g2=np.concatenate([g2_1, g2_2, g2_3]),
k=np.concatenate([k_1, k_2, k_3]),
npatch=npatch)
# Now the three separately, using the same patch centers
cat1 = treecorr.Catalog(x=x_1, y=y_1, g1=g1_1, g2=g2_1, k=k_1, patch_centers=cat.patch_centers)
cat2 = treecorr.Catalog(x=x_2, y=y_2, g1=g1_2, g2=g2_2, k=k_2, patch_centers=cat.patch_centers)
cat3 = treecorr.Catalog(x=x_3, y=y_3, g1=g1_3, g2=g2_3, k=k_3, patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat1.patch, cat.patch[0:nsource])
np.testing.assert_array_equal(cat2.patch, cat.patch[nsource:2*nsource])
np.testing.assert_array_equal(cat3.patch, cat.patch[2*nsource:3*nsource])
# KKK auto
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk1.process(cat)
kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk2.process(cat1, initialize=True, finalize=False)
kkk2.process(cat2, initialize=False, finalize=False)
kkk2.process(cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat2, cat1, initialize=False, finalize=False)
kkk2.process(cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat1, initialize=False, finalize=False)
kkk2.process(cat3, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross12
cat23 = treecorr.Catalog(x=np.concatenate([x_2, x_3]),
y=np.concatenate([y_2, y_3]),
g1=np.concatenate([g1_2, g1_3]),
g2=np.concatenate([g2_2, g2_3]),
k=np.concatenate([k_2, k_3]),
patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource])
kkk1.process(cat1, cat23)
kkk2.process(cat1, cat2, initialize=True, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import selenium
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.common.exceptions import ElementClickInterceptedException
import time
from selenium.webdriver.common.keys import Keys
#-------------------------------------------------------------------------------
# Functions for retrieving the data
def insertNone(datos):
"""
Handles the missing (null) fields of each artwork.
:param datos: list with the artwork's current data
:return: list containing all the variables
"""
columnas = ["Número de catálogo","Autor","Título","Fecha",
"Técnica","Soporte","Dimensión","Serie","Procedencia"]
for col in columnas:
if col not in datos:
indice = columnas.index(col)*2
datos.insert(indice, col)
datos.insert(indice + 1, "None")
return datos
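# Hypothetical example: insertNone(["Autor", "Goya", "Título", "Saturno"]) would return the
# list with every missing field from `columnas` inserted at its catalogue position, each
# followed by the string "None".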
def getPage(srcUrl):
"""
Extracts the HTML structure of a web page.
:param srcUrl: web page to extract the content from; must be a URL given as a string.
:return: HTML structure of the page
"""
pageStr = requests.get(srcUrl)
pageCont = BeautifulSoup(pageStr.content, features="html.parser")
return pageCont
def getLinks(pageCont):
"""
Extracts the URLs inside the "presentacion-mosaico" class of a page from its HTML structure.
:param pageCont: HTML structure of a web page.
:return: list with the extracted URLs
"""
mosaico = pageCont.find_all(class_="presentacion-mosaico")
links = []
for obra in mosaico:
prueba = obra.find('a')
links.append(prueba.get("href"))
return links
def getLinksMax(pageCont,max):
"""
Extracts the URLs inside the "presentacion-mosaico" class of a page from its HTML structure,
limited to a maximum number of links.
:param pageCont: HTML structure of a web page.
:param max: maximum number of links
:return: list with the extracted URLs
"""
mosaico = pageCont.find_all(class_="presentacion-mosaico")
links = []
len(mosaico)
for i in range(max):
prueba = mosaico[i].find('a')
links.append(prueba.get("href"))
return links
def getDatos(pageCont):
"""
Extracts the data contained in the technical sheet ("ficha técnica") of an artwork.
:param pageCont: HTML structure of a web page
:return: list with the contents of the technical sheet
"""
ficha = pageCont.find(class_="ficha-tecnica")
if ficha is not None:
tags = ficha.find_all(['dt', 'dd']) # 'dd' holds each piece's data and 'dt' the variable name
elementos = []
for elemento in tags:
if elemento.name == "dd": # content
cadena = []
for x in elemento.stripped_strings:
cadena.append(x)
elementos.append("".join(cadena))
else:
for z in elemento.stripped_strings: # label
elementos.append(z)
# Add the missing columns, the URL column, and fetch the image link
if len(elementos) < 18:
elementos = insertNone(elementos)
elementos.append("UrlImagen")
elementos.append(getLinkImage(pageCont))
else:
elementos = ["Número de catálogo","","Autor","","Título","","Fecha","",
"Técnica","","Soporte","","Dimensión","","Serie","","Procedencia","","UrlImagen",""]
return elementos
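# The returned list always has 20 entries: the 9 technical-sheet fields as alternating
# (label, value) pairs in the order given by `columnas`, plus "UrlImagen" and the image link.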
#-------------------------------------------------------------------------------
# Functions for downloading images
def load_requests(source_url):
"""
Downloads the image located at source_url.
:param source_url: URL of the image to download
:return: None
"""
r = requests.get(source_url, stream = True)
if r.status_code == 200:
aSplit = source_url.split('/')
ruta = "./img/"+aSplit[len(aSplit)-1]
print(ruta)
output = open(ruta,"wb")
for chunk in r:
output.write(chunk)
output.close()
def getLinkImage(pageCont):
"""
Extracts the link to the image of an artwork contained in pageCont.
:param pageCont: HTML structure of the artwork's page
:return: link to the artwork's image
"""
linksImagen = []
imagenInfo = pageCont.find(class_="section-viewer")
for img in imagenInfo.findAll('img'): # collect all the img src attributes
linksImagen.append(img.get('src'))
return list(filter(None, linksImagen))[0]
#-------------------------------------------------------------------------------
# Fully unfold a dynamically loaded page
option = webdriver.ChromeOptions() # initialize the browser connection
option.add_argument("--headless") # option so the browser window does not appear
# Install Driver
driver = webdriver.Chrome(ChromeDriverManager().install(), options = option)
webBase = "https://www.museodelprado.es/coleccion/obras-de-arte"
driver.get(webBase)
try:
driver.find_element_by_tag_name('body').send_keys(Keys.END) # scroll to the very end of the web page
print("Fin pagina")
time.sleep(600) # a long wait is needed for the dynamically loaded page to reach the end
print("Fin time")
except:
print("error")
# Get the HTML source for BeautifulSoup
body = driver.execute_script("return document.body")
source = body.get_attribute('innerHTML')
# Extract the structure of the base page
pagBaseStr = BeautifulSoup(source, "html.parser")
driver.close()
# Extract the links
enlacesObras = getLinks(pagBaseStr)
numMax = len(enlacesObras)
#enlacesObras = getLinksMax(pagBaseStr,numMax)
# Extract the data
datos = []
inicio = 1
for link in enlacesObras:
try:
print(inicio,"/",numMax)
tempPage = getPage(link)
tempData = getDatos(tempPage)
if len(tempData)!= 20:
print(tempData)
datos.append(tempData)
inicio += 1
time.sleep(0.5)
except Exception as e:
print(str(e))
pass
datosNP = np.array(datos)
for i, _array in enumerate(datosNP):
if i == 0: # only executed on the first iteration, so initialize the DataFrame here
tempData = np.reshape(datosNP[i], (10, 2)) # list of lists
fichaTec = tempData[:, 1] # extract the info
datosDF = pd.DataFrame(data=[fichaTec], columns=tempData[:, 0])
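# Each artwork's 20-element list is reshaped to 10 rows of (label, value); column 0 holds
# the column headers and column 1 the corresponding row of data for the DataFrame.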
else:
tempData = np.reshape(datosNP[i], (10, 2))
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import expit
from decimal import Decimal
def sigmoid(z):
# SIGMOID Compute sigmoid function
# J = SIGMOID(z) computes the sigmoid of z.
g = np.zeros(z.shape)
g = expit(z)
return g
def sigmoidGradient(z):
#SIGMOIDGRADIENT returns the gradient of the sigmoid function
#evaluated at z
g = 1.0 / (1.0 + np.exp(-z))
g = g * (1 - g)
return g
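# This uses the identity sigma'(z) = sigma(z) * (1 - sigma(z)), so g holds the gradient of
# the sigmoid evaluated elementwise at z.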
def displayData(X, example_width=None):
# [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
# stored in X in a nice grid. It returns the figure handle h and the
# displayed array if requested.
# closes previously opened figure. preventing a
# warning after opening too many figures
plt.close()
# creates new figure
plt.figure()
# turns 1D X array into 2D
if X.ndim == 1:
X = np.reshape(X, (-1, X.shape[0]))
# Set example_width automatically if not passed in
if not example_width or not 'example_width' in locals():
example_width = int(round(math.sqrt(X.shape[1])))
# Gray Image
plt.set_cmap("gray")
# Compute rows, cols
m, n = X.shape
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(math.floor(math.sqrt(m)))
display_cols = int(math.ceil(m / display_rows))
# Between images padding
pad = 1
# Setup blank display
display_array = -np.ones((pad + display_rows * (example_height + pad), pad + display_cols * (example_width + pad)))
# Copy each example into a patch on the display array
curr_ex = 1
for j in range(1, display_rows + 1):
for i in range(1, display_cols + 1):
if curr_ex > m:
break
# Copy the patch
# Get the max value of the patch to normalize all examples
max_val = max(abs(X[curr_ex - 1, :]))
rows = pad + (j - 1) * (example_height + pad) + np.array(range(example_height))
cols = pad + (i - 1) * (example_width + pad) + np.array(range(example_width))
# Basic (vs. advanced) indexing/slicing is necessary so that we look can assign
# values directly to display_array and not to a copy of its subarray.
# from stackoverflow.com/a/7960811/583834 and
# bytes.com/topic/python/answers/759181-help-slicing-replacing-matrix-sections
# Also notice the order="F" parameter on the reshape call - this is because python's
# default reshape function uses "C-like index order, with the last axis index
# changing fastest, back to the first axis index changing slowest" i.e.
# it first fills out the first row/the first index, then the second row, etc.
# matlab uses "Fortran-like index order, with the first index changing fastest,
# and the last index changing slowest" i.e. it first fills out the first column,
# then the second column, etc. This latter behaviour is what we want.
# Alternatively, we can keep the deault order="C" and then transpose the result
# from the reshape call.
display_array[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1] = np.reshape(X[curr_ex - 1, :],
(example_height, example_width),
order="F") / max_val
curr_ex += 1
if curr_ex > m:
break
# Display Image
h = plt.imshow(display_array, vmin=-1, vmax=1)
# Do not show axis
plt.axis('off')
plt.show(block=False)
return h, display_array
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, \
num_labels, X, y, lambda_reg):
# NNCOSTFUNCTION Implements the neural network cost function for a two layer
# neural network which performs classification
# [J grad] = NNCOSTFUNCTON(nn_params, hidden_layer_size, num_labels, ...
# X, y, lambda) computes the cost and gradient of the neural network. The
# parameters for the neural network are "unrolled" into the vector
# nn_params and need to be converted back into the weight matrices.
#
# The returned parameter grad should be a "unrolled" vector of the
# partial derivatives of the neural network.
# Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
# for our 2 layer neural network
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)], \
(hidden_layer_size, input_layer_size + 1), order='F')
Theta2 = np.reshape(nn_params[hidden_layer_size * (input_layer_size + 1):], \
(num_labels, hidden_layer_size + 1), order='F')
# Setup some useful variables
m = len(X)
# You need to return the following variables correctly
J = 0;
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
# ====================== YOUR CODE HERE ======================
# Instructions: You should complete the code by working through the
# following parts.
#
# Part 1: Feedforward the neural network and return the cost in the
# variable J. After implementing Part 1, you can verify that your
# cost function computation is correct by verifying the cost
# computed in ex4.m
#
# Part 2: Implement the backpropagation algorithm to compute the gradients
# Theta1_grad and Theta2_grad. You should return the partial derivatives of
# the cost function with respect to Theta1 and Theta2 in Theta1_grad and
# Theta2_grad, respectively. After implementing Part 2, you can check
# that your implementation is correct by running checkNNGradients
#
# Note: The vector y passed into the function is a vector of labels
# containing values from 1..K. You need to map this vector into a
# binary vector of 1's and 0's to be used with the neural network
# cost function.
#
# Hint: We recommend implementing backpropagation using a for-loop
# over the training examples if you are implementing it for the
# first time.
#
# Part 3: Implement regularization with the cost function and gradients.
#
# Hint: You can implement this around the code for
# backpropagation. That is, you can compute the gradients for
# the regularization separately and then add them to Theta1_grad
# and Theta2_grad from Part 2.
#
# add column of ones as bias unit from input layer to second layer
X = np.column_stack((np.ones((m, 1)), X)) # = a1
# calculate second layer as sigmoid( z2 ) where z2 = Theta1 * a1
a2 = sigmoid(np.dot(X, Theta1.T))
# add column of ones as bias unit from second layer to third layer
a2 = np.column_stack((np.ones((a2.shape[0], 1)), a2))
# calculate third layer as sigmoid ( z3 ) where z3 = Theta2 * a2
a3 = sigmoid(np.dot(a2, Theta2.T))
# %% COST FUNCTION CALCULATION
# % NONREGULARIZED COST FUNCTION
# recode labels as vectors containing only values 0 or 1
labels = y
# set y to be matrix of size m x k
y = np.zeros((m, num_labels))
# for every label, convert it into vector of 0s and a 1 in the appropriate position
for i in range(m):
y[i, labels[i] - 1] = 1
# at this point, both a3 and y are m x k matrices, where m is the number of inputs
# and k is the number of hypotheses. Given that the cost function is a sum
# over m and k, loop over m and in each loop, sum over k by doing a sum over the row
cost = 0
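# Accumulates the (unregularized) cross-entropy terms: sum_i sum_k [ y_ik*log(a3_ik)
# + (1 - y_ik)*log(1 - a3_ik) ]; the usual cost J negates this sum and divides by m.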
for i in range(m):
cost += np.sum(y[i] * np.log(a3[i]) + (1 - y[i]) * np.log(1 - a3[i]))
import json
import numpy as np
from scipy.constants import e as qe
from scipy.constants import m_p
import xpart as xp
bunch_intensity = 1e11
sigma_z = 22.5e-2
n_part = int(5e6)
nemitt_x = 2e-6
nemitt_y = 2.5e-6
filename = ('../../xtrack/test_data/sps_w_spacecharge/'
'optics_and_co_at_start_ring.json')
with open(filename, 'r') as fid:
ddd = json.load(fid)
RR = np.array(ddd['RR_madx'])
part_on_co = xp.Particles.from_dict(ddd['particle_on_madx_co'])
part = xp.generate_matched_gaussian_bunch(
num_particles=n_part, total_intensity_particles=bunch_intensity,
nemitt_x=nemitt_x, nemitt_y=nemitt_y, sigma_z=sigma_z,
particle_ref=part_on_co, R_matrix=RR,
circumference=6911., alpha_momentum_compaction=0.0030777,
rf_harmonic=4620, rf_voltage=3e6, rf_phase=0)
# CHECKS
y_rms = np.std(part.y)
py_rms = np.std(part.py)
import dataclasses
import os
import typing
from copy import deepcopy
from io import BytesIO
from pathlib import Path
import numpy as np
from PartSegCore.algorithm_describe_base import AlgorithmProperty
from PartSegCore.analysis.io_utils import ProjectTuple
from PartSegCore.analysis.save_functions import SaveCmap
from PartSegCore.channel_class import Channel
from PartSegCore.io_utils import SaveBase, SaveROIAsNumpy, SaveROIAsTIFF
from PartSegCore.roi_info import ROIInfo
from PartSegCore.universal_const import Units
class SaveModeling(SaveBase):
@classmethod
def get_fields(cls) -> typing.List[typing.Union[AlgorithmProperty, str]]:
return [
AlgorithmProperty("channel", "Channel", 0, value_type=Channel),
AlgorithmProperty("clip", "Clip area", False),
AlgorithmProperty(
"reverse", "Reverse", False, help_text="Reverse brightness off image (for electron microscopy)"
),
AlgorithmProperty("units", "Units", Units.nm, value_type=Units),
]
@classmethod
def get_name(cls):
return "Modeling Data"
@classmethod
def get_default_extension(cls):
return ""
@classmethod
def get_short_name(cls):
return "modeling data"
@classmethod
def save(
cls,
save_location: typing.Union[str, BytesIO, Path],
project_info: ProjectTuple,
parameters: dict,
range_changed=None,
step_changed=None,
):
if not os.path.exists(save_location):
os.makedirs(save_location)
if not os.path.isdir(save_location):
raise OSError("save location exists and is not a directory")
parameters = deepcopy(parameters)
if parameters["clip"]:
points = np.nonzero(project_info.roi_info.roi)
lower_bound = np.min(points, axis=1)
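# np.nonzero returns one index array per axis, so taking the min (and max) along axis 1
# gives the bounding box of the ROI, i.e. the region the "clip" option would crop to.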
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
if "PyPy" not in platform.python_implementation():
from scipy.io import loadmat, savemat
from Florence.Tensor import makezero, itemfreq, unique2d, in2d
from Florence.Utils import insensitive
from .vtk_writer import write_vtu
try:
import meshpy.triangle as triangle
has_meshpy = True
except ImportError:
has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
4. Finding boundary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
# self.has_meshpy = has_meshpy
def SetElements(self,arr):
self.elements = arr
def SetPoints(self,arr):
self.points = arr
def SetEdges(self,arr):
self.edges = arr
def SetFaces(self,arr):
self.faces = arr
def GetElements(self):
return self.elements
def GetPoints(self):
return self.points
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetInteriorEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetInteriorEdgesTri()
elif self.element_type == "quad":
self.GetInteriorEdgesQuad()
elif self.element_type == "pent":
self.GetInteriorEdgesPent()
elif self.element_type == "tet":
self.GetInteriorEdgesTet()
elif self.element_type == "hex":
self.GetInteriorEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.interior_edges
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetInteriorFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetInteriorFacesTet()
elif self.element_type == "hex":
self.GetInteriorFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.interior_faces
def GetElementsEdgeNumbering(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsEdgeNumberingTri()
elif self.element_type == "quad":
return self.GetElementsEdgeNumberingQuad()
else:
raise ValueError('Type of element not understood')
return self.edge_to_element
def GetElementsWithBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsWithBoundaryEdgesTri()
elif self.element_type == "quad":
return self.GetElementsWithBoundaryEdgesQuad()
else:
raise ValueError('Type of element not understood')
return self.boundary_edge_to_element
def GetElementsFaceNumbering(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsFaceNumberingTet()
elif self.element_type == "hex":
return self.GetElementsFaceNumberingHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.face_to_element
def GetElementsWithBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsWithBoundaryFacesTet()
elif self.element_type == "hex":
return self.GetElementsWithBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.boundary_face_to_element
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
def GetEdgesTri(self):
"""Find all edges of a triangular mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementTri(p-1)[0]
# CHECK IF FACES ARE ALREADY AVAILABLE
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1 and self.all_edges.shape[1] == p+1:
warn("Mesh edges seem to be already computed. I am going to recompute them")
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.zeros((3*self.elements.shape[0],p+1),dtype=np.uint64)
edges[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
edges[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
edges[2*self.elements.shape[0]:,:] = self.elements[:,node_arranger[2,:]]
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
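# Because the candidate edges were stacked as three blocks of nelem rows (one block per
# local edge), idx % nelem recovers the element index and idx // nelem the local edge
# number within that element.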
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesTet
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesTet":
self.all_edges = edges
return edges
def GetBoundaryEdgesTri(self):
"""Find boundary edges (lines) of triangular mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTri(p-1)[0]
# CONCATENATE ALL THE EDGES MADE FROM ELEMENTS
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT'S JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesTri(self):
"""Computes interior edges of a triangular mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTri()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTri()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger, np.array(x))
import numpy as np
from prml.linear.classifier import Classifier
from prml.rv.gaussian import Gaussian
class LinearDiscriminantAnalyzer(Classifier):
"""
Linear discriminant analysis model
"""
def _fit(self, X, t, clip_min_norm=1e-10):
self._check_input(X)
self._check_target(t)
self._check_binary(t)
X0 = X[t == 0]
X1 = X[t == 1]
m0 = np.mean(X0, axis=0)
m1 = np.mean(X1, axis=0)
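# Note: given the class means m0 and m1, Fisher's LDA direction is w = Sw^{-1} (m1 - m0)
# up to scale, where Sw is the within-class covariance matrix; that is the quantity the
# fit ultimately needs.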
#!/usr/bin/env python
import argparse
import sys
parser = argparse.ArgumentParser(description="")
parser.add_argument("--bam", "-s", help="input bam file + index mapping contigs to location wehre original PSVs were called")
parser.add_argument("--psvs", "-p", nargs='+', help="List of vcf files describing the different PSVs, must have form, group.{\d+}.vcf", type=argparse.FileType('r'))
parser.add_argument("--check", help="Add an input fasta file (ASM.assemblies.fasta) to check if the PSV values are correct", default = None)
parser.add_argument("outfile",nargs="?", help="output table file", type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-d', action="store_true", default=False)
args = parser.parse_args()
import pysam
import re
import pandas as pd
import numpy as np
complement = {"A":"T", "T":"A", "G":"C", "C":"G", "N":"N"}
def readPsvs():
dfs = []
for myfile in args.psvs:
match = re.match(".+\.(\d+)\.vcf", myfile.name)
assert match is not None
group = int(match.group(1))
df = pd.read_table(myfile, comment="#", header = None)
df = df[[0, 1, 3, 4]]
df.columns = ["contig", "pos", "ref", "alt"]
df["group"] = group
# change from one to zero based indexing
df["pos"] = df["pos"] - 1
df["group"] = group
dfs.append(df)
rtn = pd.concat(dfs)
return(rtn)
def readAlns():
samfile = pysam.AlignmentFile(args.bam)
alns = {}
for alnSeg in samfile.fetch(until_eof=True):
qpos, rpos = zip(*alnSeg.get_aligned_pairs())
rpos = np.array(rpos)
qpos = np.array(qpos)
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Script / command line tool for merging supervoxels into single labels that
# were manually merged using the knossos standalone tool (annotation file).
#import numpy as np
import time
import argparse
import os
from io import StringIO
import zipfile
import glob
import numpy as np
from scipy import ndimage as nd
import scipy.ndimage.filters as filters
from dpCubeIter import dpCubeIter
from utils.typesh5 import emLabels
from dpWriteh5 import dpWriteh5
from dpLoadh5 import dpLoadh5
class dpLabelMerger(emLabels):
# Constants
LIST_ARGS = dpLoadh5.LIST_ARGS + dpCubeIter.LIST_ARGS + ['segmentation_values']
# type to use for all processing operations
PDTYPE = np.double
def __init__(self, args):
emLabels.__init__(self,args)
# save the command line argument dict as a string
out = StringIO(); print( vars(args), file=out )
self.arg_str = out.getvalue(); out.close()
# xxx - meh, need to fix this
if not self.data_type_out: self.data_type_out = self.data_type
assert( len(self.fileprefixes) == 1 and len(self.filepaths) == 1 ) # prefix / path for h5 label inputs only
self.segmentation_levels = len(self.segmentation_values)
# print out all initialized variables in verbose mode
if self.dpLabelMerger_verbose: print('dpLabelMerger, verbose mode:\n'); print(vars(self))
# copied this out of dpResample.py, but all dims resampled
self.nresample_dims = 3
self.nslices = self.dsfactor**self.nresample_dims
self.slices = [None]*self.nslices; f = self.dsfactor; ff = f*f
for i in range(f):
for j in range(f):
for k in range(f):
self.slices[i*ff + j*f + k] = np.s_[i::f,j::f,k::f]
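# These strided slices implement the f x f x f downsampling: slice index i*ff + j*f + k
# selects every f-th voxel starting at offset (i, j, k), so the f**3 slices together
# partition the volume.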
assert(self.contour_lvl >= 0 and self.contour_lvl < 1) # bad choice
def doMerging(self):
volume_init = False
cur_volume = np.zeros((4,), dtype=np.uint32)
W = np.ones(self.smooth, dtype=self.PDTYPE) / self.smooth.prod() # smoothing kernel
for s in range(self.segmentation_levels):
self.cubeIter = dpCubeIter.cubeIterGen(self.volume_range_beg,self.volume_range_end,self.overlap,
self.cube_size, chunksize=self.chunksize, left_remainder_size=self.left_remainder_size,
right_remainder_size=self.right_remainder_size, leave_edge=self.leave_edge)
self.subgroups[-1] = self.segmentation_values[s]
cur_volume[3] = s
for self.volume_info,n in zip(self.cubeIter, range(self.cubeIter.volume_size)):
cur_volume[:3], self.size, self.chunk, self.offset, suffixes, _, _, _, _ = self.volume_info
self.srcfile = os.path.join(self.filepaths[0], self.fileprefixes[0] + suffixes[0] + '.h5')
self.inith5()
# only load superchunks that contain some object supervoxels
ind = np.ravel_multi_index(cur_volume, self.volume_step_seg)
if len(self.sc_to_objs[ind]) < 1: continue
if self.dpLabelMerger_verbose:
print('Merge in chunk %d %d %d, seglevel %d' % tuple(self.chunk.tolist() + [s])); t = time.time()
self.readCubeToBuffers()
cube = self.data_cube; cur_ncomps = self.data_attrs['types_nlabels'].sum()
# xxx - writing to an hdf5 file in chunks or as a single volume from memory does not necessarily
# need to be tied to dsfactor==1, can add another command-line option for this.
if not volume_init:
if self.dsfactor > 1:
volume_init=True; f = self.dsfactor
new_attrs = self.data_attrs
# changed this to be added when raw hdf5 is created
if 'factor' not in new_attrs:
new_attrs['factor'] = np.ones((dpLoadh5.ND,),dtype=np.double)
new_datasize = self.datasize.copy()
if 'boundary' in new_attrs: # proxy for whether attrs is there at all
# update the scale and compute new chunk/size/offset
new_attrs['scale'] *= f; new_attrs['boundary'] //= f
new_attrs['nchunks'] = np.ceil(new_attrs['nchunks'] / f).astype(np.int32)
# this attribute is saved as downsample factor
new_attrs['factor'] *= f; new_datasize //= f
new_data = np.zeros(new_datasize, dtype=self.data_type_out)
else:
# initialize by just writing a small chunk of zeros
self.inith5()
self.data_attrs['types_nlabels'] = [self.nobjects]
self.fillvalue = 0 # non-zero fill value not useful for merged "neurons"
# xxx - this probably should be cleaned up, see comments in dpWriteh5.py
orig_dataset = self.dataset; orig_subgroups = self.subgroups; orig_offset = self.offset
self.writeCube(data=np.zeros((32,32,32), dtype=self.data_type_out))
# reopen the dataset and write to it dynamically below
dset, group, h5file = self.createh5(self.outfile)
# xxx - this probably should be cleaned up, see comments in dpWriteh5.py
self.dataset = orig_dataset; self.subgroups = orig_subgroups; self.offset = orig_offset
# much of this code copied from the label mesher, extract supervoxel and smooth
# Pad data with zeros so that meshes are closed on the edges
sizes = np.array(cube.shape); r = self.smooth.max() + 1; sz = sizes + 2*r;
dataPad = np.zeros(sz, dtype=self.data_type); dataPad[r:sz[0]-r, r:sz[1]-r, r:sz[2]-r] = cube
# get bounding boxes for all supervoxels in this volume
svox_bnd = nd.measurements.find_objects(dataPad, cur_ncomps)
for cobj in self.sc_to_objs[ind]:
#self.mergelists[cobj] = {'ids':allids[:,0], 'scids':allids[:,1:5], 'inds':inds}
cinds = np.nonzero(ind == self.mergelists[cobj]['inds'])[0]
for j in cinds:
cid = self.mergelists[cobj]['ids'][j]
cur_bnd = svox_bnd[cid-1]
imin = np.array([x.start for x in cur_bnd]); imax = np.array([x.stop-1 for x in cur_bnd])
# min and max coordinates of this seed within zero padded cube
pmin = imin - r; pmax = imax + r;
# min coordinates of this seed relative to original (non-padded cube)
mins = pmin - r; rngs = pmax - pmin + 1
crpdpls = (dataPad[pmin[0]:pmax[0]+1,pmin[1]:pmax[1]+1,
pmin[2]:pmax[2]+1] == cid).astype(self.PDTYPE)
if W.size==0 or (W==1).all():
crpdplsSm = crpdpls
else:
crpdplsSm = filters.convolve(crpdpls, W, mode='reflect', cval=0.0, origin=0)
# if smoothing results in nothing above contour level, use original without smoothing
if (crpdplsSm > self.contour_lvl).any():
del crpdpls; crpdpls = crpdplsSm
del crpdplsSm
# save bounds relative to entire dataset
bounds_beg = mins + self.dataset_index
#bounds_end = mins + rngs - 1 + self.dataset_index;
bounds_end = mins + rngs + self.dataset_index; # exclusive end, python-style
if self.dsfactor > 1:
# downsample the smoothed supervoxel and assign it in the new downsampled volume
b = bounds_beg.copy(); b //= f
# stupid integer arithmetic, need to add 1 if it's not a multiple of the ds factor
e = b + (bounds_end-bounds_beg)//f + ((bounds_end-bounds_beg)%f != 0)
new_data[b[0]:e[0],b[1]:e[1],b[2]:e[2]][crpdpls[self.slices[0]] > self.contour_lvl] = cobj
else:
# write non-downsampled directly to h5 output file
b = bounds_beg; e = b + (bounds_end-bounds_beg)
# this is hard-coded to write the dataset in F-order (normal convention).
tmp = np.transpose(dset[b[2]:e[2],b[1]:e[1],b[0]:e[0]], (2,1,0))
""" Test mesh operations """
import pytest
import os
import numpy as np
import shutil
import gzip
import vtk
from vtk.util.vtkConstants import VTK_TRIANGLE, VTK_LINE, VTK_VERTEX
from brainspace.vtk_interface import wrap_vtk
from brainspace.vtk_interface.wrappers import BSPolyData
from brainspace.mesh import mesh_io as mio
from brainspace.mesh import mesh_elements as me
from brainspace.mesh import mesh_creation as mc
from brainspace.mesh import mesh_operations as mop
from brainspace.mesh import mesh_cluster as mcluster
from brainspace.mesh import array_operations as aop
parametrize = pytest.mark.parametrize
try:
import nibabel as nb
except ImportError:
nb = None
def _generate_sphere():
s = vtk.vtkSphereSource()
s.Update()
return wrap_vtk(s.GetOutput())
@parametrize('ext', ['fs', 'asc', 'ply', 'vtp', 'vtk'])
def test_io(ext):
s = _generate_sphere()
root_pth = os.path.dirname(__file__)
io_pth = os.path.join(root_pth, 'test_sphere_io.{ext}').format(ext=ext)
mio.write_surface(s, io_pth)
s2 = mio.read_surface(io_pth)
io_gz_pth = os.path.join(root_pth, 'test_sphere_io.{ext}.gz').format(ext=ext)
with open(io_pth, 'rb') as f1:
with gzip.open(io_gz_pth, 'wb') as f2:
shutil.copyfileobj(f1, f2)
s3 = mio.read_surface(io_gz_pth)
assert np.allclose(s.Points, s2.Points)
assert np.all(s.GetCells2D() == s2.GetCells2D())
assert np.allclose(s.Points, s3.Points)
assert np.all(s.GetCells2D() == s3.GetCells2D())
os.remove(io_pth)
os.remove(io_gz_pth)
@pytest.mark.skipif(nb is None, reason="Requires nibabel")
def test_io_nb():
s = _generate_sphere()
root_pth = os.path.dirname(__file__)
io_pth = os.path.join(root_pth, 'test_sphere_io.gii')
mio.write_surface(s, io_pth)
s2 = mio.read_surface(io_pth)
assert np.allclose(s.Points, s2.Points)
assert np.all(s.GetCells2D() == s2.GetCells2D())
os.remove(io_pth)
def test_mesh_creation():
st = _generate_sphere()
sl = mc.to_lines(st)
sv = mc.to_vertex(st)
# build polydata with points and triangle cells
pd = mc.build_polydata(st.Points, cells=st.GetCells2D())
assert pd.n_points == st.n_points
assert pd.n_cells == st.n_cells
assert np.all(pd.cell_types == np.array([VTK_TRIANGLE]))
assert isinstance(pd, BSPolyData)
# build polydata with points vertices by default
pd = mc.build_polydata(st.Points)
assert pd.n_points == st.n_points
assert pd.n_cells == 0
assert np.all(pd.cell_types == np.array([VTK_VERTEX]))
assert isinstance(pd, BSPolyData)
# build polydata with points vertices
pd = mc.build_polydata(st.Points, cells=sv.GetCells2D())
assert pd.n_points == st.n_points
assert pd.n_cells == st.n_points
assert np.all(pd.cell_types == np.array([VTK_VERTEX]))
assert isinstance(pd, BSPolyData)
# build polydata with lines
pd = mc.build_polydata(st.Points, cells=sl.GetCells2D())
assert pd.n_points == sl.n_points
assert pd.n_cells == sl.n_cells
assert np.all(pd.cell_types == np.array([VTK_LINE]))
assert isinstance(pd, BSPolyData)
@pytest.mark.xfail
def test_drop_cells():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_cells = rs.randint(0, 10, s.n_cells)
cell_name = s.append_array(label_cells, at='c')
n_cells = mop.drop_cells(s, cell_name, upp=3).n_cells
assert n_cells == np.count_nonzero(label_cells > 3)
def test_select_cells():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_cells = rs.randint(0, 10, s.n_cells)
cell_name = s.append_array(label_cells, at='c')
n_cells = mop.select_cells(s, cell_name, low=0, upp=3).n_cells
assert n_cells == np.count_nonzero(label_cells <= 3)
def test_mask_cells():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_cells = rs.randint(0, 10, s.n_cells)
# Warns when array is boolean
with pytest.warns(UserWarning):
mask_cell_name = s.append_array(label_cells > 3, at='c')
n_cells = mop.mask_cells(s, mask_cell_name).n_cells
assert n_cells == np.count_nonzero(label_cells > 3)
@pytest.mark.xfail
def test_drop_points():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_points = rs.randint(0, 10, s.n_points)
point_name = s.append_array(label_points, at='p')
# Warns cause number of selected points may not coincide with
# selected points
with pytest.warns(UserWarning):
n_pts = mop.drop_points(s, point_name, low=0, upp=3).n_points
assert n_pts <= s.n_points
def test_select_points():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_points = rs.randint(0, 10, s.n_points)
point_name = s.append_array(label_points, at='p')
with pytest.warns(UserWarning):
n_pts = mop.select_points(s, point_name, low=0, upp=3).n_points
assert n_pts <= s.n_points
def test_mask_points():
s = _generate_sphere()
rs = np.random.RandomState(0)
label_points = rs.randint(0, 10, s.n_points)
with pytest.warns(UserWarning):
mask_point_name = s.append_array(label_points > 3, at='p')
with pytest.warns(UserWarning):
n_pts = mop.mask_points(s, mask_point_name).n_points
assert n_pts <= s.n_points
def test_mesh_elements():
s = _generate_sphere()
ee = vtk.vtkExtractEdges()
ee.SetInputData(s.VTKObject)
ee.Update()
ee = wrap_vtk(ee.GetOutput())
n_edges = ee.n_cells
assert np.all(me.get_points(s) == s.Points)
assert np.all(me.get_cells(s) == s.GetCells2D())
assert me.get_extent(s).shape == (3,)
pc = me.get_point2cell_connectivity(s)
assert pc.shape == (s.n_points, s.n_cells)
assert pc.dtype == np.uint8
assert np.all(pc.sum(axis=0) == 3)
cp = me.get_cell2point_connectivity(s)
assert cp.dtype == np.uint8
assert (pc - cp.T).nnz == 0
adj = me.get_immediate_adjacency(s)
assert adj.shape == (s.n_points, s.n_points)
assert adj.dtype == np.uint8
assert adj.nnz == (2*n_edges + s.n_points)
adj2 = me.get_immediate_adjacency(s, include_self=False)
assert adj2.shape == (s.n_points, s.n_points)
assert adj2.dtype == np.uint8
assert adj2.nnz == (2 * n_edges)
radj = me.get_ring_adjacency(s)
assert radj.dtype == np.uint8
assert (adj - radj).nnz == 0
radj2 = me.get_ring_adjacency(s, include_self=False)
assert radj2.dtype == np.uint8
assert (adj2 - radj2).nnz == 0
radj3 = me.get_ring_adjacency(s, n_ring=2, include_self=False)
assert radj3.dtype == np.uint8
assert (radj3 - adj2).nnz > 0
d = me.get_immediate_distance(s)
assert d.shape == (s.n_points, s.n_points)
assert d.dtype == np.float64
assert d.nnz == adj2.nnz
d2 = me.get_immediate_distance(s, metric='sqeuclidean')
d_sq = d.copy()
d_sq.data **= 2
assert np.allclose(d_sq.A, d2.A)
rd = me.get_ring_distance(s)
assert rd.dtype == np.float64
assert np.allclose(d.A, rd.A)
rd2 = me.get_ring_distance(s, n_ring=2)
assert (rd2 - d).nnz > 0
assert me.get_cell_neighbors(s).shape == (s.n_cells, s.n_cells)
assert me.get_edges(s).shape == (n_edges, 2)
assert me.get_edge_length(s).shape == (n_edges,)
assert me.get_boundary_points(s).size == 0
assert me.get_boundary_edges(s).size == 0
assert me.get_boundary_cells(s).size == 0
def test_mesh_cluster():
s = _generate_sphere()
cl_size = 10
nc = s.n_points // cl_size
cl, cc = mcluster.cluster_points(s, n_clusters=nc, random_state=0)
assert np.all(cl > 0)
assert np.unique(cl).size == nc
assert np.unique(cl).size == np.unique(cc).size - 1
cl2 = mcluster.cluster_points(s, n_clusters=nc, with_centers=False,
random_state=0)
assert np.all(cl == cl2)
cl3, _ = mcluster.cluster_points(s, n_clusters=cl_size, is_size=True,
random_state=0)
assert np.all(cl == cl3)
cl4, cc4 = mcluster.cluster_points(s, n_clusters=nc, approach='ward',
random_state=0)
assert np.all(cl4 > 0)
assert np.unique(cl4).size == nc
assert np.unique(cl4).size == np.unique(cc4).size - 1
sp = mcluster.sample_points_clustering(s, random_state=0)
assert np.count_nonzero(sp) == int(s.n_points * 0.1)
sp2 = mcluster.sample_points_clustering(s, keep=0.2, approach='ward',
random_state=0)
assert np.count_nonzero(sp2) == int(s.n_points * 0.2)
def test_array_operations():
s = _generate_sphere()
# Cell area
area = aop.compute_cell_area(s)
assert isinstance(area, np.ndarray)
assert area.shape == (s.n_cells, )
s2 = aop.compute_cell_area(s, append=True, key='CellArea')
assert s is s2
assert np.allclose(s2.CellData['CellArea'], area)
# Cell centers
centers = aop.compute_cell_center(s)
assert isinstance(centers, np.ndarray)
assert centers.shape == (s.n_cells, 3)
s2 = aop.compute_cell_center(s, append=True, key='CellCenter')
assert s is s2
assert np.allclose(s2.CellData['CellCenter'], centers)
# Adjacent cells
n_adj = aop.get_n_adjacent_cells(s)
assert isinstance(n_adj, np.ndarray)
assert n_adj.shape == (s.n_points,)
s2 = aop.get_n_adjacent_cells(s, append=True, key='NAdjCells')
assert s is s2
assert np.all(s2.PointData['NAdjCells'] == n_adj)
# map cell data to point data
area2 = aop.map_celldata_to_pointdata(s, area)
area3 = aop.map_celldata_to_pointdata(s, 'CellArea', red_func='mean')
assert area.dtype == area2.dtype
assert area.dtype == area3.dtype
assert np.allclose(area2, area3)
area4 = aop.map_celldata_to_pointdata(s, 'CellArea', red_func='mean',
dtype=np.float32)
assert area4.dtype == np.float32
for op in ['sum', 'mean', 'mode', 'one_third', 'min', 'max']:
ap = aop.map_celldata_to_pointdata(s, 'CellArea', red_func=op)
assert ap.shape == (s.n_points,)
name = 'CellArea_{}'.format(op)
s2 = aop.map_celldata_to_pointdata(s, 'CellArea', red_func=op,
append=True, key=name)
assert np.allclose(s2.PointData[name], ap)
# map point data to cell data
fc = aop.map_pointdata_to_celldata(s, n_adj)
fc2 = aop.map_pointdata_to_celldata(s, 'NAdjCells', red_func='mean')
assert fc.dtype == fc2.dtype
assert fc.dtype == fc2.dtype
assert np.allclose(fc, fc2)
fc3 = aop.map_pointdata_to_celldata(s, 'NAdjCells', red_func='mean',
dtype=np.float32)
assert fc3.dtype == np.float32
for op in ['sum', 'mean', 'mode', 'one_third', 'min', 'max']:
ac = aop.map_pointdata_to_celldata(s, 'NAdjCells', red_func=op)
assert ac.shape == (s.n_cells,)
name = 'NAdjCells_{}'.format(op)
s2 = aop.map_pointdata_to_celldata(s, 'NAdjCells', red_func=op,
append=True, key=name)
assert np.allclose(s2.CellData[name], ac)
# Point area
area = aop.compute_point_area(s)
assert isinstance(area, np.ndarray)
assert area.shape == (s.n_points, )
s2 = aop.compute_point_area(s, append=True, key='PointArea')
assert s is s2
assert np.allclose(s2.PointData['PointArea'], area)
s2 = aop.compute_point_area(s, cell_area='CellArea', append=True,
key='PointArea2')
assert s is s2
assert np.allclose(s2.PointData['PointArea2'], area)
# Connected components
cc = mop.get_connected_components(s)
assert cc.shape == (s.n_points, )
assert np.unique(cc).size == 1
s2 = mop.get_connected_components(s, append=True, key='components')
assert s is s2
assert np.all(cc == s2.PointData['components'])
# labeling border
labeling = (s.Points[:, 0] > s.Points[:, 0].mean()).astype(int)
s.append_array(labeling, name='parc', at='p')
border = aop.get_labeling_border(s, labeling)
assert border.shape == (s.n_points, )
assert np.unique(border).size == 2
border2 = aop.get_labeling_border(s, 'parc')
assert np.all(border == border2)
# parcellation centroids
cent = aop.get_parcellation_centroids(s, labeling, non_centroid=2)
assert cent.shape == (s.n_points,)
assert np.unique(cent).size == 3
assert np.count_nonzero(cent == 0) == 1
assert np.count_nonzero(cent == 1) == 1
assert np.count_nonzero(cent == 2) == s.n_points - 2
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used by generate_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import numpy as np
def gen_is_edge_fn(bits):
"""Generate a boolean function for the edge connectivity.
Given a bitstring FEDCBA and a 4x4 matrix, the generated matrix is
[[0, A, B, D],
[0, 0, C, E],
[0, 0, 0, F],
[0, 0, 0, 0]]
Note that this function is agnostic to the actual matrix dimension due to
order in which elements are filled out (column-major, starting from least
significant bit). For example, the same FEDCBA bitstring (0-padded) on a 5x5
matrix is
[[0, A, B, D, 0],
[0, 0, C, E, 0],
[0, 0, 0, F, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
Args:
bits: integer which will be interpreted as a bit mask.
Returns:
vectorized function that returns True when an edge is present.
"""
def is_edge(x, y):
"""Is there an edge from x to y (0-indexed)?"""
if x >= y:
return 0
# Map x, y to index into bit string
index = x + (y * (y - 1) // 2)
return (bits >> index) % 2 == 1
return np.vectorize(is_edge)
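# Illustrative sketch (not from the original module): expanding the vectorized
# edge predicate into a full upper-triangular adjacency matrix with
# np.fromfunction. The helper name below is an assumption for demonstration.
def _demo_gen_is_edge_fn(bits=0b001011, num_vertices=4):
    """Return the adjacency matrix encoded by `bits` on `num_vertices` nodes."""
    is_edge = gen_is_edge_fn(bits)
    matrix = np.fromfunction(is_edge, (num_vertices, num_vertices),
                             dtype=np.int8).astype(np.int8)
    return matrix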
def is_full_dag(matrix):
"""Full DAG == all vertices on a path from vert 0 to (V-1).
i.e. no disconnected or "hanging" vertices.
It is sufficient to check for:
1) no rows of 0 except for row V-1 (only output vertex has no out-edges)
2) no cols of 0 except for col 0 (only input vertex has no in-edges)
Args:
matrix: V x V upper-triangular adjacency matrix
Returns:
True if the there are no dangling vertices.
"""
shape = np.shape(matrix)
rows = matrix[:shape[0] - 1, :] == 0
rows = np.all(rows, axis=1) # Any row with all 0 will be True
rows_bad = np.any(rows)
cols = matrix[:, 1:] == 0
cols = np.all(cols, axis=0) # Any col with all 0 will be True
cols_bad = np.any(cols)
return (not rows_bad) and (not cols_bad)
def num_edges(matrix):
"""Computes number of edges in adjacency matrix."""
return np.sum(matrix)
def hash_module(matrix, labeling):
"""Computes a graph-invariance MD5 hash of the matrix and label pair.
Args:
matrix: np.ndarray square upper-triangular adjacency matrix.
labeling: list of int labels of length equal to both dimensions of
matrix.
Returns:
MD5 hash of the matrix and labeling.
"""
vertices = np.shape(matrix)[0]
in_edges = np.sum(matrix, axis=0).tolist()
out_edges = np.sum(matrix, axis=1).tolist()
assert len(in_edges) == len(out_edges) == len(labeling)
hashes = list(zip(out_edges, in_edges, labeling))
hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
# Computing this up to the diameter is probably sufficient but since the
# operation is fast, it is okay to repeat more times.
for _ in range(vertices):
new_hashes = []
for v in range(vertices):
in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
new_hashes.append(hashlib.md5(
(''.join(sorted(in_neighbors)) + '|' +
''.join(sorted(out_neighbors)) + '|' +
hashes[v]).encode('utf-8')).hexdigest())
hashes = new_hashes
fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
return fingerprint
def permute_graph(graph, label, permutation):
"""Permutes the graph and labels based on permutation.
Args:
graph: np.ndarray adjacency matrix.
label: list of labels of same length as graph dimensions.
permutation: a permutation list of ints of same length as graph dimensions.
Returns:
np.ndarray where vertex permutation[v] is vertex v from the original graph
"""
# vertex permutation[v] in new graph is vertex v in the old graph
forward_perm = zip(permutation, list(range(len(permutation))))
inverse_perm = [x[1] for x in sorted(forward_perm)]
edge_fn = lambda x, y: graph[inverse_perm[x], inverse_perm[y]] == 1
new_matrix = np.fromfunction(np.vectorize(edge_fn),
(len(label), len(label)),
dtype=np.int8)
new_label = [label[inverse_perm[i]] for i in range(len(label))]
return new_matrix, new_label
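# Illustrative sketch (an assumption, not part of the original file): the hash
# computed by hash_module should be unchanged when the same graph is relabeled
# through permute_graph, which is the invariance that is_isomorphic relies on.
def _demo_hash_is_permutation_invariant():
    matrix = np.array([[0, 1, 1],
                       [0, 0, 1],
                       [0, 0, 0]])
    label = [-1, 0, -2]
    perm_matrix, perm_label = permute_graph(matrix, label, [2, 0, 1])
    return (hash_module(matrix, label) ==
            hash_module(np.array(perm_matrix), perm_label))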
def is_isomorphic(graph1, graph2):
"""Exhaustively checks if 2 graphs are isomorphic."""
matrix1, label1 = np.array(graph1[0]), graph1[1]
matrix2, label2 = np.array(graph2[0]), graph2[1]
assert np.shape(matrix1) == np.shape(matrix2)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import numpy as np
import numpy.linalg as npla
import scipy as sp
import matplotlib.pyplot as plt
def identity_vf(M, N, RM=None, RN=None):
"""Get vector field for the identity transformation.
This returns the vector field (tau_u, tau_v) corresponding to the identity
transformation, which maps the image plane to itself.
For more details on these vector fields, see the doc for affine_to_vf
inputs:
--------
M : int
vertical (number of rows) size of image plane being worked with
N : int
horizontal (number of cols) size of image plane being worked with
RM : int (optional)
number of points in the M direction desired. by default, this is M,
giving the identity transformation. when a number other than M is
provided, this corresponds to a resampling in the vertical direction.
(we put this operation in this function because it is so naturally
related)
RN : int (optional)
number of points in the N direction desired. by default, this is N,
giving the identity transformation. when a number other than N is
provided, this corresponds to a resampling in the horizontal direction.
(we put this operation in this function because it is so naturally
related)
outputs:
-------
eu : numpy.ndarray (size (M, N))
horizontal component of vector field corresponding to (I, 0)
ev : numpy.ndarray (size (M, N))
vertical component of vector field corresponding to (I, 0)
"""
if RM is None:
RM = M
if RN is None:
RN = N
m_vec = np.linspace(0, M-1, RM)
n_vec = np.linspace(0, N-1, RN)
eu = np.dot(m_vec[:,np.newaxis], np.ones(RN)[:,np.newaxis].T)
ev = np.dot(np.ones(RM)[:,np.newaxis], n_vec[:,np.newaxis].T)
return (eu, ev)
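# Illustrative usage (an assumption, not in the original module): with the
# default arguments the field is the identity on an M x N grid; passing RM/RN
# different from M/N evaluates the same identity map on a denser or coarser
# grid, i.e. a resampling of the image plane.
def _demo_identity_vf():
    eu, ev = identity_vf(4, 6)                      # eu[i, j] = i, ev[i, j] = j
    eu_up, ev_up = identity_vf(4, 6, RM=8, RN=12)   # 2x denser sampling grid
    return eu.shape, eu_up.shape                    # (4, 6) and (8, 12)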
def get_default_pgd_dict(**kwargs):
"""Get default parameter dictionary for proximal gradient descent solvers
Valid key-value pairs are:
init_pt = function with two arguments (m, n), two return values (numpy
arrays of size (m, n))
initial iterate to start GD at, represented as a function: must be a
function with two arguments (m, n), the first of which represents image
height and the second of which represents image width; and must return
a tuple of two numpy arrays, each of size m, n, corresponding to the
initial deformation field
center : numpy array of shape (2,)
Denotes an optional (set to np.array([0,0]) by default) center
coordinate to use when solving the parametric version of the problem
(parametric = True below). All affine transformations computed then
have the form A * ( [i,j] - center ) + center + b, where A may have
more structure if certain values of motion_model is
set. This kind of reparameterization does not make a difference in the
nonparametric version of the problem, so nothing is implemented for
this case.
sigma : float (positive)
Bandwidth parameter in the gaussian filter used for the cost smoothing.
(larger -> smaller cutoff frequency, i.e. more aggressive filtering)
See gaussian_filter_2d
sigma0 : float (positive)
Bandwidth parameter in the gaussian filter used for complementary
smoothing in registration_l2_spike.
(larger -> smaller cutoff frequency, i.e. more aggressive filtering)
See gaussian_filter_2d
sigma_scene : float (positive)
Bandwidth parameter in the gaussian filter used in scene smoothing in
registration_l2_bbg. (larger -> smaller cutoff frequency, i.e. more
aggressive filtering) See gaussian_filter_2d
window : NoneType or numpy array of size (m, n)
Either None, if no window is to be used, or an array of size (m, n)
(same as image size), denoting the cost window function to be applied
(l2 error on residual is filtered, then windowed, before computing).
NOTE: current implementation makes window independent of any setting of
the parameter center specified above
max_iter : int
Maximum number of iterations to run PGD for
tol : float (positive)
Minimum relative tolerance before exiting optimization: optimization
stops if the absolute difference between the loss at successive
iterations is below this threshold.
step : float (positive)
Step size. Currently using constant-step gradient descent
lam : float (positive)
Regularization weight (multiplicative constant on the regularization
term in the loss)
use_nesterov : bool
Whether or not to use Nesterov accelerated gradient descent
use_restarting : bool
Whether or not to use adaptive restarted Nesterov accelerated gradient
descent. Speeds things up significantly, but maybe does not work well
out of the box with proximal iteration
motion_model : string (default 'nonparametric')
Sets the motion model that the registration algorithm will use (i.e.
what constraints are enforced on the transformation vector field).
Values that are implemented are:
'translation'
transformation vector field is constrained to be translational (a
pixel shift of the input). 2-dimensional.
'rigid'
transformation vector field is constrained to be a rigid motion / a
euclidean transformation (i.e. a combination of a
positively-oriented rotation and a translation). 3-dimensional.
'similarity'
transformation vector field is constrained to be a similarity
transformation (i.e. a combination of a global dilation and a
translation). 4-dimensional.
'affine'
transformation vector field is constrained to be an affine
translation (i.e. a combination of a linear map and a translation).
6-dimensional.
'nonparametric'
transformation vector field is allowed to be completely general,
but regularization is added to the gradient descent solver via a
complexity penalty, and the solver runs proximal gradient descent
instead. (see e.g. entry for lambda for more info on associated
parameters).
gamma : float (min 0, max 1)
Nesterov accelerated GD momentum parameter. 0 corresponds to the
"usual" Nesterov AGD. 1 corresponds to "vanilla" GD. The optimal value
for a given problem is the reciprocal condition number. Setting this to
1 is implemented differently from setting use_nesterov to False (the
algorithm is the same; but the former is slower)
theta : float
initial momentum term weight; typically 1
precondition : bool
Whether or not to use a preconditioner (divide by some scalars on each
component of the gradient) for the A and b gradients in parametric
motion models (see motion_model)..
epoch_len : int (positive)
Length of an epoch; used for printing status messages
quiet : bool
If True, nothing will be printed while optimizing.
record_movie : bool
If True, a "movie" gets created from the optimization trajectory and
logged to disk (see movie_fn param). Requires moviepy to be installed
(easy with conda-forge). Potentially requires a ton of memory to store
all the frames (all iterates)
movie_fn : string
If record_movie is True, this gives the location on disk where the
movie will be saved
movie_fps : int
If record_movie is True, this gives the fps of the output movie.
window_pad_size : int
If record_movie is true, denotes the thickness of the border
designating the window to be output in the movie
frame_printing_stride : int
If record_movie is true, denotes the interval at which log information
will be written to the movie (every frame_printing_stride frames, log
info is written; the actual movie fps is set by movie_fps above)
font_size : int
If record_movie is true, denotes the font size used for printing
logging information to the output window. Set smaller for smaller-size
images.
NOTE: No value checking is implemented right now.
Inputs:
--------
kwargs :
any provided key-value pairs will be added to the parameter dictionary,
replacing any defaults they overlap with
Outputs:
--------
param_dict : dict
dict of parameters to be used for a proximal gd solver. Pass these to
e.g. nonparametric_registration or similar solvers.
"""
param_dict = {}
# Problem parameters: filter bandwidths, etc
param_dict['sigma'] = 3
param_dict['sigma_scene'] = 1.5
param_dict['sigma0'] = 1
param_dict['init_pt'] = lambda m, n: identity_vf(m, n)
param_dict['motion_model'] = 'nonparametric'
param_dict['window'] = None
param_dict['center'] = np.zeros((2,))
# Solver parameters: tolerances, stopping conditions, step size, etc
param_dict['max_iter'] = int(1e4)
param_dict['tol'] = 1e-4
param_dict['step'] = 1
param_dict['lam'] = 1
param_dict['use_nesterov'] = False
param_dict['use_restarting'] = False
param_dict['gamma'] = 0
param_dict['theta'] = 1
param_dict['precondition'] = True
# Logging parameters
param_dict['epoch_len'] = 50
param_dict['quiet'] = False
param_dict['record_movie'] = False
param_dict['movie_fn'] = ''
param_dict['movie_fps'] = 30
param_dict['window_pad_size'] = 5
param_dict['frame_printing_stride'] = 10 # 3 times per second
param_dict['font_size'] = 30
param_dict['movie_gt'] = None
param_dict['movie_proc_func'] = None
# Legacy/compatibility stuff
param_dict['parametric'] = False
param_dict['translation_mode'] = False
param_dict['rigid_motion_mode'] = False
param_dict['similarity_transform_mode'] = False
# Add user-provided params
for arg in kwargs.keys():
param_dict[arg] = kwargs[arg]
return param_dict
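# Illustrative usage (an assumption, not part of the original module): keyword
# overrides are merged on top of the defaults, so a euclidean registration with
# a tighter tolerance could be configured as sketched below and passed to one
# of the registration solvers in this file.
def _demo_pgd_params():
    params = get_default_pgd_dict(motion_model='euclidean',
                                  max_iter=2000,
                                  tol=1e-6,
                                  quiet=True)
    return params['motion_model'], params['sigma']  # ('euclidean', 3)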
def affine_to_vf(A, b, M, N):
"""Given (A, b), return associated vector field on M x N image plane
An affine transformation is parameterized by an invertible matrix A and a
vector b, and sends a 2D vector x to the 2D vector A*x + b. In the image
context, x lies in the M by N image plane. This function takes the pair (A,
b), and returns the associated vector field (tau_u, tau_v): here tau_u and
tau_v are M by N matrices such that (tau_u)_{ij} = (1st row of A) * [i, j]
+ b_1, and (tau_v)_{ij} = (2nd row of A) * [i, j] + b_2. The matrices thus
represent how the affine transformation (A, b) deforms the sampled image
plane.
Thus in general tau_u and tau_v have entries that may not be contained in
the M by N image plane and may not be integers. These issues of boundary
effects and interpolation effects are to be handled by other functions
inputs:
--------
A : numpy.ndarray (size (2, 2))
GL(2) part of affine transformation to apply
b : numpy.ndarray (size (2,))
translation part of affine transformation to apply
M : int
vertical (number of rows) size of image plane being worked with
N : int
horizontal (number of cols) size of image plane being worked with
outputs:
-------
tau_u : numpy.ndarray (size (M, N))
horizontal component of vector field corresponding to (A, b)
tau_v : numpy.ndarray (size (M, N))
vertical component of vector field corresponding to (A, b)
"""
# Do it with broadcasting tricks (dunno if it's faster)
A0 = A[:,0]
A1 = A[:,1]
eu = np.dot(np.arange(M)[:,np.newaxis], np.ones(N)[:,np.newaxis].T)
ev = np.dot(np.ones(M)[:,np.newaxis], np.arange(N)[:,np.newaxis].T)
tau = A0[np.newaxis, np.newaxis, :] * eu[..., np.newaxis] + \
A1[np.newaxis, np.newaxis, :] * ev[..., np.newaxis] + \
b[np.newaxis, np.newaxis, :] * np.ones((M, N, 1))
return (tau[:,:,0], tau[:,:,1])
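# Illustrative sketch (an assumption, not in the original module): the vector
# field of a 45-degree rotation about the image center, using the same
# center-correction trick applied elsewhere in this file (b + (I - A) * center).
def _demo_rotation_field(M=64, N=64, angle=np.pi / 4):
    A = np.array([[np.cos(angle), -np.sin(angle)],
                  [np.sin(angle), np.cos(angle)]])
    center = np.array([(M - 1) / 2.0, (N - 1) / 2.0])
    corr = np.dot(np.eye(2) - A, center)  # rotate about `center`, not (0, 0)
    return affine_to_vf(A, corr, M, N)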
def vf_to_affine(tau_u, tau_v, ctr):
"""Get affine transformation corresponding to a vector field.
General vector fields need not correspond to a particular affine
transformation. In our formulation, we parameterize affine transforms as
tau_u = a * (m-ctr[0] * \One)\One\\adj
+ b * \One (n - ctr[1]*\One)\\adj
+ (c + ctr[0]) * \One\One\\adj,
and similarly for tau_v.
We use the fact that this parameterization is used here to recover the
parameters of the affine transform using simple summing/differencing.
We need ctr as an input because the translation parameter is ambiguous
without knowing the center. However, we can always recover the parameters
of the transformation with respect to any fixed center (say, ctr = zero).
In general, if one provides ctr=np.zeros((2,)) to this function, it is a
left inverse of affine_to_vf called with the correct M, N parameters.
inputs:
--------
tau_u, tau_v : M by N numpy arrays
u and v (resp.) components of the transformation field.
ctr : (2,) shape numpy array
center parameter that the transform was computed with. see center
option in registration_l2. translation parameter is ambiguous without
knowing the center.
outputs:
--------
A : (2,2) numpy array
The A matrix corresponding to the affine transform. Follows our
conventions for how we compute with vector fields in determining how
the entries of A are determined
b : (2,) shape numpy array
The translation parameter corresponding to the affine transform.
Follows standard coordinates on the image plane (as elsewhere).
"""
M, N = tau_u.shape
a00 = tau_u[1, 0] - tau_u[0, 0]
a01 = tau_u[0, 1] - tau_u[0, 0]
a10 = tau_v[1, 0] - tau_v[0, 0]
a11 = tau_v[0, 1] - tau_v[0, 0]
A = np.array([[a00, a01], [a10, a11]])
u_sum = np.sum(tau_u)
v_sum = np.sum(tau_v)
m_sum = np.sum(np.arange(M) - ctr[0] * np.ones((M,)))
n_sum = np.sum(np.arange(N) - ctr[1] * np.ones((N,)))
b0 = (u_sum - a00 * m_sum * N - a01 * M * n_sum) / M / N - ctr[0]
b1 = (v_sum - a10 * m_sum * N - a11 * M * n_sum) / M / N - ctr[1]
b = np.array([b0, b1])
return A, b
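# Illustrative round-trip check (an assumption, not in the original module):
# with ctr = 0, vf_to_affine recovers the (A, b) that generated a field through
# affine_to_vf, as the docstring above claims.
def _demo_vf_roundtrip(M=32, N=48):
    A = np.array([[1.1, 0.2], [-0.1, 0.9]])
    b = np.array([3.0, -2.0])
    tau_u, tau_v = affine_to_vf(A, b, M, N)
    A_rec, b_rec = vf_to_affine(tau_u, tau_v, np.zeros((2,)))
    return np.allclose(A, A_rec) and np.allclose(b, b_rec)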
def registration_l2_exp(Y, X, W, Om, center, transform_mode, optim_vars, param_dict=get_default_pgd_dict(), visualize=False):
"""
This is yet another version of the cost-smoothed motif detection, in which we also infer
a (constant) background around the motif
Inputs:
Y -- input image
X -- motif, embedded into an image of the same size as the target image
Om -- support of the motif
transform_mode -- 'affine', 'similarity', 'euclidean', 'translation'
Outputs:
same as usual
"""
from time import perf_counter
vecnorm_2 = lambda A: np.linalg.norm( A.ravel(), 2 )
m, n, c = Y.shape
# Gradient descent parameters
MAX_ITER = param_dict['max_iter']
TOL = param_dict['tol']
step = param_dict['step']
if transform_mode == 'affine':
[A, b] = optim_vars
elif transform_mode == 'similarity':
[dil, phi, b] = optim_vars
A = dil * np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'euclidean':
[phi, b] = optim_vars
A = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'translation':
[b] = optim_vars
A = np.eye(2)
else:
raise ValueError('Wrong transform mode.')
# initialization (here, affine motion mode)
corr = np.dot(np.eye(2) - A, center)
tau_u, tau_v = affine_to_vf(A, b + corr, m, n)
# External smoothing: calculate gaussian weights
g = gaussian_filter_2d(m,n,sigma_u=param_dict['sigma'])
g = g / np.sum(g)
h = gaussian_filter_2d(m,n,sigma_u=5*param_dict['sigma'])
h = h / np.sum(h)
# Calculate initial error
error = np.inf * np.ones( (MAX_ITER,) )
Rvals = np.zeros( (MAX_ITER,) )
# initial interpolated image and error
cur_Y = image_interpolation_bicubic(Y, tau_u, tau_v )
# initialize the background
beta0 = cconv_fourier(h[...,np.newaxis], cur_Y - X)
beta = cconv_fourier(h[...,np.newaxis], beta0)
cur_X = np.zeros((m,n,c))
cur_X = (1-Om)*beta + Om*X
FWres = W * cconv_fourier(g[...,np.newaxis], cur_Y-cur_X)
grad_A = np.zeros( (2,2) )
grad_b = np.zeros( (2,) )
m_vec = np.arange(m) - center[0]
n_vec = np.arange(n) - center[1]
if param_dict['use_nesterov'] is False:
for idx in range(MAX_ITER):
# Get the basic gradient ingredients
Y_dot_u = dimage_interpolation_bicubic_dtau1(Y, tau_u, tau_v)
Y_dot_v = dimage_interpolation_bicubic_dtau2(Y, tau_u, tau_v)
# Get the "tau gradient" part.
# All the translation-dependent parts of the cost can be handled
# here, so that the parametric parts are just the same as always.
dphi_dY = cconv_fourier(dsp_flip(g)[...,np.newaxis], FWres)
tau_u_dot = np.sum(dphi_dY * Y_dot_u, -1)
tau_v_dot = np.sum(dphi_dY * Y_dot_v, -1)
# Get parametric part gradients
# Convert to parametric gradients
# Get row and col sums
tau_u_dot_rowsum = np.sum(tau_u_dot, 1)
tau_u_dot_colsum = np.sum(tau_u_dot, 0)
tau_v_dot_rowsum = np.sum(tau_v_dot, 1)
tau_v_dot_colsum = np.sum(tau_v_dot, 0)
# Put derivs
# These need to be correctly localized to the region of interest
grad_A[0, 0] = np.dot(tau_u_dot_rowsum, m_vec)
grad_A[1, 0] = np.dot(tau_v_dot_rowsum, m_vec)
grad_A[0, 1] = np.dot(tau_u_dot_colsum, n_vec)
grad_A[1, 1] = np.dot(tau_v_dot_colsum, n_vec)
grad_b[0] = np.sum(tau_u_dot_rowsum)
grad_b[1] = np.sum(tau_v_dot_rowsum)
# Precondition for crab body motif
grad_A /= 100
dphi_dbeta0 = -cconv_fourier( dsp_flip(h)[...,np.newaxis], (1-Om) * dphi_dY )
# Now update parameters
grad_norm = np.sqrt(npla.norm(grad_A.ravel(),2)**2 + npla.norm(grad_b,ord=2)**2)
#phi = phi - step * grad_phi / 86
if idx > 5:
if transform_mode == 'affine':
A = A - step * grad_A
b = b - step * grad_b
elif transform_mode == 'similarity':
grad_dil, grad_phi, grad_b = l2err_sim_grad(dil, phi, grad_A, grad_b)
dil = dil - step * grad_dil * 0.1
phi = phi - step * grad_phi
b = b - step * grad_b
A = dil * np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'euclidean':
grad_phi, grad_b = l2err_se_grad(phi, grad_A, grad_b)
phi = phi - step * grad_phi
b = b - step * grad_b
A = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'translation':
b = b - step * grad_b
A = np.eye(2)
beta0 = beta0 - 25 * step * dphi_dbeta0
corr = np.dot(np.eye(2) - A, center)
tau_u, tau_v = affine_to_vf(A, b + corr, m, n)
# Bookkeeping (losses and exit check)
cur_Y = image_interpolation_bicubic(Y, tau_u, tau_v )
beta = cconv_fourier(h[...,np.newaxis], beta0)
cur_X = np.zeros((m,n,c))
cur_X = (1-Om)*beta + Om*X
FWres = W * cconv_fourier(g[...,np.newaxis], cur_Y-cur_X)
error[idx] = .5 * np.sum(FWres ** 2)
cur_X_wd = cur_X * Om
for ic in range(3):
cur_X_wd[:,:,ic] -= np.mean(cur_X_wd[:,:,ic][cur_X_wd[:,:,ic] > 0])
cur_Y_wd = cur_Y * Om
for ic in range(3):
cur_Y_wd[:,:,ic] -= np.mean(cur_Y_wd[:,:,ic][cur_Y_wd[:,:,ic] > 0])
Rvals[idx] = np.sum(Om * cur_X_wd * cur_Y_wd) / ( vecnorm_2(Om * cur_X_wd) * vecnorm_2(Om * cur_Y_wd) )
if idx > 0 and error[idx] > error[idx-1]:
# print('Nonmontone, cutting step')
step = step / 2
else:
step = step * 1.01
cur_Y_disp = cur_Y.copy()
cur_Y_disp[:,:,1] = Om[:,:,1]
cur_Y_disp[:,:,2] = Om[:,:,2]
loopStop = perf_counter()
if grad_norm < TOL:
if param_dict['quiet'] is False:
print(f'Met objective at iteration {idx}, '
'exiting...')
break
if (idx % param_dict['epoch_len']) == 0:
if param_dict['quiet'] is False:
print('iter {:d} objective {:.4e} correlation {:.4f}'.format(idx, error[idx], Rvals[idx]))
if visualize is True:
if (idx % 10) == 0:
if param_dict['quiet'] is False:
plt.imshow(cur_Y_disp)
plt.show()
# This next block of code is for Nesterov accelerated GD.
else:
raise NotImplementedError('Test function only implements vanilla GD')
if transform_mode == 'affine':
optim_vars_new = [A, b]
elif transform_mode == 'similarity':
optim_vars_new = [dil, phi, b]
elif transform_mode == 'euclidean':
optim_vars_new = [phi, b]
elif transform_mode == 'translation':
optim_vars_new = [b]
return tau_u, tau_v, optim_vars_new, error, Rvals
def dilate_support(Om,sigma):
M = Om.shape[0]
N = Om.shape[1]
psi = gaussian_filter_2d(M,N,sigma_u=sigma)
delta = np.exp(-2) * ((2.0*np.pi*sigma) ** -.5)
Om_tilde = cconv_fourier(psi[...,np.newaxis],Om)
for i in range(M):
for j in range(N):
if Om_tilde[i,j,0] < delta:
Om_tilde[i,j,0] = 0
Om_tilde[i,j,1] = 0
Om_tilde[i,j,2] = 0
else:
Om_tilde[i,j,0] = 1
Om_tilde[i,j,1] = 1
Om_tilde[i,j,2] = 1
return Om_tilde
def rotation_mat(theta):
sin = np.sin(theta)
cos = np.cos(theta)
mat = np.array([[cos, -sin], [sin, cos]])
return mat
def l2err_se_grad(phi, grad_A, grad_b):
""" Calculate loss gradient in SE registration prob using aff gradient
This gradient is for the parametric version of the problem, with the
parameterization in terms of the special euclidean group (oriented rigid
motions of the plane).
It wraps l2err_aff_grad, since chain rule lets us easily calculate this
problem's gradient using the affine problem's gradient.
Implementation ideas:
- for ease of implementation, require the current angle phi as an input,
although it could probably be determined from tau_u and tau_v in general.
Inputs:
phi : angle parameter of matrix part of current rigid motion iterate.
grad_A : gradient of the cost with respect to A (matrix parameter of
affine transform) (output from l2err_aff_grad)
grad_b : gradient of the cost with respect to b (translation parameter
of affine transform) (output from l2err_aff_grad)
Outputs:
grad_phi : gradient of the cost with respect to phi (angular parameter of
the rotational part of the special euclidean transform)
grad_b : gradient of the cost with respect to b (translation parameter
of rigid motion)
"""
# rigid motion derivative matrix
G = np.array([[-np.sin(phi), -np.cos(phi)], [np.cos(phi), -np.sin(phi)]])
# Put derivatives
grad_phi = np.sum(G * grad_A)
return grad_phi, grad_b
def l2err_sim_grad(dil, phi, grad_A, grad_b):
""" Calculate loss gradient in similarity xform registration prob
This gradient is for the parametric version of the problem, with the
parameterization in terms of the similarity transformations (rigid motions
with the rotation multiplied by a scale parameter).
It wraps l2err_aff_grad, since chain rule lets us easily calculate this
problem's gradient using the affine problem's gradient.
Implementation ideas:
- for ease of implementation, require the current angle phi as an input,
although it could probably be determined from tau_u and tau_v in general.
Inputs:
dil : dilation (scale) parameter of matrix part of current similarity
transform iterate.
phi : angle parameter of matrix part of current rigid motion iterate.
grad_A : gradient of the cost with respect to A (matrix parameter of
affine transform) (output from l2err_aff_grad)
grad_b : gradient of the cost with respect to b (translation parameter
of affine transform) (output from l2err_aff_grad)
Outputs:
grad_dil : gradient of the cost with respect to dil (dilation/scale
parameter of similarity transform)
grad_phi : gradient of the cost with respect to phi (angular parameter of
the rotational part of the similarity transform)
grad_b : gradient of the cost with respect to b (translation parameter
of rigid motion)
"""
# rigid motion matrix
G = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
# rigid motion derivative matrix
Gdot = np.array([[-np.sin(phi), -np.cos(phi)], [np.cos(phi), -np.sin(phi)]])
# Put derivatives
grad_dil = np.sum(G * grad_A)
grad_phi = dil * np.sum(Gdot * grad_A)
return grad_dil, grad_phi, grad_b
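# Illustrative consistency check (an assumption, not in the original module):
# at dil == 1 the angular gradient from the similarity parameterization should
# match the euclidean one, since the two chain rules differ only by the
# dilation factor.
def _demo_param_grad_consistency(phi=0.3):
    grad_A = np.array([[0.5, -0.2], [0.1, 0.4]])
    grad_b = np.array([0.05, -0.02])
    grad_phi_se, _ = l2err_se_grad(phi, grad_A, grad_b)
    _, grad_phi_sim, _ = l2err_sim_grad(1.0, phi, grad_A, grad_b)
    return np.isclose(grad_phi_se, grad_phi_sim)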
def apply_random_transform( X0, Om0, c, mode, s_dist, phi_dist, theta_dist, b_dist, return_params=True ):
N0 = X0.shape[0]
N1 = X0.shape[1]
C = X0.shape[2]
tf_params = sample_random_transform( mode, s_dist, phi_dist, theta_dist, b_dist )
A = tf_params[0]
b = tf_params[1]
# apply the transformation
corr = np.dot(np.eye(2) - A, c)
(tau_u, tau_v) = affine_to_vf(A, b + corr, N0, N1)
X = image_interpolation_bicubic(X0, tau_u, tau_v)
Om = image_interpolation_bicubic(Om0, tau_u, tau_v)
if return_params is False:
return X, Om
else:
return X, Om, tf_params
def sample_random_transform( mode, s_dist, phi_dist, theta_dist, b_dist ):
s_min = s_dist[0]
s_max = s_dist[1]
phi_min = phi_dist[0]
phi_max = phi_dist[1]
theta_min = theta_dist[0]
theta_max = theta_dist[1]
b_min = b_dist[0]
b_max = b_dist[1]
b = np.zeros((2,))
b[0] = np.random.uniform(b_min,b_max)
b[1] = np.random.uniform(b_min,b_max)
if mode == 'affine':
s1 = np.random.uniform(s_min,s_max)
s2 = np.random.uniform(s_min,s_max)
phi = np.random.uniform(phi_min,phi_max)
# <NAME> code for the project of the module "Programming and scripting"
# Data visualization and parameters
import csv
import numpy as np
import matplotlib.pyplot as mpl
from scipy import stats # to calculate the mode
print()
print('sep_len sep_wid pet_len pet_wid (cm)')
print()
with open('data/iris.csv', newline='') as csvFile:
for line in csvFile:
line = line.replace(',', ' ') #removes comma separating the numbers
print(' ' + line[:3]+ ' ' + line[4:7]+ ' ' + line[8:11]+ ' ' + line[12:15])
# This line prints in groups of 3 positions because the 4th value is excluded.
# F.i. the first instruction prints the positions 0, 1st and 2nd but 3rd position (the white space) is excluded
data = np.genfromtxt('data/iris.csv', delimiter = ',')
# defining VARIABLES
# column variables
col1 = data[:,0]
col2 = data[:,1]
col3 = data[:,2]
col4 = data[:,3]
# flower class variables
setSL = data[0:50,0]
setSW = data[0:50,1]
setPL = data[0:50,2]
setPW = data[0:50,3]
# versicolor occupies rows 50-99 and virginica rows 100-149 (0-indexed)
verSL = data[50:100,0]
verSW = data[50:100,1]
verPL = data[50:100,2]
verPW = data[50:100,3]
virSL = data[100:150,0]
virSW = data[100:150,1]
virPL = data[100:150,2]
virPW = data[100:150,3]
# Parameters from the file ingnoring flower classes. Means:
meancol1 = np.mean(col1)
meancol2 = np.mean(col2)
meancol3 = np.mean(col3)
meancol4 = np.mean(col4)
print('sep_len sep_wid pet_len pet_wid (cm)')
print()
print("1) PARAMETERS taken out of the file data IGNORING the flower classes:")
print()
print("MEANS (cm)")
print(' '+ '{0:.2f}'.format(meancol1) + " " + '{0:.2f}'.format(meancol2) + " " + '{0:.2f}'.format(meancol3) + " " + '{0:.2f}'.format(meancol4))
# Medians
mediancol1 = np.median(col1)
mediancol2 = np.median(col2)
mediancol3 = np.median(col3)
mediancol4 = np.median(col4)
print()
print("MEDIANS (cm)")
print(' '+ '{0:.2f}'.format(mediancol1) + " " + '{0:.2f}'.format(mediancol2) + " " + '{0:.2f}'.format(mediancol3) + " " + '{0:.2f}'.format(mediancol4))
print()
# modes
modecol1 = stats.mode(col1)
modecol2 = stats.mode(col2)
modecol3 = stats.mode(col3)
modecol4 = stats.mode(col4)
print('MODES (cm) (sep_len sep_wid pet_len pet_wid)') # The second attribute, count, is the number of times it occurs in the data set.
print("", modecol1, "\n", modecol2, "\n", modecol3, "\n", modecol4)
print()
# maximums
maxCol1= np.amax(col1)
maxCol2= np.amax(col2)
maxCol3= np.amax(col3)
maxCol4= np.amax(col4)
print('MAXIMUMS (cm)')
print(" ", maxCol1, " ", maxCol2, " ", maxCol3, " ", maxCol4)
print()
print(' sep_len sep_wid pet_len pet_wid (cm)')
print()
# minimums
minCol1= np.amin(col1)
minCol2= np.amin(col2)
minCol3= np.amin(col3)
minCol4= np.amin(col4)
print('MINIMUMS (cm)')
print(" ", minCol1, " ", minCol2, " ", minCol3, " ", minCol4)
print()
# standard deviation
stdcol1 = np.std(col1)
stdcol2 = np.std(col2)
stdcol3 = np.std(col3)
stdcol4 = np.std(col4)
print("STANDARD DEVIATIONS (cm)")
print(' '+ '{0:.2f}'.format(stdcol1) + " " + '{0:.2f}'.format(stdcol2) + " " + '{0:.2f}'.format(stdcol3) + " " + '{0:.2f}'.format(stdcol4))
print()
# correlation coefficient between leaf length and width
corCcol1_2 = np.corrcoef(col1, col2)[1,0] # [1,0] added to obtain just one value
corCcol3_4 = np.corrcoef(col3, col4)[1,0]
print('CORRELATION COEFFICITENT')
print(' - Between sepal length and sepal width', '{0:.2f}'.format(corCcol1_2))
print(' - Between petal length and petal width', '{0:.2f}'.format(corCcol3_4))
print()
print()
print("2) PARAMETERS taken out of the file data CONSIDERING the flower classes:")
# means by flower class: sepal length
meanSetSL= np.mean(setSL)
meanVerSL= np.mean(verSL)
meanVirSL= np.mean(virSL)
# means sepal width
meanSetSW= np.mean(setSW)
meanVerSW= np.mean(verSW)
meanVirSW= np.mean(virSW)
# means petal length
meanSetPL= np.mean(setPL)
meanVerPL= np.mean(verPL)
meanVirPL= np.mean(virPL)
# means petal width
meanSetPW= np.mean(setPW)
meanVerPW= np.mean(verPW)
meanVirPW= np.mean(virPW)
print()
print('MEANS by flower class (cm)')
print()
print('Iris Setosa')
print(' '+ '{0:.2f}'.format(meanSetSL) + " " + '{0:.2f}'.format(meanSetSW) + " " + '{0:.2f}'.format(meanSetPL) + " " + '{0:.2f}'.format(meanSetPW))
print('Iris Versicolor')
print(' '+ '{0:.2f}'.format(meanVerSL) + " " + '{0:.2f}'.format(meanVerSW) + " " + '{0:.2f}'.format(meanVerPL) + " " + '{0:.2f}'.format(meanVerPW))
print('Iris Virginica')
print(' '+ '{0:.2f}'.format(meanVirSL) + " " + '{0:.2f}'.format(meanVirSW) + " " + '{0:.2f}'.format(meanVirPL) + " " + '{0:.2f}'.format(meanVirPW))
print()
print(' sep_len sep_wid pet_len pet_wid (cm)')
# medians by flower class: sepal length
medianSetSL= np.median(setSL)
medianVerSL= np.median(verSL)
medianVirSL= np.median(virSL)
# medians sepal width
medianSetSW= np.median(setSW)
medianVerSW= np.median(verSW)
medianVirSW= np.median(virSW)
# medians petal length
medianSetPL= np.median(setPL)
medianVerPL= np.median(verPL)
medianVirPL= np.median(virPL)
# medians petal width
medianSetPW= np.median(setPW)
medianVerPW= np.median(verPW)
medianVirPW= np.median(virPW)
print()
print('MEDIANS by flower class (cm)')
print()
print('Iris Setosa')
print(" ", medianSetSL, " ", medianSetSW, " ", medianSetPL, " ", medianSetPW)
print('Iris Versicolor')
print(" ", medianVerSL, " ", medianVerSW, " ", medianVerPL, " ", medianVerPW)
print('Iris Virginica')
print(" ", medianVirSL, " ", medianVirSW, " ", medianVirPL, " ", medianVirPW)
print()
# modes by flower class: sepal length
modeSetSL= stats.mode(setSL)
modeVerSL= stats.mode(verSL)
modeVirSL= stats.mode(virSL)
# modes sepal width
modeSetSW= stats.mode(setSW)
modeVerSW= stats.mode(verSW)
modeVirSW= stats.mode(virSW)
# modes petal length
modeSetPL= stats.mode(setPL)
modeVerPL= stats.mode(verPL)
modeVirPL= stats.mode(virPL)
# modes petal width
modeSetPW= stats.mode(setPW)
modeVerPW= stats.mode(verPW)
modeVirPW= stats.mode(virPW)
print()
print('MODES by flower class (cm) (sep_len sep_wid pet_len pet_wid)') # The second attribute, count, is the number of times it occurs in the data set.
print()
print('Iris Setosa') # The \n introduces a break line
print("", modeSetSL, "\n", modeSetSW, "\n", modeSetPL, "\n", modeSetPW)
print()
print('Iris Versicolor')
print("", modeVerSL, "\n", modeVerSW, "\n", modeVerPL, "\n", modeVerPW)
print()
print('Iris Virginica')
print("", modeVirSL, "\n", modeVirSW, "\n", modeVirPL, "\n", modeVirPW)
print()
# maximum values by flower class: sepal length
maxSetSL= np.amax(setSL)
maxVerSL= np.amax(verSL)
maxVirSL= np.amax(virSL)
# maximums sepal width
maxSetSW= np.amax(setSW)
maxVerSW= np.amax(verSW)
maxVirSW= np.amax(virSW)
# maximums petal length
maxSetPL= np.amax(setPL)
maxVerPL= np.amax(verPL)
maxVirPL= np.amax(virPL)
# maximums petal width
maxSetPW= np.amax(setPW)
maxVerPW= np.amax(verPW)
maxVirPW= np.amax(virPW)
print()
print('MAXIMUMS by flower class (cm)')
print()
print('Iris Setosa')
print(" ", maxSetSL, " ", maxSetSW, " ", maxSetPL, " ", maxSetPW)
print('Iris Versicolor')
print(" ", maxVerSL, " ", maxVerSW, " ", maxVerPL, " ", maxVerPW)
print('Iris Virginica')
print(" ", maxVirSL, " ", maxVirSW, " ", maxVirPL, " ", maxVirPW)
print()
print(' sep_len sep_wid pet_len pet_wid (cm)')
# minimum values by flower class: sepal length
minSetSL= np.amin(setSL)
minVerSL= np.amin(verSL)
minVirSL= np.amin(virSL)
# minimums sepal width
minSetSW= np.amin(setSW)
minVerSW= np.amin(verSW)
minVirSW= np.amin(virSW)
# minimums petal length
minSetPL= np.amin(setPL)
minVerPL= np.amin(verPL)
minVirPL= np.amin(virPL)
# minimums petal width
minSetPW= np.amin(setPW)
minVerPW= np.amin(verPW)
minVirPW= np.amin(virPW)
print()
print('MINIMUMS by flower class (cm)')
print()
print('Iris Setosa')
print(" ", minSetSL, " ", minSetSW, " ", minSetPL, " ", minSetPW)
print('Iris Versicolor')
print(" ", minVerSL, " ", minVerSW, " ", minVerPL, " ", minVerPW)
print('Iris Virginica')
print(" ", minVirSL, " ", minVirSW, " ", minVirPL, " ", minVirPW)
print()
# standard deviations by flower class: sepal length
stdSetSL= np.std(setSL)
stdVerSL= np.std(verSL)
stdVirSL= np.std(virSL)
# standard deviations sepal width
stdSetSW= np.std(setSW)
stdVerSW= np.std(verSW)
stdVirSW= np.std(virSW)
# standard deviations petal length
stdSetPL= np.std(setPL)
stdVerPL= np.std(verPL)
stdVirPL= np.std(virPL)
# standard deviations petal width
stdSetPW= np.std(setPW)
stdVerPW= np.std(verPW)
stdVirPW= np.std(virPW)
print()
print('STANDARD DEVIATIONS by flower class (cm)')
print()
print(' sep_len sep_wid pet_len pet_wid (cm)')
print()
print('Iris Setosa')
print(' '+ '{0:.2f}'.format(stdSetSL) + " " + '{0:.2f}'.format(stdSetSW) + " " + '{0:.2f}'.format(stdSetPL) + " " + '{0:.2f}'.format(stdSetPW))
print('Iris Versicolor')
print(' '+ '{0:.2f}'.format(stdVerSL) + " " + '{0:.2f}'.format(stdVerSW) + " " + '{0:.2f}'.format(stdVerPL) + " " + '{0:.2f}'.format(stdVerPW))
print('Iris Virginica')
print(' '+ '{0:.2f}'.format(stdVirSL) + " " + '{0:.2f}'.format(stdVirSW) + " " + '{0:.2f}'.format(stdVirPL) + " " + '{0:.2f}'.format(stdVirPW))
print()
# Correlation coefficient between length and width flower leaf
setCorSepal = np.corrcoef(setSL, setSW)[1,0]
setCorPetal = np.corrcoef(setPL, setPW)[1,0]
verCorSepal = np.corrcoef(verSL, verSW)[1,0]
def selection_2():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(-3.2,3.2,65,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([-3.15,-3.05,-2.95,-2.85,-2.75,-2.65,-2.55,-2.45,-2.35,-2.25,-2.15,-2.05,-1.95,-1.85,-1.75,-1.65,-1.55,-1.45,-1.35,-1.25,-1.15,-1.05,-0.95,-0.85,-0.75,-0.65,-0.55,-0.45,-0.35,-0.25,-0.15,-0.05,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,1.95,2.05,2.15,2.25,2.35,2.45,2.55,2.65,2.75,2.85,2.95,3.05,3.15])
# Creating weights for histo: y3_PHI_0
y3_PHI_0_weights = numpy.array([1626.27534535,4878.82723604,6911.67091772,5691.96310871,4472.2552997,7318.23885405,4878.82723604,6505.09898138,6098.53104505,6911.67091772,7318.23885405,8944.5145994,5691.96310871,6098.53104505,6505.09898138,6098.53104505,6911.67091772,4065.68736336,5285.39517237,4878.82723604,7318.23885405,2845.98155435,5691.96310871,6505.09898138,5285.39517237,5691.96310871,8537.94666306,6098.53104505,4065.68736336,4065.68736336,6911.67091772,6505.09898138,7318.23885405,4472.2552997,6505.09898138,8944.5145994,7724.80679039,5285.39517237,6911.67091772,9757.65047207,7724.80679039,5285.39517237,7318.23885405,5691.96310871,6911.67091772,6098.53104505,7724.80679039,6911.67091772,5691.96310871,6098.53104505,7724.80679039,7724.80679039,8537.94666306,5691.96310871,6098.53104505,9351.08253574,6098.53104505,6505.09898138,7318.23885405,4878.82723604,8131.37472673,6911.67091772,9757.65047207,2439.41281802])
# Creating weights for histo: y3_PHI_1
y3_PHI_1_weights = numpy.array([1363.6550396,4772.7928386,5113.7048985,4772.7928386,5113.7048985,5795.5330183,4772.7928386,4090.9647188,7841.0173777,7500.1013178,8522.8454975,4431.8807787,5454.6209584,5113.7048985,5113.7048985,3750.0514589,4772.7928386,5454.6209584,5113.7048985,6477.3611381,4090.9647188,5795.5330183,6136.4490782,5795.5330183,3068.2237391,5454.6209584,4772.7928386,6477.3611381,8522.8454975,6136.4490782,4772.7928386,4772.7928386,4772.7928386,5795.5330183,5454.6209584,3409.137399,5795.5330183,5113.7048985,4431.8807787,4431.8807787,5113.7048985,8522.8454975,5113.7048985,2386.3964193,4431.8807787,4090.9647188,4772.7928386,5454.6209584,7500.1013178,3409.137399,5795.5330183,6818.273198,5454.6209584,4772.7928386,5795.5330183,6136.4490782,5113.7048985,5454.6209584,4431.8807787,5795.5330183,6136.4490782,7841.0173777,7500.1013178,2386.3964193])
# Creating weights for histo: y3_PHI_2
y3_PHI_2_weights = numpy.array([433.601879037,1052.01770652,1052.01770652,1208.39886289,1300.80563711,1123.10008669,995.15212238,1130.2080847,1236.83165496,1315.02203315,1101.77529264,1123.10008669,1115.99168867,1179.96567082,1023.58491445,895.636550142,1158.64127677,1144.42488074,1023.58491445,1350.56322323,1179.96567082,1194.18246686,1144.42488074,1073.34250057,1094.66729462,1236.83165496,1137.31648272,1187.07406884,1307.91403513,1222.61525892,1286.58924108,1101.77529264,1016.47651643,1137.31648272,1151.53287875,1286.58924108,1151.53287875,1194.18246686,1165.74927479,1101.77529264,1115.99168867,980.935326346,1151.53287875,1080.45049858,1165.74927479,1080.45049858,1151.53287875,1052.01770652,980.935326346,1052.01770652,1123.10008669,1059.12610453,1073.34250057,1108.88369065,1208.39886289,959.610932295,1215.50686091,1009.36851841,1165.74927479,1172.85767281,1059.12610453,973.827328329,1307.91403513,490.467863173])
# Creating weights for histo: y3_PHI_3
y3_PHI_3_weights = numpy.array([37.914355132,125.188929964,110.881604254,112.312324825,107.304762826,105.874042255,103.727961399,130.196451963,105.874042255,104.443321684,113.743085396,105.15868197,98.7203994002,128.050371106,115.889166252,110.166243968,123.042809107,103.012601113,116.604526538,123.042809107,118.035247109,122.327448822,106.589402541,124.473529678,114.458445681,109.450883683,117.319886823,106.589402541,100.866480257,104.443321684,117.319886823,118.035247109,110.881604254,99.4357596857,120.896728251,115.889166252,118.035247109,128.050371106,115.173805967,109.450883683,117.319886823,120.181367965,103.727961399,109.450883683,104.443321684,103.727961399,115.889166252,114.458445681,111.596964539,114.458445681,118.035247109,117.319886823,115.173805967,118.750607394,131.627212534,97.2896788292,106.589402541,134.488653676,107.304762826,127.335010821,110.881604254,116.604526538,120.181367965,57.2292228407])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import logging
from neural_compressor.utils.logger import log
import math
import os
import random
import copy
import datasets
from datasets import load_dataset, load_metric
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.distributed as dist
from tqdm.auto import tqdm
import numpy as np
import transformers
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logger = logging.getLogger(__name__)
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
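# Illustrative sketch (an assumption, not part of the original script): the
# sentence keys above are how raw GLUE examples become tokenizer inputs --
# two text columns are paired for tasks like MRPC, a single column is used
# for tasks like SST-2. The helper name is hypothetical.
def _demo_build_tokenizer_inputs(examples, task_name, tokenizer, max_length=128):
    sentence1_key, sentence2_key = task_to_keys[task_name]
    texts = ((examples[sentence1_key],) if sentence2_key is None
             else (examples[sentence1_key], examples[sentence2_key]))
    return tokenizer(*texts, padding="max_length", max_length=max_length,
                     truncation=True)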
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--task_name",
type=str,
default=None,
help="The name of the glue task to train on.",
choices=list(task_to_keys.keys()),
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_seq_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
    parser.add_argument('--use_auth_token', action='store_true', help="use Hugging Face authentication token")
parser.add_argument("--resume", type=str, default=None, help="Where to resume from the provided model.")
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument('--do_prune', action='store_true',
help="prune model")
parser.add_argument('--do_eval', action='store_true',
help="evaluate model")
parser.add_argument('--do_quantization', action='store_true',
help="do quantization aware training on model")
parser.add_argument('--do_distillation', action='store_true',
help="do distillation with pre-trained teacher model")
parser.add_argument("--prune_config", default='prune.yaml', help="pruning config")
parser.add_argument("--quantization_config", default='qat.yaml', help="quantization config")
parser.add_argument("--distillation_config", default='distillation.yaml', help="pruning config")
parser.add_argument(
"--teacher_model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models"
" to be the teacher model.",
required=True,
)
parser.add_argument("--core_per_instance", type=int, default=-1, help="cores per instance.")
parser.add_argument("--temperature", default=1, type=float,
help='temperature parameter of distillation')
parser.add_argument("--loss_types", default=['CE', 'KL'], type=str, nargs='+',
help='loss types of distillation, should be a list of length 2, '
'first for student targets loss, second for teacher student loss.')
parser.add_argument("--loss_weights", default=[0.5, 0.5], type=float, nargs='+',
help='loss weights of distillation, should be a list of length 2, '
'and sum to 1.0, first for student targets loss weight, '
'second for teacher student loss weight.')
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
return args
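# Note: `rank` and `world` used in gather_results() below are assumed to be module-level
# globals set during distributed setup, e.g. (illustrative):
#     rank = dist.get_rank() if dist.is_initialized() else -1
#     world = dist.get_world_size() if dist.is_initialized() else 1
# They are not defined in this excerpt.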
def gather_results(predictions, gt):
if rank != -1:
pred_list = [predictions.clone() for _ in range(world)] if rank == 0 else []
gt_list = [gt.clone() for _ in range(world)] if rank == 0 else []
dist.gather(predictions, gather_list=pred_list)
dist.gather(gt, gather_list=gt_list)
return pred_list[0], gt_list[0]
else:
return predictions, gt
def evaluation(model, eval_dataloader, metric):
logger.info("***** Running eval *****")
logger.info(f" Num examples = {len(eval_dataloader) }")
model.eval()
eval_dataloader = tqdm(eval_dataloader, desc="Evaluating")
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)['logits']
predictions = outputs.argmax(dim=-1)
metric.add_batch(
predictions=predictions,
references=batch["labels"],
)
eval_metric = metric.compute()
logger.info(f"eval_metric : {eval_metric}")
return eval_metric['accuracy']
def train(args, model, train_dataloader, lr_scheduler, criterion, optimizer, agent, eval_dataloader, metric):
# Train!
total_batch_size = args.batch_size * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
completed_steps = 0
agent.pre_epoch_begin()
model = agent.model.model
for epoch in range(args.num_train_epochs):
model.train()
train_dataloader = tqdm(train_dataloader, desc="Training")
agent.on_epoch_begin(epoch)
for step, batch in enumerate(train_dataloader):
agent.on_batch_begin(step)
teacher_logits = None
if 'teacher_logits' in batch:
teacher_logits = batch['teacher_logits']
del batch['teacher_logits']
outputs = model(**batch)
if criterion is None:
loss = outputs.loss
else:
criterion.teacher_outputs = teacher_logits
loss = criterion(outputs['logits'], batch["labels"])
loss = loss / args.gradient_accumulation_steps
loss.backward()
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
agent.on_post_grad()
lr_scheduler.step()
optimizer.zero_grad()
completed_steps += 1
agent.on_batch_end()
if completed_steps >= args.max_train_steps:
break
agent.on_epoch_end()
evaluation(model, eval_dataloader, metric)
agent.post_epoch_end()
def main():
args = parse_args()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
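    # e.g. (illustrative) a custom training CSV for a sentence-pair task could look like:
    #     sentence1,sentence2,label
    #     "A man is playing a guitar.","A person plays an instrument.",entailment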
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", args.task_name)
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
        extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if args.task_name is not None:
is_regression = args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
use_auth_token=args.use_auth_token)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path,
use_fast=not args.use_slow_tokenizer,
use_auth_token=args.use_auth_token)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config, use_auth_token=args.use_auth_token
)
if args.resume:
try:
model.load_state_dict(torch.load(args.resume))
logger.info('Resumed model from {}'.format(args.resume))
except:
raise TypeError('Provided {} is not a valid checkpoint file, '
'please provide .pt file'.format(args.resume))
# Preprocessing the datasets
if args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
logger.info(
f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
"Using it!"
)
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
elif args.task_name is None:
label_to_id = {v: i for i, v in enumerate(label_list)}
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*texts, padding=padding, max_length=args.max_seq_length, truncation=True)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
processed_datasets = raw_datasets.map(
preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)
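        # Illustrative alternative (not used above): when training with fp16/AMP, padding every
        # batch to a multiple of 8 lets Tensor Cores be used on Volta+ GPUs, e.g.
        #     data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)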
if args.do_distillation:
teacher_config = AutoConfig.from_pretrained(args.teacher_model_name_or_path, \
num_labels=num_labels, finetuning_task=args.task_name)
teacher_tokenizer = AutoTokenizer.from_pretrained(args.teacher_model_name_or_path, \
use_fast=not args.use_slow_tokenizer)
assert teacher_tokenizer.vocab == tokenizer.vocab, \
'teacher model and student model should have same tokenizer.'
teacher_model = AutoModelForSequenceClassification.from_pretrained(
args.teacher_model_name_or_path,
from_tf=bool(".ckpt" in args.teacher_model_name_or_path),
config=teacher_config,
)
para_counter = lambda model:sum(p.numel() for p in model.parameters())
logger.info("***** Number of teacher model parameters: {:.2f}M *****".format(\
para_counter(teacher_model)/10**6))
logger.info("***** Number of student model parameters: {:.2f}M *****".format(\
para_counter(model)/10**6))
# get logits of teacher model
if args.loss_weights[1] > 0:
def get_logits(teacher_model, train_dataset):
logger.info("***** Getting logits of teacher model *****")
logger.info(f" Num examples = {len(train_dataset) }")
teacher_model.eval()
npy_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'{}.{}.npy'.format(args.task_name, args.teacher_model_name_or_path.replace('/', '.')))
if os.path.exists(npy_file):
teacher_logits = [x for x in np.load(npy_file)]
else:
train_dataloader = DataLoader(train_dataset, collate_fn=data_collator, \
batch_size=args.batch_size)
train_dataloader = tqdm(train_dataloader, desc="Evaluating")
teacher_logits = []
for step, batch in enumerate(train_dataloader):
outputs = teacher_model(**batch)
                        teacher_logits += [x for x in outputs['logits'].detach().numpy()]
                    np.save(npy_file, np.array(teacher_logits))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 11:33:01 2019
@author: nikos
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import h5py
from keras.preprocessing import image# for RGB images
import os
#import imageio
from sklearn.model_selection import train_test_split
import cv2# cv2.imread() for grayscale images
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):
"""
Add a vertical color bar to an image plot.
https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
"""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
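# Example usage (illustrative):
#     im = plt.imshow(np.random.rand(10, 10))
#     add_colorbar(im)
#     plt.show()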
#%% load the images
img_folder = './data/BBBC010_v2_images'
msk_folder = './data/BBBC010_v1_foreground'
target_height = 400
target_width = 400
Nimages = 100#100 images, each image has 2 channels
# load the filenames of all images
# Note: delete the __MACOSX folder in the img_folder first
img_filenames = np.array(sorted(os.listdir(img_folder)))#sort to alphabetical order
assert len(img_filenames)==Nimages*2#2 channels
wells = [f.split('_')[6] for f in img_filenames]
wells = np.sort(np.unique(wells))
import os, sys, pickle, time, glob
from gym import spaces
import numpy as np
import pybullet as p
from datetime import datetime
from .env import AssistiveEnv
class ScratchItchEnv(AssistiveEnv):
def __init__(self, robot_type='pr2', human_control=False, vr=False, new=False):
self.participant = -1
self.gender = 'male'
self.hipbone_to_mouth_height = 0.6
self.policy_name = ''
self.replay = False
self.replay_dir = None
self.human_gains, self.waist_gains, self.human_forces, self.waist_forces = 0.09, 0.09, 1.0, 4.0
super(ScratchItchEnv, self).__init__(robot_type=robot_type, task='scratch_itch', human_control=human_control, vr=vr, new=new, frame_skip=5, time_step=0.02, action_robot_len=7, action_human_len=(10 if human_control else 0), obs_robot_len=30, obs_human_len=(34 if human_control else 0))
def setup(self, gender, participant, policy_name, hipbone_to_mouth_height):
self.gender = gender
self.participant = participant
self.policy_name = policy_name
if hipbone_to_mouth_height is None:
self.hipbone_to_mouth_height = self.calc_hipbone_to_mouth_height()
else:
self.calc_hipbone_to_mouth_height()
self.hipbone_to_mouth_height = hipbone_to_mouth_height
def step(self, action):
if self.replay:
if self.last_sim_time is None:
self.last_sim_time = time.time()
for frame in range(self.frame_skip):
p.restoreState(fileName=os.path.join(self.replay_dir, 'frame_%d.bullet' % (self.iteration*self.frame_skip + frame + 1)))
# Slow down time so that the simulation matches real time
self.slow_time()
action = self.action_list[self.iteration]
self.iteration += 1
else:
if len(action) < self.action_robot_len + self.action_human_len and self.participant >= 0:
self.free_move(robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'))
obs = self._get_obs([0], [0, 0])
return obs, 0, False, dict()
self.take_step(action, robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.05)
self.action_list.append(action)
if self.vr and self.participant >= 0:
if self.iteration == 200:
# End of simulation, save action_list
with open(os.path.join(self.directory, 'actions.pkl'), 'wb') as f:
pickle.dump(self.action_list, f)
total_force_on_human, tool_force, tool_force_at_target, target_contact_pos = self.get_total_force()
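        # getLinkState(..., computeLinkVelocity=True)[6] is the link's Cartesian world-frame
        # linear velocity; its norm gives the end-effector speed.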
end_effector_velocity = np.linalg.norm(p.getLinkState(self.tool, 1, computeForwardKinematics=True, computeLinkVelocity=True, physicsClientId=self.id)[6])
if target_contact_pos is not None:
            target_contact_pos = np.array(target_contact_pos)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2020
@author: jmmauricio
"""
import numpy as np
from pydae.tools import get_v,get_i,get_s
import json
from collections import namedtuple
import numba
class grid(object):
def __init__(self,syst):
#def bokeh_tools(data):
self.syst = syst
self.s_radio_scale = 0.01
self.s_radio_max = 20
self.s_radio_min = 1
with np.load('matrices.npz') as data:
Y_primitive = data['Y_primitive']
A_conect = data['A_conect']
nodes_list = data['nodes_list']
node_sorter = data['node_sorter']
Y_vv = data['Y_vv']
Y_vi = data['Y_vi']
N_v = int(data['N_v'])
self.nodes_list = nodes_list
self.Y_primitive = Y_primitive
self.A_conect = A_conect
self.node_sorter = node_sorter
self.Y_vv = Y_vv
self.Y_vi = Y_vi
self.N_v = N_v
json_file = 'grid_data.json'
json_file = json_file
json_data = open(json_file).read().replace("'",'"')
data = json.loads(json_data)
self.buses = data['buses']
if 'transformers' in data:
self.transformers = data['transformers']
else:
self.transformers = []
self.lines = data['lines']
self.loads = data['loads']
if 'vscs' in data:
self.vscs = data['vscs']
else: self.vscs = []
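        # Typical use (illustrative): after solving the power flow with the pydae system `syst`,
        #     g = grid(syst); g.dae2vi(); g.get_v()
        # fills each entry of g.buses with per-phase voltage magnitudes and angles.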
def dae2vi(self):
'''
For obtaining line currents from node voltages after power flow is solved.
Returns
-------
None.
'''
n2a = {'1':'a','2':'b','3':'c','4':'n'}
a2n = {'a':1,'b':2,'c':3,'n':4}
V_node_list = []
I_node_list = [0.0]*len(self.nodes_list)
self.I_node_list = I_node_list
for item in self.nodes_list:
bus_name,phase_name = item.split('.')
#i = get_i(self.syst,bus_name,phase_name=n2a[phase_name],i_type='phasor',dq_name='ri')
#I_node_list += [i]
v = get_v(self.syst,bus_name,phase_name=n2a[phase_name],v_type='phasor',dq_name='ri')
V_node_list += [v]
V_node = np.array(V_node_list).reshape(len(V_node_list),1)
V_known = np.copy(V_node[:self.N_v])
V_unknown = np.copy(V_node[self.N_v:])
I_unknown = self.Y_vv @ V_known + self.Y_vi @ V_unknown
#self.I_node = I_node
self.V_node = V_node
self.I_unknown = I_unknown
self.I_known = np.array(I_node_list).reshape(len(I_node_list),1)
self.I_node = np.vstack((self.I_unknown,self.I_known))
for load in self.loads:
bus_name = load['bus']
if load['type'] == '3P+N':
for ph in ['a','b','c','n']:
idx = list(self.nodes_list).index(f"{load['bus']}.{a2n[ph]}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if load['type'] == '1P+N':
ph = load['bus_nodes'][0]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
ph = load['bus_nodes'][1]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
for vsc in self.vscs:
bus_name = vsc['bus_ac']
phases = ['a','b','c','n']
if vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
phases = ['a','b','c']
for ph in phases:
idx = list(self.nodes_list).index(f"{vsc['bus_ac']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if not vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
bus_name = vsc['bus_dc']
for ph in ['a','n']:
idx = list(self.nodes_list).index(f"{vsc['bus_dc']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='r')
self.I_node[idx] += i_
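        # Branch (line) currents follow from the primitive admittance matrix and the
        # incidence matrix A_conect: I_lines = Y_primitive @ A_conect.T @ V_node.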
I_lines = self.Y_primitive @ self.A_conect.T @ self.V_node
self.I_lines = I_lines
def get_v(self):
'''
Compute phase-neutral and phase-phase voltages from power flow solution and put values
in buses dictionary.
'''
res = {}
V_sorted = []
I_sorted = []
S_sorted = []
start_node = 0
self.V_results = self.V_node
# self.I_results = self.I_node
V_sorted = self.V_node[self.node_sorter]
I_sorted = self.I_node[self.node_sorter]
nodes2string = ['v_an','v_bn','v_cn','v_gn']
for bus in self.buses:
N_nodes = bus['N_nodes']
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# V = self.V_results[self.nodes.index(bus_node)][0]
# V_sorted += [V]
# nodes_in_bus += [node]
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# I = self.I_results[self.nodes.index(bus_node)][0]
# I_sorted += [I]
if N_nodes==3: # if 3 phases
v_ag = V_sorted[start_node+0,0]
v_bg = V_sorted[start_node+1,0]
v_cg = V_sorted[start_node+2,0]
i_a = I_sorted[start_node+0,0]
i_b = I_sorted[start_node+1,0]
i_c = I_sorted[start_node+2,0]
s_a = (v_ag)*np.conj(i_a)
s_b = (v_bg)*np.conj(i_b)
s_c = (v_cg)*np.conj(i_c)
start_node += 3
bus.update({'v_an':np.abs(v_ag),
'v_bn':np.abs(v_bg),
'v_cn':np.abs(v_cg),
'v_ng':0.0})
bus.update({'deg_an':np.angle(v_ag, deg=True),
'deg_bn':np.angle(v_bg, deg=True),
'deg_cn':np.angle(v_cg, deg=True),
                            'deg_ng':np.angle(0, deg=True)})
from pathlib import Path
from numpy import arange, array, ceil, empty, floor, isnan, linspace, \
log10, meshgrid, nan, tile, transpose, where
from numpy.ma import masked_where
from matplotlib.pyplot import clf, close, cm, colorbar, figure, savefig, show
from mpl_toolkits.basemap import Basemap
from os.path import dirname, isdir, join, realpath
from os import mkdir
import pyapex, seaborn
from scipy.interpolate import interp2d#, RectBivariateSpline
#
from pyigrf.pyigrf import GetIGRF
from pyiri2016 import IRI2016
from pyiri2016 import IRI2016Profile
from pyiri2016.iriweb import irisubgl, firisubl
from timeutil import TimeUtilities
#
cwd = Path(__file__).parent
DataFolder = cwd / 'data'
class IRI2016_2DProf(IRI2016Profile):
#def __init__(self):
# pass
#def _GetTitle(self):
# IRI2016Profile()._GetTitle(__self__)
def HeightVsTime(self, FIRI=False, hrlim=[0., 24.], hrstp=1.):
self.option = 1
nhrstp = int((hrlim[1] + hrstp - hrlim[0]) / hrstp) + 1
hrbins = list(map(lambda x: hrlim[0] + float(x) * hrstp, range(nhrstp)))
Ne = empty((nhrstp, self.numstp))
if FIRI: NeFIRI = empty((nhrstp, self.numstp))
Te = empty((nhrstp, self.numstp))
Ti = empty((nhrstp, self.numstp))
for i in range(nhrstp):
self.hour = hrbins[i]
self.HeiProfile()
Ne[i, :] = self.a[0, range(self.numstp)]
if FIRI: NeFIRI[i, :] = self.a[12, range(self.numstp)]
Te[i, :] = self.a[3, range(self.numstp)]
Ti[i, :] = self.a[2, range(self.numstp)]
# self._GetTitle()
altbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : Ne, 'Te' : Te, 'Ti' : Ti, \
'title1' : self.title1, 'title2' : self.title2}
if FIRI:
self.FIRI2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : NeFIRI, \
'title1' : self.title1, 'title2' : self.title2}
#
# End of 'HeightVsTime'
#####
def LatVsLon(self, lonlim=[-180., 180.], lonstp=20.):
self.option = 2
nlonstp = int((lonlim[1] + lonstp - lonlim[0]) / lonstp) + 1
lonbins = list(map(lambda x: lonlim[0] + float(x) * lonstp, range(nlonstp)))
NmF2 = empty((nlonstp, self.numstp))
hmF2 = empty((nlonstp, self.numstp))
B0 = empty((nlonstp, self.numstp))
dip = empty((nlonstp, self.numstp))
for i in range(nlonstp):
self.lon = lonbins[i]
self.HeiProfile()
NmF2[i, :] = self.b[0, range(self.numstp)]
hmF2[i, :] = self.b[1, range(self.numstp)]
B0[i, :] = self.b[9, range(self.numstp)]
dip[i, :] = self.b[24, range(self.numstp)]
latbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'lat' : latbins, 'lon' : lonbins, \
'NmF2' : NmF2, 'hmF2' : hmF2, 'B0' : B0, 'dip' : dip, \
'title' : self.title3}
#
# End of 'LatVsLon'
#####
def LatVsFL(self, date=[2003, 11, 21], FIRI=False, IGRF=False, time=[23, 15, 0], \
gc=[-77.76, -11.95], \
hlim=[80., 200.], hstp=1., mlatlim=[-10., 10.], mlatstp=.1):
#
# INPUTS
#
# Date
year, month, day = date
# Time
hour, minute, second = time
# Geog. Coord.
dlon, dlat = gc
# hlim -> Height range at equator, in km
# hstp -> height resolution at equator, in km
# mlatlim -> Geom. latitude range, in degrees
# mlatstp -> Geom. latitude resolution, in degrees
#
###
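        # e.g. (illustrative call, using the default arguments):
        #     self.LatVsFL(date=[2003, 11, 21], time=[23, 15, 0], gc=[-77.76, -11.95])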
doy = TimeUtilities().CalcDOY(year, month, day)
        date2 = year + doy / (365 + (1 if TimeUtilities().IsLeapYear else 0))
# f = figure(figsize=(16,6))
# pn = f.add_subplot(111)
self.coordl, self.qdcoordl = [], []
for h in arange(hlim[0], hlim[1] + hstp, hstp):
gc, qc = pyapex.ApexFL().getFL(date=date2, dlon=dlon, dlat=dlat, \
hateq=h, mlatRange=mlatlim, mlatSTP=mlatstp)
# x, y, z = gc['lat'], gc['alt'], gc['lon']
# ind = where(y < hlim[0])
# if len(ind) > 0: x[ind], y[ind], z[ind] = nan, nan, nan
# pn.plot(x, y)
self.coordl.append([gc['lon'], gc['alt'], gc['lat']])
self.qdcoordl.append([qc['lon'], gc['alt'], qc['lat']])
# pn.invert_xaxis()
# show()
jf = IRI2016().Switches()
jmag = 0
mmdd = int(month * 100) + day
hour2 = hour + minute / 60 + second / 3600
self.coordl = array(self.coordl)
self.qdcoordl = array(self.qdcoordl)
# nfl -> No. of field-line (or height)
# nc -> No. of coord. (0 -> lon, 1 -> alt, 2 -> lat)
# np -> No. of points per field-line
nfl, nc, np = self.coordl.shape
        self.ne, self.te = tile(nan, (np, nfl)), tile(nan, (np, nfl))
"""
fitting.py
Created by <NAME> on 2017-05-19.
"""
import os
import glob
import inspect
from collections import OrderedDict
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import Planck15
import astropy.constants as const
from . import utils
#from .model import BeamCutout
from .utils import GRISM_COLORS
# Minimum redshift where IGM is applied
IGM_MINZ = 3.4 # blue edge of G800L
# Default parameters for drizzled line map
PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
# IGM from eazy-py
try:
import eazy.igm
IGM = eazy.igm.Inoue14()
except:
IGM = None
def run_all_parallel(id, get_output_data=False, **kwargs):
import numpy as np
from grizli.fitting import run_all
from grizli import multifit
import time
import traceback
t0 = time.time()
print('Run {0}'.format(id))
    args = np.load('fit_args.npy', allow_pickle=True)[0]
args['verbose'] = False
for k in kwargs:
args[k] = kwargs[k]
fp = open('{0}_{1:05d}.log_par'.format(args['group_name'], id),'w')
fp.write('{0}_{1:05d}: {2}\n'.format(args['group_name'], id, time.ctime()))
fp.close()
try:
#args['zr'] = [0.7, 1.0]
#mb = multifit.MultiBeam('j100025+021651_{0:05d}.beams.fits'.format(id))
out = run_all(id, **args)
if get_output_data:
return out
status=1
except:
status=-1
trace = traceback.format_exc(limit=2)#, file=fp)
if args['verbose']:
print(trace)
t1 = time.time()
return id, status, t1-t0
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], fitter='nnls', group_name='grism', fit_stacks=True, only_stacks=False, prior=None, fcontam=0.2, pline=PLINE, mask_sn_limit=3, fit_only_beams=False, fit_beams=True, root='*', fit_trace_shift=False, phot=None, phot_obj=None, verbose=True, scale_photometry=False, show_beams=True, scale_on_stacked_1d=True, overlap_threshold=5, MW_EBV=0., sys_err=0.03, get_dict=False, bad_pa_threshold=1.6, units1d='flam', redshift_only=False, line_size=1.6, use_psf=False, get_line_width=False, sed_args={'bin':1, 'xlim':[0.3, 9]}, get_ir_psfs=True, min_mask=0.01, min_sens=0.08, **kwargs):
"""Run the full procedure
1) Load MultiBeam and stack files
2) ... tbd
fwhm=1200; zr=[0.65, 1.6]; dz=[0.004, 0.0002]; group_name='grism'; fit_stacks=True; prior=None; fcontam=0.2; mask_sn_limit=3; fit_beams=True; root=''
"""
import glob
import grizli.multifit
from grizli.stack import StackFitter
from grizli.multifit import MultiBeam
if get_dict:
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['id', 'get_dict', 'frame', 'glob', 'grizli', 'StackFitter', 'MultiBeam']:
if k in args:
args.pop(k)
return args
mb_files = glob.glob('{0}_{1:05d}.beams.fits'.format(root, id))
st_files = glob.glob('{0}_{1:05d}.stack.fits'.format(root, id))
if not only_stacks:
mb = MultiBeam(mb_files, fcontam=fcontam, group_name=group_name, MW_EBV=MW_EBV, sys_err=sys_err, verbose=verbose, psf=use_psf, min_mask=min_mask, min_sens=min_sens)
# Check for PAs with unflagged contamination or otherwise discrepant
# fit
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
if verbose:
print('\nHas bad PA! Final list: {0}\n{1}'.format(keep_dict,
fit_log))
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32)
fig.savefig('{0}_{1:05d}.fix.stack.png'.format(group_name, id))
good_PAs = []
for k in keep_dict:
good_PAs.extend(keep_dict[k])
else:
good_PAs = None # All good
else:
good_PAs = None # All good
redshift_only=True # can't drizzle line maps from stacks
if fit_only_beams:
st = None
else:
st = StackFitter(st_files, fit_stacks=fit_stacks, group_name=group_name, fcontam=fcontam, overlap_threshold=overlap_threshold, MW_EBV=MW_EBV, verbose=verbose, sys_err=sys_err, PAs=good_PAs, chi2_threshold=bad_pa_threshold)
st.initialize_masked_arrays()
if only_stacks:
mb = st
if not only_stacks:
if fit_trace_shift:
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
shift, _ = mb.fit_trace_shift(tol=1.e-3, verbose=verbose,
split_groups=True)
mb.initialize_masked_arrays()
## Get photometry from phot_obj
if (phot is None) & (phot_obj is not None):
phot_i, ii, dd = phot_obj.get_phot_dict(mb.ra, mb.dec)
if dd < 0.5*u.arcsec:
phot = phot_i
if phot is not None:
if phot == 'vizier':
### Get photometry from Vizier catalogs
vizier_catalog = list(utils.VIZIER_BANDS.keys())
phot = utils.get_Vizier_photometry(mb.ra, mb.dec, verbose=verbose,
vizier_catalog=vizier_catalog)
if phot is not None:
zgrid = utils.log_zgrid(zr=zr, dz=0.005)
phot['tempfilt'] = utils.generate_tempfilt(t0,
phot['filters'],
zgrid=zgrid,
MW_EBV=MW_EBV)
if phot is not None:
if st is not None:
st.set_photometry(**phot, min_err=sys_err)
mb.set_photometry(**phot, min_err=sys_err)
if t0 is None:
t0 = utils.load_templates(line_complexes=True, fsps_templates=True, fwhm=fwhm)
if t1 is None:
t1 = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
# Fit on stacked spectra or individual beams
if fit_only_beams:
fit_obj = mb
else:
fit_obj = st
### Do scaling now with direct spectrum function
if (scale_photometry > 0) & (phot is not None):
try:
scl = mb.scale_to_photometry(z=0, method='lm', templates=t0, order=scale_photometry*1-1)
except:
scl = [10.]
if hasattr(scl,'status'):
if scl.status > 0:
print('scale_to_photometry: [{0}]'.format(', '.join(['{0:.2f}'.format(x_i) for x_i in scl.x])))
mb.pscale = scl.x
if st is not None:
st.pscale = scl.x
# First pass
fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
fit_hdu = pyfits.table_to_hdu(fit)
fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
if hasattr(fit_obj, 'pscale'):
fit_hdu.header['PSCALEN'] = (len(fit_obj.pscale)-1, 'PSCALE order')
for i, p in enumerate(fit_obj.pscale):
fit_hdu.header['PSCALE{0}'.format(i)] = (p, 'PSCALE parameter {0}'.format(i))
# Add photometry information
if (fit_obj.Nphot > 0) & hasattr(fit_obj, 'photom_filters'):
h = fit_hdu.header
h['NPHOT'] = fit_obj.Nphot, 'Number of photometry filters'
h['PHOTSRC'] = fit_obj.photom_source, 'Source of the photometry'
for i in range(len(fit_obj.photom_filters)):
h['PHOTN{0:03d}'.format(i)] = fit_obj.photom_filters[i].name.split()[0], 'Filter {0} name'.format(i)
h['PHOTL{0:03d}'.format(i)] = fit_obj.photom_pivot[i], 'Filter {0} pivot wavelength'.format(i)
h['PHOTF{0:03d}'.format(i)] = fit_obj.photom_flam[i], 'Filter {0} flux flam'.format(i)
h['PHOTE{0:03d}'.format(i)] = fit_obj.photom_eflam[i], 'Filter {0} err flam'.format(i)
# # Second pass if rescaling spectrum to photometry
# if scale_photometry:
# scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1-1)
# if scl.status > 0:
# mb.pscale = scl.x
# if st is not None:
# st.pscale = scl.x
#
# fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
# fit_hdu = pyfits.table_to_hdu(fit)
# fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
# Zoom-in fit with individual beams
if fit_beams:
#z0 = fit.meta['Z50'][0]
z0 = fit.meta['z_map'][0]
#width = np.maximum(3*fit.meta['ZWIDTH1'][0], 3*0.001*(1+z0))
width = 20*0.001*(1+z0)
mb_zr = z0 + width*np.array([-1,1])
mb_fit = mb.xfit_redshift(templates=t0, zr=mb_zr, dz=[0.001, 0.0002], prior=prior, fitter=fitter, verbose=verbose)
mb_fit_hdu = pyfits.table_to_hdu(mb_fit)
mb_fit_hdu.header['EXTNAME'] = 'ZFIT_BEAM'
else:
mb_fit = fit
#### Get best-fit template
tfit = mb.template_at_z(z=mb_fit.meta['z_map'][0], templates=t1, fit_background=True, fitter=fitter)
# Redrizzle? ... testing
if False:
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam,
flambda=False,
size=48, scale=1.,
kernel='point', pixfrac=0.1,
zfit=tfit)
# Fit covariance
cov_hdu = pyfits.ImageHDU(data=tfit['covar'], name='COVAR')
Next = mb_fit.meta['N']
cov_hdu.header['N'] = Next
# Line EWs & fluxes
coeffs_clip = tfit['coeffs'][mb.N:]
covar_clip = tfit['covar'][mb.N:,mb.N:]
lineEW = utils.compute_equivalent_widths(t1, coeffs_clip, covar_clip, max_R=5000, Ndraw=1000, z=tfit['z'])
for ik, key in enumerate(lineEW):
for j in range(3):
if not np.isfinite(lineEW[key][j]):
lineEW[key][j] = -1.e30
cov_hdu.header['FLUX_{0:03d}'.format(ik)] = tfit['cfit'][key][0], '{0} line flux; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['ERR_{0:03d}'.format(ik)] = tfit['cfit'][key][1], '{0} line uncertainty; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['EW16_{0:03d}'.format(ik)] = lineEW[key][0], 'Rest-frame {0} EW, 16th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW50_{0:03d}'.format(ik)] = lineEW[key][1], 'Rest-frame {0} EW, 50th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW84_{0:03d}'.format(ik)] = lineEW[key][2], 'Rest-frame {0} EW, 84th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EWHW_{0:03d}'.format(ik)] = (lineEW[key][2]-lineEW[key][0])/2, 'Rest-frame {0} EW, 1-sigma half-width; Angstrom'.format(key.strip('line '))
# Velocity width
if get_line_width:
if phot is not None:
mb.unset_photometry()
vel_width_res = mb.fit_line_width(z0=tfit['z'], bl=1.2, nl=1.2)
if verbose:
print('Velocity width: BL/NL = {0:.0f}/{1:.0f}, z={2:.4f}'.format(vel_width_res[0]*1000, vel_width_res[1]*1000, vel_width_res[2]))
fit_hdu.header['VEL_BL'] = vel_width_res[0]*1000, 'Broad line FWHM'
fit_hdu.header['VEL_NL'] = vel_width_res[1]*1000, 'Narrow line FWHM'
fit_hdu.header['VEL_Z'] = vel_width_res[2], 'Line width, best redshift'
fit_hdu.header['VEL_NFEV'] = vel_width_res[3], 'Line width, NFEV'
fit_hdu.header['VEL_FLAG'] = vel_width_res[4], 'Line width, NFEV'
if phot is not None:
mb.set_photometry(**phot)
# Best-fit template itself
tfit_sp = utils.GTable()
for ik, key in enumerate(tfit['cfit']):
for save in [tfit_sp.meta]:
save['CVAL{0:03d}'.format(ik)] = tfit['cfit'][key][0], 'Coefficient for {0}'.format(key)
save['CERR{0:03d}'.format(ik)] = tfit['cfit'][key][1], 'Uncertainty for {0}'.format(key)
save['CNAME{0:03d}'.format(ik)] = key, 'Template name'
tfit_sp['wave'] = tfit['cont1d'].wave
tfit_sp['continuum'] = tfit['cont1d'].flux
tfit_sp['full'] = tfit['line1d'].flux
tfit_sp['wave'].unit = tfit['cont1d'].waveunits
tfit_sp['continuum'].unit = tfit['cont1d'].fluxunits
tfit_sp['full'].unit = tfit['line1d'].fluxunits
tfit_hdu = pyfits.table_to_hdu(tfit_sp)
tfit_hdu.header['EXTNAME'] = 'TEMPL'
# Make the plot
fig = mb.xmake_fit_plot(mb_fit, tfit, show_beams=show_beams, scale_on_stacked_1d=scale_on_stacked_1d)
# Add prior
if prior is not None:
fig.axes[0].plot(prior[0], np.log10(prior[1]), color='#1f77b4', alpha=0.5)
# Add stack fit to the existing plot
fig.axes[0].plot(fit['zgrid'], np.log10(fit['pdf']), color='0.5', alpha=0.5)
fig.axes[0].set_xlim(fit['zgrid'].min(), fit['zgrid'].max())
if phot is not None:
fig.axes[1].errorbar(mb.photom_pivot/1.e4, mb.photom_flam/1.e-19, mb.photom_eflam/1.e-19, marker='s', alpha=0.5, color='k', linestyle='None')
#fig.axes[1].plot(tfit['line1d'].wave/1.e4, tfit['line1d'].flux/1.e-19, color='k', alpha=0.2, zorder=100)
# Save the figure
fig.savefig('{0}_{1:05d}.full.png'.format(group_name, id))
if redshift_only:
return mb, st, fit, tfit, None
# Make the line maps
if pline is None:
pzfit, pspec2, pline = grizli.multifit.get_redshift_fit_defaults()
line_hdu = mb.drizzle_fit_lines(tfit, pline, force_line=utils.DEFAULT_LINE_LIST, save_fits=False, mask_lines=True, mask_sn_limit=mask_sn_limit, verbose=verbose, get_ir_psfs=get_ir_psfs)
# Add beam exposure times
exptime = mb.compute_exptime()
for k in exptime:
line_hdu[0].header['T_{0}'.format(k)] = (exptime[k], 'Total exposure time [s]')
line_hdu.insert(1, fit_hdu)
line_hdu.insert(2, cov_hdu)
if fit_beams:
line_hdu.insert(2, mb_fit_hdu)
line_hdu.insert(3, tfit_hdu)
    line_hdu.writeto('{0}_{1:05d}.full.fits'.format(group_name, id), overwrite=True, output_verify='fix')
# 1D spectrum
oned_hdul = mb.oned_spectrum_to_hdu(tfit=tfit, bin=1, outputfile='{0}_{1:05d}.1D.fits'.format(group_name, id))#, units=units1d)
######
# Show the drizzled lines and direct image cutout, which are
# extensions `DSCI`, `LINE`, etc.
s, si = 1, line_size
s = 4.e-19/np.max([beam.beam.total_flux for beam in mb.beams])
s = np.clip(s, 0.25, 4)
full_line_list = ['Lya', 'OII', 'Hb', 'OIII', 'Ha', 'SII', 'SIII']
fig = show_drizzled_lines(line_hdu, size_arcsec=si, cmap='plasma_r', scale=s, dscale=s, full_line_list=full_line_list)
fig.savefig('{0}_{1:05d}.line.png'.format(group_name, id))
if phot is not None:
out = mb, st, fit, tfit, line_hdu
if 'pz' in phot:
full_sed_plot(mb, tfit, zfit=fit, photometry_pz=phot['pz'], **sed_args)
else:
full_sed_plot(mb, tfit, zfit=fit, **sed_args)
return mb, st, fit, tfit, line_hdu
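# Example call (illustrative; assumes `{root}_{id:05d}.beams.fits` products exist in the
# working directory and templates are left at their defaults):
#     mb, st, fit, tfit, line_hdu = run_all(42, zr=[0.2, 1.5], group_name='grism', root='j123456+123456')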
###################################
def full_sed_plot(mb, tfit, zfit=None, bin=1, minor=0.1, save='png', sed_resolution=180, photometry_pz=None, zspec=None, spectrum_steps=False, xlim=[0.3, 9], **kwargs):
"""
Make a separate plot showing photometry and the spectrum
"""
#import seaborn as sns
import prospect.utils.smoothing
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
#mpl_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
mpl_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# sns_colors = colors = sns.color_palette("cubehelix", 8)
### seaborn cubehelix colors
sns_colors = colors = [(0.1036, 0.094, 0.206),
(0.0825, 0.272, 0.307),
(0.1700, 0.436, 0.223),
(0.4587, 0.480, 0.199),
(0.7576, 0.476, 0.437),
(0.8299, 0.563, 0.776),
(0.7638, 0.757, 0.949),
(0.8106, 0.921, 0.937)]
# Best-fit
#mb = out[0]
#zfit = out[2]
#tfit = out[3]
t1 = tfit['templates']
best_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux])
flat_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux*0+1])
bg = mb.get_flat_background(tfit['coeffs'])
sp = mb.optimal_extract(mb.scif[mb.fit_mask][:-mb.Nphot] - bg, bin=bin)#['G141']
spm = mb.optimal_extract(best_model, bin=bin)#['G141']
spf = mb.optimal_extract(flat_model, bin=bin)#['G141']
# Photometry
A_phot = mb._interpolate_photometry(z=tfit['z'], templates=t1)
A_model = A_phot.T.dot(tfit['coeffs'])
photom_mask = mb.photom_eflam > -98
##########
# Figure
if True:
if zfit is not None:
fig = plt.figure(figsize=[11, 9./3])
gs = gridspec.GridSpec(1,3, width_ratios=[1,1.5,1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
else:
fig = plt.figure(figsize=[9, 9./3])
gs = gridspec.GridSpec(1,2, width_ratios=[1,1.5])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
else:
gs = None
fig = plt.figure(figsize=[9, 9./3])
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Photometry SED
ax1.errorbar(np.log10(mb.photom_pivot[photom_mask]/1.e4), mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=30)
sm = prospect.utils.smoothing.smoothspec(tfit['line1d'].wave, tfit['line1d'].flux, resolution=sed_resolution, smoothtype='R') #nsigma=10, inres=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=11)
yl1 = ax1.get_ylim()
ax1.plot(np.log10(tfit['line1d'].wave/1.e4), sm/1.e-19, color=sns_colors[4], linewidth=1, zorder=0)
#ax1.grid()
ax1.set_xlabel(r'$\lambda$ / $\mu$m')
ax2.set_xlabel(r'$\lambda$ / $\mu$m')
# Spectrum
ymax, ymin = -1e30, 1e30
for g in sp:
sn = sp[g]['flux']/sp[g]['err']
clip = sn > 3
clip = spf[g]['flux'] > 0.2*spf[g]['flux'].max()
try:
scale = mb.compute_scale_array(mb.pscale, sp[g]['wave'])
except:
scale = 1
ax2.errorbar(sp[g]['wave'][clip]/1.e4, (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.5, linestyle='None', elinewidth=0.5, zorder=11)
if spectrum_steps:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, linestyle='steps-mid')
else:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, marker='.')
ymax = np.maximum(ymax, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].max())
ymin = np.minimum(ymin, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].min())
ax1.errorbar(np.log10(sp[g]['wave'][clip]/1.e4), (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.2, linestyle='None', elinewidth=0.5, zorder=-100)
xl, yl = ax2.get_xlim(), ax2.get_ylim()
yl = (ymin-0.3*ymax, 1.3*ymax)
# SED x range
if xlim is None:
okphot = (mb.photom_eflam > 0)
xlim = [np.minimum(xl[0]*0.7, 0.7*mb.photom_pivot[okphot].min()/1.e4), np.maximum(xl[1]/0.7, mb.photom_pivot[okphot].max()/1.e4/0.7)]
ax1.set_xlim(np.log10(xlim[0]), np.log10(xlim[1]))
ticks = np.array([0.5, 1, 2, 4, 8])
ticks = ticks[(ticks >= xlim[0]) & (ticks <= xlim[1])]
ax1.set_xticks(np.log10(ticks))
ax1.set_xticklabels(ticks)
# Back to spectrum
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=11)
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=12)
ax2.errorbar(mb.photom_pivot[photom_mask]/1.e4, mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=20)
ax2.set_xlim(xl); ax2.set_ylim(yl)
ax2.set_yticklabels([])
#ax2.set_xticks(np.arange(1.1, 1.8, 0.1))
#ax2.set_xticklabels([1.1, '', 1.3, '', 1.5, '', 1.7])
ax2.xaxis.set_minor_locator(MultipleLocator(minor))
ax2.xaxis.set_major_locator(MultipleLocator(minor*2))
# Show spectrum range on SED panel
xb, yb = np.array([0, 1, 1, 0, 0]), np.array([0, 0, 1, 1, 0])
    ax1.plot(np.log10(xl[0]+xb*(xl[1]-xl[0])), yl[0]+yb*(yl[1]-yl[0]))
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, MaxNLocator
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
def decompress_gain(Sweep_Array, loop, metadata,Compression_Calibration_Index = -1, Show_Plot = True, Verbose = True):
''' Assumes the two lowest input powers of the power sweep are not gain compressed, thus
cannot be used if the two lowest powers are gain compressed. '''
Sweep_Array_Record_Index = loop.index
V = Sweep_Array['Heater_Voltage'][Sweep_Array_Record_Index]
Fs = Sweep_Array['Fstart'][Sweep_Array_Record_Index]
P = Sweep_Array['Pinput_dB'][Sweep_Array_Record_Index]
Sweep_Array = np.extract((Sweep_Array['Heater_Voltage'] == V) & ( Sweep_Array['Fstart']==Fs) , Sweep_Array)
num_sweep_powers = Sweep_Array['Pinput_dB'].shape[0]
if num_sweep_powers <= 4:
print('Number of sweep powers, {0}, is insufficient to perform gain decompression.'.format(num_sweep_powers))
return
#else:
# print('Performing gain decompression on {0} sweep powers.'.format(num_sweep_powers))
Pin = np.power(10, Sweep_Array['Pinput_dB']/10.0) #mW, Probe Power
#ChooseCompression calobration data from Power Sweep Data.
#It is the S21(Compression_Calibration_Index) for every sweep power
compression_calibration_data = np.power(np.abs(Sweep_Array['S21'][:,Compression_Calibration_Index]),2) #Pout/Pin,
# alternatively : np.average(Sweep_Array['S21'][:,Compression_Calibration_Index:Compression_Calibration_Index+n],axis = 1) #average over n freq points.
Pout = compression_calibration_data*Pin
# calculated_power_gain is power gain calculated from the slope of the two smallest input powers in Pin
values, indices = np.unique(Pin, return_index=True)
min_index,min_plus_index = indices[:2]
    # When Pin = 0, Pout is not 0: there is an offset, i.e. a y-intercept b, such that y = m*x + b. Next, we find m.
calculated_power_gain = (Pout[min_plus_index] - Pout[min_index])/(Pin[min_plus_index ]-Pin[min_index])
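    # Worked example (illustrative): if the two smallest probe powers Pin = [1e-6, 2e-6] mW
    # give Pout = [1e-3, 2e-3] mW, the small-signal power gain is
    # (2e-3 - 1e-3) / (2e-6 - 1e-6) = 1000, i.e. 30 dB.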
#Pout_ideal is the output power assuming linear gain
Pout_ideal = lambda p_in: calculated_power_gain*(p_in-Pin[0]) + Pout[0]
Probe_Power_Mag = np.power(10,Sweep_Array[Sweep_Array_Record_Index]['Pinput_dB']/10) #-- Substitute for input power
S21 = Sweep_Array[Sweep_Array_Record_Index]['S21']
S21_Pout = np.power(np.abs(S21),2)*Probe_Power_Mag
# create interpolation funcation to what Pin would be at an arbitrary Pout
decompression_function = interp1d(Pout,Pin,kind = 'linear')
# for polynomial to Pout vs Pin curve and use this to extrapolate values where Pout in not in interpolation domain
def decompression_function_fit(pout, a,b,c):
return a*np.power(pout,2)+b*pout+c
popt,pcov = curve_fit(decompression_function_fit, Pout, Pin)
decompression_function_extrap = lambda pout : decompression_function_fit(pout,popt[0],popt[1],popt[2])
def decompress_element(z):
        z_Pout = np.power(np.abs(z), 2)*Probe_Power_Mag
from bs4 import BeautifulSoup
import re
from os import listdir
from os.path import isfile, join
import numpy as np
from scipy.optimize import curve_fit
# Initialise some arrays for analyses later
exam_difficulties = []
master_questions_arr = []
# Allow user to choose which folder to ultimately extract converted pdf->html files from.
yn = input("methods (y) or spec (n): ")
if yn.lower() == "y":
folder = 'Methods-Exams'
else:
folder = 'Spec-Exams'
allPDFs = [f for f in listdir(folder) if isfile(join(folder, f))] #Get list of files in spec-exams folder
for file in range(0,len(allPDFs)):
#Setup Variables
code = data = open(folder+"/"+allPDFs[file], encoding="utf8")
html = code.read()
allQuestions = []
allTables = []
allH3 = []
#
# EXTRACT DATA AND FILTER DATA
#
soup = BeautifulSoup(html, "html.parser")
tabletag = soup.body.findAll('table')
exam_id = soup.findAll('title')[0].text #Info about this exam
#print(exam_id)
    #required function
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
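    # e.g. hasNumbers("Question 3") -> True, hasNumbers("Question") -> False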
#filter tables
for table in tabletag:
if table.text.find("Marks") != -1:
allTables.append(table)
# Identify questions
for i in range(2,6):
h3tag = soup.body.findAll('h'+str(i))
for h3 in h3tag:
if h3.text.find("Question") != -1 and hasNumbers(h3.text):
allH3.append(h3)
if len(allH3) > 0:
break
#
# ACCOUNT FOR POSSIBLE HOLES IN THE DATA
#
if len(allH3) != len(allTables): #ONLY IF THERE IS NO 'One-to-one' RELATIONSHIP (else the data has holes)
indexes_of_elements = [] #array to store 'positions' of each element in html
# Fill array of positions for titles
for i in range(0,len(allH3)):
if html.count(allH3[i].text) > 1:
if html.strip().find(allH3[i].text+"</h3") != -1:
indexes_of_elements.append([html.strip().find(allH3[i].text+"</h3"),"h3"])
elif html.strip().find(allH3[i].text+"</a") != -1:
indexes_of_elements.append([html.strip().find(allH3[i].text+"</a"),"h3"])
elif html.strip().find(allH3[i].text+"</h4") != -1:
indexes_of_elements.append([html.strip().find(allH3[i].text+"</h4"),"h3"])
elif html.strip().find(allH3[i].text+"</h2") != -1:
indexes_of_elements.append([html.strip().find(allH3[i].text+"</h2"),"h3"])
elif html.count(allH3[i].text) == 1:
indexes_of_elements.append([html.strip().find(allH3[i].text),"h3"])
previous_search_s = indexes_of_elements[0][0]
index1 = 0
# Fill array of positions for tables
while index1 != -1:
index1 = html.strip().find("<table",previous_search_s) #the left point
if index1 != -1:
indexes_of_elements.append([index1, "table"])
previous_search_s = index1+1
#Sort by order of appearance
indexes_of_elements = sorted(indexes_of_elements,key=lambda x: x[0])
running_index = 0
output = []
#Iterate with a running index to find inconsistencies in the data
for i in range(0,len(indexes_of_elements)):
#print(indexes_of_elements[i][1] + " ----- " + str(indexes_of_elements[i][0]) + " ------- " + html[indexes_of_elements[i][0]:indexes_of_elements[i][0]+20])
if indexes_of_elements[i][1] == "table":
running_index = running_index - 1
output.append("T")
elif indexes_of_elements[i][1] != "table":
running_index = running_index + 1
output.append("H")
if running_index == -1:
#Mismatch has occured, input a dummy title
output[len(output)-1] = "E"
output.append("T")
running_index = 0
elif running_index == 2:
#Mismatch has occured, input a dummy title
output[len(output)-1] = "M"
output.append("H")
running_index = 1
#Create one-to-one relationship array
j1=0
j2=0
#print(output)
for i in range(1, len(output)+1):
if i % 2 == 0: #Every H-T pair
if output[i-2] != "E" and output[i-1] != "M":
#print(j1,len(allH3),j2,len(allTables))
allQuestions.append([allH3[j1].text,allTables[j2]])
j1+=1
j2+=1
elif output[i-2] == "E":
try:
allQuestions.append(["Missing (between " + allH3[j1-1].text + " and " + allH3[j1].text + ")",allTables[j2]])
except:
allQuestions.append(["Missing (Unknown location)",allTables[j2]])
j2+=1
elif output[i-1] == "M":
allQuestions.append([allH3[j1].text,"Missing"])
j1+=1
else:
for i in range(0, len(allH3)):
allQuestions.append([allH3[i].text,allTables[i]])
#print(str(len(allQuestions)) + " Questions. From Hardest-Easiest:") #print the length (i.e-#of questions)
#
#DATA MANIPULATION
#
#Calculate difficulty ratings
for i in range(0, len(allQuestions)):
if allQuestions[i][1] != "Missing":
try:
marks = int(allQuestions[i][1].text.split('A')[0].strip()[-1])
data = []
table = allQuestions[i][1]
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele]) # Get rid of empty values
percentages = data[1]
average = 0
mark = 0
for j in range(1,marks+2):
average += (int(percentages[j])/100)*mark
mark += 1
diff = average/marks
allQuestions[i].append(diff)
except:
try:
avg = float(re.findall("\d\.\d", allQuestions[i][1].text)[0])
diff = avg/marks
allQuestions[i].append(diff)
except:
try:
avg = float(allQuestions[i][1].text[len(allQuestions[i][1].text)-1:len(allQuestions[i][1].text)])
diff = avg/marks
if diff <= 1:
allQuestions[i].append(diff)
else:
print("error" + 1)
except:
                        allQuestions[i].append(-1)  # could not parse an average mark; flag as an edge case
else:
allQuestions[i].append(-2)
#Sort allQuestions list by difficulty
#allQuestions = sorted(allQuestions,key=lambda x: x[2])
sum_diff = 0
#Add exam year to allQuestions and display questions
for i in range(0, len(allQuestions)):
allQuestions[i].append(exam_id)
#print(allQuestions[i][0], "-", allQuestions[i][2])
sum_diff += allQuestions[i][2]
master_questions_arr.append(allQuestions[i])
avgDiff = sum_diff/len(allQuestions)
exam_difficulties.append([avgDiff,exam_id])
#print("Overall Difficulty: ", avgDiff)
master_questions_arr = sorted(master_questions_arr,key=lambda x: x[2]) #Sort all questions by difficulty
print("Loaded " + str(len(master_questions_arr)) + " total questions from " + str(len(exam_difficulties)) + " exams.")
user = input("Do you want questions with missing tables to be displayed? (y/n): ")
#Display ALL QUESTIONS:
for question in master_questions_arr:
if question[2] == -2:
#Lost data
if user.lower() == "y":
print(question[0], "-", "MISSING TABULAR DATA", " from: ", question[3])
elif question[2] == -1 or question[2] > 1:
#Edge Case
print(question[0], " - EXTREME EDGE CASE, from: ", question[3])
elif question[2] >= 0 and question[2] <= 1:
print(question[0], "-", question[2], " from: ", question[3])
#Display difficulty distribution graph
import csv
import matplotlib.pyplot as plt
import numpy as np
average_list = []
for question in master_questions_arr:
if question[2] > 0 and question[2] <= 1:
average_list.append(question[2])
plt.hist(average_list, bins = 10)
plt.show()
# Report the central tendency of the difficulty distribution
print("Mean difficulty:", np.mean(average_list))
print("Median difficulty:", np.median(average_list))
# *-* encoding: utf-8 *-*
# Unit tests for ppn functions
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from faster_particles.ppn_utils import generate_anchors, \
top_R_pixels, clip_pixels, \
compute_positives_ppn1, compute_positives_ppn2, assign_gt_pixels, \
include_gt_pixels, predicted_pixels, crop_pool_layer, \
all_combinations, slice_rois, \
nms_step, nms
def generate_anchors_np(im_shape, repeat=1):
dim = len(im_shape)
anchors = np.indices(im_shape).transpose(tuple(range(1, dim+1)) + (0,))
anchors = anchors + 0.5
anchors = np.reshape(anchors, (-1, dim))
return np.repeat(anchors, repeat, axis=0)
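# Example (illustrative): generate_anchors_np((2, 2)) returns the pixel centres
# [[0.5, 0.5], [0.5, 1.5], [1.5, 0.5], [1.5, 1.5]].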
def clip_pixels_np(pixels, im_shape):
"""
pixels shape: [None, 2]
Clip pixels (x, y) to [0, im_shape[0]) x [0, im_shape[1])
"""
dim = len(im_shape)
for i in range(dim):
pixels[:, i] = np.clip(pixels[:, i], 0, im_shape[i])
return pixels
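# Example (illustrative): clip_pixels_np(np.array([[-0.5, 3.2]]), (3, 3)) -> [[0., 3.]]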
class Test(unittest.TestCase):
def generate_anchors(self, im_shape, repeat):
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
with tf.Session():
anchors_tf = generate_anchors(im_shape, repeat=repeat)
return np.array_equal(anchors_tf, anchors_np)
def test_generate_anchors_2d(self):
im_shape = (2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def test_generate_anchors_3d(self):
im_shape = (2, 2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def clip_pixels(self, im_shape, proposals_np):
pixels_np = clip_pixels_np(proposals_np, im_shape)
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
pixels = clip_pixels(proposals, im_shape)
pixels_tf = sess.run(pixels)
return np.allclose(pixels_np, pixels_tf)
def test_clip_pixels_2d(self):
im_shape = (3, 3)
proposals_np = np.array([[-0.5, 1.0], [0.01, 3.4], [2.5, 2.99]])
return self.clip_pixels(im_shape, proposals_np)
def test_clip_pixels_3d(self):
im_shape = (2, 2, 2)
proposals_np = np.random.rand(5, 3)*4-1
return self.clip_pixels(im_shape, proposals_np)
def top_R_pixels(self, R, threshold, proposals_np, scores_np):
threshold_indices = np.nonzero(scores_np > threshold)
scores_np = scores_np[threshold_indices]
proposals_np = proposals_np[threshold_indices]
sorted_indices = np.argsort(scores_np)
roi_scores_np = scores_np[sorted_indices][::-1][:R]
rois_np = proposals_np[sorted_indices][::-1][:R]
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
scores = tf.constant(scores_np, dtype=tf.float32)
rois, roi_scores = top_R_pixels(proposals, scores, R=R, threshold=threshold)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_np, roi_scores_tf)
def test_top_R_pixels_2d(self):
R = 3
threshold = 0.5
# Shape N*N x 2
proposals_np = np.array([[0.0, 1.0], [0.5, 0.7], [0.3, 0.88], [-0.2, 0.76], [0.23, 0.47], [0.33, 0.56], [0.0, 0.4], [-0.6, 0.3], [0.27, -0.98]])
# Shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98, 0.72])
        self.assertTrue(self.top_R_pixels(R, threshold, proposals_np, scores_np))
def test_top_R_pixels_3d(self):
R = 3
threshold = 0.5
# shape N*N x 3
proposals_np = np.array([[0.0, 1.0, 0.3], [0.87, 0.1, -0.34], [0.45, 0.68, 0.09],
[0.34, 0.21, -0.6], [0.12, -0.4, 0.8], [0.48, 0.43, -0.79], [0.89, 0.05, -0.02], [0.9, 0.04, 1.0]])
# shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98])
        self.assertTrue(self.top_R_pixels(R, threshold, proposals_np, scores_np))
def predicted_pixels(self, im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np):
dim = len(im_shape)
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
scores = rpn_cls_prob_np[..., 1:]
roi_scores_np = np.reshape(scores, (-1, scores.shape[-1]))
anchors_np = np.reshape(anchors_np, (-1,) + (rpn_cls_prob_np.shape[1],) * dim + (dim,))
proposals = anchors_np + rpn_bbox_pred_np
proposals = np.reshape(proposals, (-1, dim))
# clip predicted pixels to the image
proposals = clip_pixels_np(proposals, im_shape) # FIXME np function
rois_np = proposals.astype(float)
with tf.Session() as sess:
anchors_tf = generate_anchors(im_shape, repeat=repeat)
rpn_cls_prob_tf = tf.constant(rpn_cls_prob_np, dtype=tf.float32)
rpn_bbox_pred_tf = tf.constant(rpn_bbox_pred_np, dtype=tf.float32)
rois, roi_scores = predicted_pixels(rpn_cls_prob_tf, rpn_bbox_pred_tf, anchors_tf, im_shape)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_tf, roi_scores_np)
def test_predicted_pixels1_2d(self): # for PPN1
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = 2 (background/signal)
rpn_cls_prob_np = np.array([[[[0.1, 0.9], [0.3, 0.7]], [[0.5, 0.5], [0.8, 0.2]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
        self.assertTrue(self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np))
def test_predicted_pixels1_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 2)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
        self.assertTrue(self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np))
def test_predicted_pixels2_2d(self): # for PPN2
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = num_classes
rpn_cls_prob_np = np.array([[[[0.1, 0.8, 0.1], [0.3, 0.65, 0.05]], [[0.5, 0.02, 0.48], [0.8, 0.18, 0.02]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
        self.assertTrue(self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np))
def test_predicted_pixels2_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 3)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
        self.assertTrue(self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np))
def include_gt_pixels(self, rois_np, gt_pixels_np, dim1, dim2):
dim = gt_pixels_np.shape[-1]
# convert to F3 coordinates
        gt_pixels_coord = np.floor(gt_pixels_np / dim1)
import numpy as np
from torchvision import datasets, transforms
import cv2 as cv
import torch
import matplotlib.pyplot as plt
import scipy.io as sio
def batch_tensor_to_3dti(data, lift_dim=6):
dts = []
for k in range(data.shape[0]):
img = data[k, 0, :, :].numpy()
dti_img = img_to_3dti(img, lift_dim)
dts.append(dti_img)
tensor_dti_img = torch.tensor(dts, dtype=torch.float32)
return tensor_dti_img
def img_to_3dti(img, lift_dim):
dim = img.shape[0]
w = 0.3
    dt = np.zeros([3, 3, dim, dim, lift_dim])
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
import numpy as np
import einops
import xarray as xr
from climart.data_loading.constants import get_coordinates
def set_labels_and_ticks(ax,
title: str = "",
xlabel: str = "", ylabel: str = "",
xlabel_fontsize: int = 10, ylabel_fontsize: int = 14,
xlim=None, ylim=None,
xticks=None, yticks=None,
title_fontsize: int = None,
xticks_fontsize: int = None, yticks_fontsize: int = None,
xtick_labels=None, ytick_labels=None,
logscale_y: bool = False,
show: bool = True,
grid: bool = True,
legend: bool = True, legend_loc='best', legend_prop=10,
full_screen: bool = False,
tight_layout: bool = True,
save_to: str = None
):
ax.set_title(title, fontsize=title_fontsize)
ax.set_xlabel(xlabel, fontsize=xlabel_fontsize)
ax.set_ylabel(ylabel, fontsize=ylabel_fontsize)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if xticks is not None:
ax.set_xticks(xticks)
if xtick_labels is not None:
ax.set_xticklabels(xtick_labels)
if xticks_fontsize:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(xticks_fontsize)
# tick.label.set_rotation('vertical')
if logscale_y:
ax.set_yscale('log')
if yticks is not None:
ax.set_yticks(yticks)
if ytick_labels is not None:
ax.set_yticklabels(ytick_labels)
if yticks_fontsize:
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(yticks_fontsize)
if grid:
ax.grid()
if legend:
ax.legend(loc=legend_loc, prop={'size': legend_prop}) #if full_screen else ax.legend(loc=legend_loc)
if tight_layout:
plt.tight_layout()
if save_to is not None:
if full_screen:
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
plt.savefig(save_to, bbox_inches='tight')
if full_screen:
mng.full_screen_toggle()
if show:
plt.show()
class RollingCmaps:
def __init__(self,
unique_keys: list,
pos_cmaps: list = None,
max_key_occurence: int = 5):
if pos_cmaps is None:
pos_cmaps = ['Greens', 'Oranges', 'Blues', 'Greys', 'Purples']
pos_cmaps = [plt.get_cmap(cmap) for cmap in pos_cmaps]
self.cmaps = {key: pos_cmaps[i] for i, key in enumerate(unique_keys)}
self.pos_per_cmap = {key: 0.75 for key in unique_keys} # lower makes lines too white
self.max_key_occurence = max_key_occurence
def __getitem__(self, key):
color = self.cmaps[key](self.pos_per_cmap[key] / self.max_key_occurence) # [self.pos_per_cmap[key]]
self.pos_per_cmap[key] += 1
return color
class RollingLineFormats:
def __init__(self,
unique_keys: list,
pos_markers: list = None,
cmap = None,
linewidth: float = 4
):
print(unique_keys)
if pos_markers is None:
pos_markers = ['-', '--', ':', '-', '-.']
if cmap is None:
cmap = plt.get_cmap('viridis')
cs = ['#1f77b4', '#ff7f0e', '#2ca02c', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#d62728', '#bcbd22', '#17becf']
# cs = plt.rcParams['axes.prop_cycle'].by_key()['color']
self.pos_markers = pos_markers
# self.cmaps = {key: cmap(i/len(unique_keys)) for i, key in enumerate(unique_keys)}
self.cmaps = {key: cs[i] for i, key in enumerate(unique_keys)}
self.pos_per_key = {key: 0 for key in unique_keys} # lower makes lines too white
self.lws = {key: linewidth for key in unique_keys}
def __getitem__(self, key):
cur_i = self.pos_per_key[key]
lw = self.lws[key]
line_format = self.pos_markers[cur_i] # [self.pos_per_cmap[key]]
self.pos_per_key[key] += 1
self.lws[key] = max(1, lw - 1)
return line_format, dict(c=self.cmaps[key], linewidth=lw)
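# Usage sketch (illustrative, not from the original file): keep one colour per model key
# while cycling line styles for repeated runs of that key.
#   fmts = RollingLineFormats(['CNN', 'MLP'])
#   style, plot_kwargs = fmts['CNN']
#   ax.plot(x, y, style, label='CNN run 1', **plot_kwargs)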
def plot_groups(xaxis_key, metric='Test/MAE', ax=None, show: bool = True, **kwargs):
if not ax:
fig, ax = plt.subplots() # 1
for key, group in kwargs.items():
group.plot(xaxis_key, metric, yerr='std', label=key, ax=ax)
set_labels_and_ticks(
ax, xlabel='Used training points', ylabel=metric, show=show
)
def height_errors(Ytrue: np.ndarray, preds: np.ndarray, height_ticks=None,
xlabel='', ylabel='height', fill_between=True, show=True):
"""
Plot MAE and MBE as a function of the height/pressure
:param Ytrue:
:param preds:
:param height_ticks: must have same shape as Ytrue.shape[1]
:param show:
:return:
"""
n_samples, n_levels = Ytrue.shape
diff = Ytrue - preds
abs_diff = np.abs(diff)
levelwise_MBE = np.mean(diff, axis=0)
levelwise_MAE = np.mean(abs_diff, axis=0)
levelwise_MBE_std = np.std(diff, axis=0)
levelwise_MAE_std = np.std(abs_diff, axis=0)
# Plotting
plotting_kwargs = {'yticks': height_ticks, 'ylabel': ylabel, 'show': show, "fill_between": fill_between}
yaxis = np.arange(n_levels)
figMBE = height_plot(yaxis, levelwise_MBE, levelwise_MBE_std, xlabel=xlabel + ' MBE', **plotting_kwargs)
figMAE = height_plot(yaxis, levelwise_MAE, levelwise_MAE_std, xlabel=xlabel + ' MAE', **plotting_kwargs)
if show:
plt.show()
return figMAE, figMBE
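# Usage sketch (hypothetical arrays): level-wise MAE/MBE profiles for predictions of shape
# (n_samples, n_levels).
#   figMAE, figMBE = height_errors(Y_true, Y_pred, xlabel='Flux', show=False)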
def height_plot(yaxis, line, std, yticks=None, ylabel=None, xlabel=None, show=False, fill_between=True):
fig, ax = plt.subplots(1)
if "mbe" in xlabel.lower():
# to better see the bias
ax.plot(np.zeros(yaxis.shape), yaxis, '--', color='grey')
p = ax.plot(line, yaxis, '-', linewidth=3)
if fill_between:
ax.fill_betweenx(yaxis, line - std, line + std, alpha=0.2)
else:
ax.plot(line - std, yaxis, '--', color=p[0].get_color(), linewidth=1.5)
ax.plot(line + std, yaxis, '--', color=p[0].get_color(), linewidth=1.5)
xlim = [0, ax.get_xlim()[1]] if 'mae' in xlabel.lower() or 'rmse' in xlabel.lower() else None
set_labels_and_ticks(ax=ax, xlabel=xlabel, xlim=xlim,
yticks=yaxis, ytick_labels=yticks,
ylabel=ylabel, show=show)
return fig
def level_errors(Y_true, Y_preds, epoch):
errors = np.mean((Y_true - Y_preds), axis=0)
colours = ['red' if x < 0 else 'green' for x in errors]
index = np.arange(0, len(colours), 1)
# Draw plot
lev_fig = plt.figure(figsize=(14, 14), dpi=80)
plt.hlines(y=index, xmin=0, xmax=errors)
for x, y, tex in zip(errors, index, errors):
t = plt.text(x, y, round(tex, 2), horizontalalignment='right' if x < 0 else 'left',
verticalalignment='center', fontdict={'color': 'red' if x < 0 else 'green', 'size': 10})
# Styling
plt.yticks(index, ['Level: ' + str(z) for z in index], fontsize=12)
plt.title(f'Average Level-wise error for epoch: {epoch}', fontdict={'size': 20})
plt.grid(linestyle='--', alpha=0.5)
plt.xlim(-5, 5)
return lev_fig
def profile_errors(Y_true, Y_preds, plot_profiles=200, var_name=None, data_dir: str = None,
error_type='mean', plot_type='scatter', set_seed=False, title=""):
coords_data = get_coordinates(data_dir)
lat = list(coords_data.get_index('lat'))
lon = list(coords_data.get_index('lon'))
total_profiles, n_levels = Y_true.shape
    if set_seed: # To get the same profiles every time
np.random.seed(7)
errors = np.abs(Y_true - Y_preds)
# print(errors.shape, Y_true.shape, total_profiles / 8192)
if plot_type.lower() == 'scatter':
latitude = []
longitude = []
for i in lat:
for j in lon:
latitude.append(i)
longitude.append(j)
lat_var = np.array(latitude)
lon_var = np.array(longitude)
n_times = int(total_profiles / 8192)
indices = np.arange(0, total_profiles)
indices_train = np.random.choice(total_profiles, total_profiles - plot_profiles, replace=False)
        indices_rest = np.setxor1d(indices_train, indices, assume_unique=True)
import numpy as np
class Prototype_Selector:
def __init__(self, data_x, datalabel_y, M = 10):
"""
:param data_x:
:param datalabel_y:
:param test_x:
:param testlabel_y:
"""
self.x_train = np.array(data_x)
self.y_train = np.array(datalabel_y)
self.M = M
self.bbag = None
self.gratios = None
def k_mean_cluster_return_centroid(self, population, centroid_number=10):
"""
returns centroids from population
"""
from sklearn.cluster import KMeans
original_shape = np.array(population).shape
        X = np.array(population)
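        # The original file is truncated here; the lines below are a minimal completion
        # (assumed, not from the source): fit KMeans on the flattened samples and return
        # the cluster centres as prototypes.
        kmeans = KMeans(n_clusters=centroid_number, n_init=10).fit(X.reshape(len(X), -1))
        return kmeans.cluster_centers_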
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import itertools
from matplotlib import dates
import datetime
#-----------------------------------------------------------------------------
# Rutas para guardar ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##----------------------------------------Método 1 radiación al tope de la atmosfera-----------------------------------------##
##---CALCULO DE LA DECLINACION SOLAR---##
J = np.arange(1, 366, 1)
g = 2*m.pi*(J-1)/365
d = (0.006918 - 0.399912*np.cos(g) + 0.070257*np.sin(g) - 0.006758*np.cos(2*g) + 0.000907*np.sin(2*g) - 0.002697*np.cos(3*g) + 0.00148*np.sin(3*g))
dd = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in d))
##---CALCULO DEL ANGULO HORARIO---##
def daterange(start_date, end_date):
delta = timedelta(hours=1)
while start_date < end_date:
yield start_date
start_date += delta
##---Ecuación del tiempo---##
B = 2*m.pi*(J-81)/365
ET = 9.87*np.sin(2*B) - 7.53*np.cos(B) - 1.5*np.sin(B)  # equation of time, in minutes
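# The file is truncated here; a sketch of the usual next step (assumed, not from the
# source): shift local standard time by ET (minutes) plus a longitude correction of
# 4 minutes per degree, then take the hour angle as 15 degrees per hour from solar noon.
#   solar_time = local_time + (ET + 4.0*(L_std - L_loc)) / 60.0   # hours; L_* in degrees west
#   w = 15.0 * (solar_time - 12.0)                                # hour angle in degrees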
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 29 14:20:43 2016
Updated 26/5/17
@author: robin
"""
##Extends NISTScrape.py and SVRtest.py
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.optimize import minimize
from scipy import misc
from PIL import Image
from datetime import datetime
##Uses SVR model obtained by collecting data from NIST for variation in
##Carbon loading and Pt loading at energy of 32.5keV
model_filename = 'SVR_model_Ion.sav'
#Base directory
global C_scale
C_scale = 0 #Percent loss ie 0.7 loss, 70% less, 30% remaining in CCL from CO2 measurements
cyclenum = "BOL"
baseDir = r"E:\processed\Cell29\CL_Analysis\\" + cyclenum + "\\"
#Output filenames
IonImage = cyclenum + "_I_Load.tif"
PtImage = cyclenum + "_Pt_Load.tif"
CImage = cyclenum + "_C_Load.tif"
DensityImage = cyclenum + "_Density.tif"
TotalLoadImage = cyclenum + "_TotalLoad.tif"
MattenImage = cyclenum + "_Matten.tif"
PorosityImage = cyclenum + "_Porosity.tif"
##------------Initialization-------------##
#Load images
Timage = misc.imread(baseDir + cyclenum + "_thickness.tif") #Thickness_map16
Gimage = misc.imread(baseDir + cyclenum + "_MAX.tif") #BOL_avg_flat
#Pixel size um
pix = 1.53
#Sub of area if necessary
T = Timage#[400:500, 400:500] #Timage
G = Gimage#[400:500, 400:500] #Gimage
#Cmap = Cmapimage[400:500, 400:500]
#Ptmap = Ptmapimage[400:500, 400:500]
#Imap = Imapimage[400:500, 400:500]
#Thickness Calibration 49.6um 2^16 - 1 from 16bitGS to thickness value
#calib = 49.6/((2**16)-1)
# load the SVR model from disk
loaded_model = pickle.load(open(model_filename, 'rb'))
#Calibration curve for GSV calc only ##Updated for MAX GSV
mcal = 2390.5 #Max
bcal = 22974
#BOL expected values for 50/50 C/Pt 23wt% Ionomer
wt_exp_Ion = 23
wt_exp_Pt = (100-wt_exp_Ion)*0.5
wt_exp_C = 100 - wt_exp_Ion - wt_exp_Pt
load_exp_C = 0.4
load_exp_Pt = 0.4
load_exp_Ion = (wt_exp_Ion/wt_exp_Pt)*load_exp_Pt
#Molar masses
M_C = 12
M_Pt = 195
M_Ion = 544
M_water = 18
MM = np.array([M_C,M_Pt,M_Ion,1])
#Density of particles
Cp = 2.266
Ptp = 21.45
Ip = 1.8
#Volume cm^3
vox = (pix**3)*0.000000000001
#Array initialization
Matten_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
C_load_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
Pt_load_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
Ion_load_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
Density_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
TotalLoad_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
Porosity_array = np.zeros((T.shape[0],T.shape[1]), dtype=float)
import matplotlib.pyplot as plt
import numpy as np
funcs = [np.sum,np.prod,np.max]
inputs = [np.random.rand(i) for i in 10**np.arange(5)]
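# Hypothetical continuation (the original file is truncated here): time each reduction on
# each input size and plot how the cost scales. All names below are illustrative only.
import timeit
times = [[timeit.timeit(lambda: f(x), number=100) for x in inputs] for f in funcs]
for f, t in zip(funcs, times):
    plt.loglog([x.size for x in inputs], t, marker='o', label=f.__name__)
plt.xlabel('input size')
plt.ylabel('seconds per 100 calls')
plt.legend()
plt.show()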
import eigenBot
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Set constants
startTime = dt.datetime(2012, 1, 1)
endTime = dt.datetime(2013, 12, 31)
prices = pd.DataFrame()
if __name__ == "__main__":
config, tickers = eigenBot.loadConfig()
numTicks = 1
for tick in tickers:
prices[tick] = eigenBot.getTickerQuotes(config, tick, startTime, endTime)
returns = prices.pct_change()
# print("Data:\n", prices.head())
# print("Percent Change:\n", returns.head())
returns = returns.iloc[1:, :] # Remove first row of NA's
training_period = 30
in_sample = returns.iloc[:(returns.shape[0]-training_period), :].copy()
# Save the tickers
tickList = returns.columns.copy()
# Set up plotting
covariance_matrix = in_sample.cov()
D, S = np.linalg.eigh(covariance_matrix)
eigenportfolio_1 = S[:,-1] / np.sum(S[:,-1]) # Normalize to sum to 1
eigenportfolio_2 = S[:,-2] / np.sum(S[:,-2]) # Normalize to sum to 1
# Setup Portfolios
eigenportfolio = pd.DataFrame(data= eigenportfolio_1, columns = ['Investment Weight'], index = tickers)
eigenportfolio2 = pd.DataFrame(data= eigenportfolio_2, columns = ['Investment Weight'], index = tickers)
# Plot
# f = plt.figure()
# ax = plt.subplot(121)
# eigenportfolio.plot(kind='bar', ax=ax, legend=False)
# plt.title("Max E.V. Eigenportfolio")
# ax = plt.subplot(122)
# eigenportfolio2.plot(kind='bar', ax=ax, legend=False)
# plt.title("2nd E.V. Eigenportfolio")
# plt.show()
    in_sample_ind = np.arange(0, (returns.shape[0]-training_period+1))
import numpy as np
import torch
# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image
# Copyright (C) 2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class EvalUtil:
""" Util class for evaluation networks.
"""
def __init__(self, num_kp=21):
# init empty data storage
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def empty(self):
count = 0
for i in range(self.num_kp):
count += len(self.data[i])
return count == 0
def feed(self, keypoint_gt, keypoint_pred, keypoint_vis=None):
"""
Used to feed data to the class.
Stores the euclidean distance between gt and pred, when it is visible.
"""
if isinstance(keypoint_gt, torch.Tensor):
keypoint_gt = keypoint_gt.detach().cpu().numpy()
if isinstance(keypoint_pred, torch.Tensor):
keypoint_pred = keypoint_pred.detach().cpu().numpy()
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
if keypoint_vis is None:
keypoint_vis = np.ones_like(keypoint_gt[:, 0])
        keypoint_vis = np.squeeze(keypoint_vis)
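        # The original file is truncated here; the lines below are a minimal sketch of the
        # accumulation step described in the docstring (assumed, not from the source):
        # store the per-keypoint euclidean distance whenever the keypoint is visible.
        euclidean_dist = np.sqrt(np.sum(np.square(keypoint_gt - keypoint_pred), axis=-1))
        for i in range(self.num_kp):
            if keypoint_vis[i]:
                self.data[i].append(euclidean_dist[i])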
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the various reporter store implementations."""
from collections import OrderedDict
import hypothesis.strategies as st
import numpy as np
from hypothesis import given
import pytest
import tests.uv.util.test_init as ti
import uv.reporter.store as rs
@given(st.dictionaries(st.text(min_size=1), st.integers()))
def test_lambda_reporter(m):
# once we push all values into the store we expect metric => singleton.
wrapped = {k: [v] for k, v in m.items()}
double_wrapped = {k: [v, v] for k, v in m.items()}
# make a memory reporter and a paired reader.
mem = rs.MemoryReporter()
reader = mem.reader()
# these lambda reporters write to the backing memory store.
r_reporter = rs.LambdaReporter(report=mem.report)
ra_reporter = rs.LambdaReporter(report_all=mem.report_all)
# report it ALL and check that everything made it in.
ra_reporter.report_all(0, m)
assert reader.read_all(m.keys()) == wrapped
# now use ra_reporter's report method, which should delegate to report_all:
for k, v in m.items():
ra_reporter.report(0, k, v)
# now we should have two copies for each key.
assert reader.read_all(m.keys()) == double_wrapped
# clear and confirm that everything is empty:
mem.clear()
assert reader.read_all(m.keys()) == {k: [] for k in m.keys()}
# do the same thing again, but using the report interface this time.
for k, v in m.items():
r_reporter.report(0, k, v)
assert reader.read_all(m.keys()) == wrapped
# same thing as before, we check the report_all implementation when we only
# supply a report function.
r_reporter.report_all(0, m)
assert reader.read_all(m.keys()) == double_wrapped
def test_lambda_reporter_errors():
"""The close function works, and you have to supply all required args."""
# You have to supply a report or report_all fn.
with pytest.raises(ValueError):
rs.LambdaReporter()
def explode():
raise IOError("Don't close me!")
report = rs.LambdaReporter(report=lambda _: None, close=explode)
with pytest.raises(IOError):
report.close()
def test_logging_reporter():
"""Check that the LoggingReporter actually logs out properly."""
mem = ti.MemFile()
reporter = rs.LoggingReporter(file=mem)
# reporter handles non-native types like float32 just fine.
reporter.report(0, "a", np.float32(1))
# compound logging.
m = OrderedDict([("a", 2), ("b", "cake")])
reporter.report_all(1, m)
# all items have been logged out.
assert mem.items() == [
'Step 0: a = 1.000', '\n', 'Step 1: a = 2.000, b = cake', '\n'
]
def test_logging_reporter_types():
mem = ti.MemFile()
reporter = rs.LoggingReporter(file=mem)
  v = np.array([1, 2, 3])
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from base import *
def load_baseline():
fname = 'results.txt'
res = dict()
with open(fname, 'rt') as f:
f.readline()
for line in f:
a = line.split()
res[a[0]] = [float(i.split('+')[0]) for i in a[1:]]
return res
def load_wer(name):
with open(name, 'rt') as f:
for line in f:
if line.split():
labels = line.split()
break
values = dict()
for line in f:
a = line.split()
for i, v in enumerate(a):
try:
v = float(v)
label = labels[i]
except:
label = 'epoch'
v = float(v[5:])
if label not in values:
values[label] = []
values[label].append(v)
return values
# workdirs = ['.',
# '/mnt/workspace2/wangbin/server12_work/TRF-NN-tensorflow/egs/ptb_chime4test/local',
# '/mnt/workspace/wangbin/server9_work/TRF-NN-tensorflow/egs/ptb_chime4test/local']
workdirs = ['.']
def search_file(name):
for workdir in workdirs:
s = os.path.join(workdir, name)
if wb.exists(s):
print('load %s' % s)
return s
raise TypeError('Can not find file: %s' % name)
logs = [
'train1000/crf/crf_blstm_cnn_we100_ce100_c2wrnn_dropout0.5_adam',
'train1000/trf_noise1.0_blstm_cnn_we200_ce100_c2wrnn',
'train5000/crf/crf_blstm_cnn_we100_ce100_c2wrnn_dropout0.5_adam',
'train5000/trf_noise1.0_blstm_cnn_we200_ce100_c2wrnn'
]
baseline_name = ['KN5_00000']
colors = ['r', 'g', 'b', 'k', 'c', 'y']
baseline = wb.FRes('../full/results.txt')
def smooth(a, width=100):
    b = np.array(a)
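    # The file is truncated here; a minimal completion (assumed, not from the source):
    # a trailing moving average over a window of `width` points.
    return [np.mean(b[max(0, i - width + 1):i + 1]) for i in range(len(b))]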
import os
import copy
import glob
import numpy as np
from gains import Absorber
import corner
from utils import (fit_2d_gmm, vcomplex, nested_ddict, make_ellipses,
baselines_2_ants, find_outliers_2d_mincov,
find_outliers_2d_dbscan, find_outliers_dbscan, fit_kde,
fit_2d_kde, hdi_of_mcmc, hdi_of_sample, bc_endpoint, ants_2_baselines)
import matplotlib
from uv_data import UVData
from from_fits import create_model_from_fits_file
from model import Model
from spydiff import import_difmap_model, modelfit_difmap
from spydiff import modelfit_difmap
matplotlib.use('Agg')
label_size = 12
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
def xy_2_rtheta(params):
flux, x, y = params[:3]
r = np.sqrt(x ** 2 + y ** 2)
theta = np.rad2deg(np.arctan(x / y))
result = [flux, r, theta]
try:
result.extend(params[3:])
except IndexError:
pass
return result
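# Worked example (illustrative): a component with flux 1.0 at (x, y) = (3, 4) mas maps to
# r = sqrt(3**2 + 4**2) = 5 mas and theta = rad2deg(arctan(3/4)) ~ 36.87 deg:
#   xy_2_rtheta([1.0, 3.0, 4.0]) -> [1.0, 5.0, 36.87]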
def boot_ci(boot_images, original_image, cred_mass=0.68, kind=None):
"""
Calculate bootstrap CI.
:param boot_images:
Iterable of 2D numpy arrays with bootstrapped images.
:param original_image:
2D numpy array with original image.
:param kind: (optional)
        Type of CI. "asym", "bc" or None. If ``None`` then a symmetric one.
(default: ``None``)
:return:
Two numpy arrays with low and high CI borders for each pixel.
"""
images_cube = np.dstack(boot_images)
boot_ci = np.zeros(np.shape(images_cube[:, :, 0]))
mean_boot = np.zeros(np.shape(images_cube[:, :, 0]))
hdi_0 = np.zeros(np.shape(images_cube[:, :, 0]))
hdi_1 = np.zeros(np.shape(images_cube[:, :, 0]))
hdi_low = np.zeros(np.shape(images_cube[:, :, 0]))
hdi_high = np.zeros(np.shape(images_cube[:, :, 0]))
alpha = 1 - cred_mass
print("calculating CI intervals")
if kind == "bc":
for (x, y), value in np.ndenumerate(boot_ci):
hdi_low[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], alpha/2.)
hdi_high[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], 1-alpha/2.)
else:
for (x, y), value in np.ndenumerate(boot_ci):
hdi = hdi_of_sample(images_cube[x, y, :], cred_mass=cred_mass)
boot_ci[x, y] = hdi[1] - hdi[0]
hdi_0[x, y] = hdi[0]
hdi_1[x, y] = hdi[1]
mean_boot[x, y] = np.mean(images_cube[x, y, :])
if kind == 'asym':
hdi_low = original_image - (mean_boot - hdi_0)
hdi_high = original_image + hdi_1 - mean_boot
else:
hdi_low = original_image - boot_ci / 2.
hdi_high = original_image + boot_ci / 2.
return hdi_low, hdi_high
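# Usage sketch (hypothetical arrays, not from the original): per-pixel symmetric 68% CI
# from a stack of bootstrapped maps.
#   boot_maps = [np.random.normal(size=(64, 64)) for _ in range(200)]
#   original = np.zeros((64, 64))
#   hdi_low, hdi_high = boot_ci(boot_maps, original, cred_mass=0.68)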
def analyze_bootstrap_samples(dfm_model_fname, booted_mdl_paths,
dfm_model_dir=None, plot_comps=None,
plot_file=None, txt_file=None, cred_mass=0.68,
coordinates='xy', out_samples_path=None,
limits=None, fig=None):
"""
Plot bootstrap distribution of model component parameters.
:param dfm_model_fname:
File name of original difmap model.
:param booted_mdl_paths:
Iterable of paths to bootstrapped difmap models.
:param dfm_model_dir: (optional)
Directory with original difmap model. If ``None`` then CWD. (default:
``None``)
:param plot_comps: (optional)
Iterable of components number to plot on same plot. If ``None`` then
plot parameter distributions of all components.
:param plot_file: (optional)
File to save picture. If ``None`` then don't save picture. (default:
``None``)
:param txt_file: (optional)
File to save credible intervals for parameters. If ``None`` then don't
save credible intervals. (default: ``None``)
:param cred_mass: (optional)
Value of credible interval mass. Float in range (0., 1.). (default:
``0.68``)
:param coordinates: (optional)
Type of coordinates to use. ``xy`` or ``rtheta``. (default: ``xy``)
"""
n_boot = len(booted_mdl_paths)
# Get params of initial model used for bootstrap
comps_orig = import_difmap_model(dfm_model_fname, dfm_model_dir)
comps_params0 = {i: [] for i in range(len(comps_orig))}
for i, comp in enumerate(comps_orig):
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
if coordinates == 'xy':
params = comp.p
elif coordinates == 'rtheta':
params = xy_2_rtheta(comp.p)
else:
raise Exception
comps_params0[i].extend(list(params))
# Load bootstrap models
comps_params = {i: [] for i in range(len(comps_orig))}
for booted_mdl_path in booted_mdl_paths:
path, booted_mdl_file = os.path.split(booted_mdl_path)
comps = import_difmap_model(booted_mdl_file, path)
for i, comp in enumerate(comps):
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
if coordinates == 'xy':
params = comp.p
elif coordinates == 'rtheta':
params = xy_2_rtheta(comp.p)
else:
raise Exception
comps_params[i].extend(list(params))
comps_to_plot = [comps_orig[k] for k in plot_comps]
# (#boot, #parameters)
    boot_data = np.hstack([np.array(comps_params[i]).reshape((n_boot, comps_orig[i].size))
                           for i in plot_comps])
# Save all bootstrap samples to file optionally
if out_samples_path:
        boot_data_all = np.hstack([np.array(comps_params[i]).reshape((n_boot, comps_orig[i].size))
                                   for i in range(len(comps_orig))])
np.savetxt(out_samples_path, boot_data_all)
# Optionally plot
figure = None
if plot_file:
if corner:
lens = list(np.cumsum([comp.size for comp in comps_orig]))
lens.insert(0, 0)
labels = list()
for comp in comps_to_plot:
for lab in np.array(comp._parnames)[~comp._fixed]:
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
if coordinates == 'rtheta':
if lab == 'x':
lab = 'r'
if lab == 'y':
lab = 'theta'
elif coordinates == 'xy':
pass
else:
raise Exception
labels.append(r'' + '$' + lab + '$')
try:
n = sum([c.size for c in comps_to_plot])
if fig is None:
fig, axes = matplotlib.pyplot.subplots(nrows=n, ncols=n)
fig.set_size_inches(16.5, 16.5)
corner.corner(boot_data, labels=labels, plot_contours=True,
plot_datapoints=False, color='gray',
levels=[0.68,0.95],
# smooth=0.5,
# bins=20,
# fill_contours=True,
# range=limits,
truths=np.hstack([comps_params0[i] for i in
plot_comps]),
title_kwargs={"fontsize": 14},
label_kwargs={"fontsize": 14},
quantiles=[0.16, 0.5, 0.84], fig=fig,
# show_titles=True,
hist_kwargs={'normed': True,
'histtype': 'step',
'stacked': True,
'ls': 'solid'},
title_fmt=".4f", max_n_ticks=3)
# figure.gca().annotate("Components {}".format(plot_comps),
# xy=(0.5, 1.0),
# xycoords="figure fraction",
# xytext=(0, -5),
# textcoords="offset points", ha="center",
# va="top")
# figure.savefig(plot_file, format='eps', dpi=600)
except (ValueError, RuntimeError) as e:
with open(plot_file + '_failed_plot', 'w'):
print("Failed to plot... ValueError")
else:
print("Install ``corner`` for corner-plots")
if txt_file:
# Print credible intervals
fn = open(txt_file, 'w')
fn.write("# parameter original.value low.boot high.boot mean.boot"
" median.boot (mean-low).boot (high-mean).boot\n")
recorded = 0
for i in plot_comps:
comp = comps_orig[i]
for j in range(comp.size):
low, high, mean, median = hdi_of_mcmc(boot_data[:, recorded+j],
cred_mass=cred_mass,
return_mean_median=True)
# FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
parnames = comp._parnames
if coordinates == 'xy':
params = comp.p
elif coordinates == 'rtheta':
params = xy_2_rtheta(comp.p)
parnames[1] = 'r'
parnames[2] = 'theta'
else:
raise Exception
fn.write("{:<4} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}"
" {:.6f}".format(parnames[j], params[j], low,
high, mean, median, abs(median - low),
abs(high - median)))
fn.write("\n")
recorded += (j + 1)
fn.close()
return fig
# TODO: Check that numbering of bootstrapped data and their models is OK
def bootstrap_uvfits_with_difmap_model(uv_fits_path, dfm_model_path,
nonparametric=False, use_kde=True,
use_v=False, n_boot=100, stokes='I',
boot_dir=None, recenter=True,
clean_after=True,
out_txt_file='txt.txt',
out_plot_file='plot.png',
pairs=False, niter=100,
bootstrapped_uv_fits=None,
additional_noise=None,
out_rchisq_file=None):
dfm_model_dir, dfm_model_fname = os.path.split(dfm_model_path)
comps = import_difmap_model(dfm_model_fname, dfm_model_dir)
if boot_dir is None:
boot_dir = os.getcwd()
if bootstrapped_uv_fits is None:
uvdata = UVData(uv_fits_path)
model = Model(stokes=stokes)
model.add_components(*comps)
boot = CleanBootstrap([model], uvdata, additional_noise=additional_noise)
os.chdir(boot_dir)
boot.run(nonparametric=nonparametric, use_kde=use_kde, recenter=recenter,
use_v=use_v, n=n_boot, pairs=pairs)
bootstrapped_uv_fits = sorted(glob.glob(os.path.join(boot_dir,
'bootstrapped_data*.fits')))
out_rchisq = list()
for j, bootstrapped_fits in enumerate(bootstrapped_uv_fits):
rchisq = modelfit_difmap(bootstrapped_fits, dfm_model_fname,
'mdl_booted_{}.mdl'.format(j),
path=boot_dir, mdl_path=dfm_model_dir,
out_path=boot_dir, niter=niter,
show_difmap_output=True)
out_rchisq.append(rchisq)
print("Finished modelfit of {}th bootstrapped data with with"
" RChiSq = {}".format(j, rchisq))
if out_rchisq_file is not None:
np.savetxt(out_rchisq_file, np.array(out_rchisq))
booted_mdl_paths = glob.glob(os.path.join(boot_dir, 'mdl_booted*'))
fig = analyze_bootstrap_samples(dfm_model_fname, booted_mdl_paths, dfm_model_dir,
plot_comps=range(len(comps)),
plot_file=out_plot_file, txt_file=out_txt_file)
# Clean
if clean_after:
for file_ in bootstrapped_uv_fits:
os.unlink(file_)
for file_ in booted_mdl_paths:
os.unlink(file_)
return fig
def create_random_D_dict(uvdata, sigma_D):
"""
Create dictionary with random D-terms for each antenna/IF/polarization.
:param uvdata:
Instance of ``UVData`` to generate D-terms.
:param sigma_D:
D-terms residual noise or mapping from antenna names to residual D-term std.
:return:
Dictionary with keys [antenna name][integer of IF]["R"/"L"]
"""
    import collections.abc
d_dict = dict()
for ant in list(uvdata.antenna_mapping.values()):
d_dict[ant] = dict()
for band in range(uvdata.nif):
d_dict[ant][band] = dict()
for pol in ("R", "L"):
# Generating two random complex numbers near (0, 0)
                if isinstance(sigma_D, collections.abc.Mapping):
rands = np.random.normal(loc=0, scale=sigma_D[ant], size=2)
else:
rands = np.random.normal(loc=0, scale=sigma_D, size=2)
d_dict[ant][band][pol] = rands[0]+1j*rands[1]
return d_dict
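# Usage sketch (illustrative): draw residual D-terms with a common 1% std, or pass a
# per-antenna mapping such as {"EF": 0.005, "GB": 0.02} as ``sigma_D``.
#   d_term_dict = create_random_D_dict(uvdata, sigma_D=0.01)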
# TODO: Workaround if no antenna/pol/IF information is available from dict
def create_const_amp_D_dict(uvdata, amp_D, per_antenna=True):
"""
Create dictionary with random D-terms for each antenna/IF/polarization.
:param uvdata:
Instance of ``UVData`` to generate D-terms.
:param amp_D:
D-terms amplitude. Float or mappable with keys [antenna] or
[antenna][pol][IF] (depending on ``per_antenna``) and values - residual
D-term amplitude.
:param per_antenna: (optional)
Boolean. If ``amp_D`` mapping from antenna to Ds or full (IF/pol)?
(default: ``True``)
:return:
Dictionary with keys [antenna name][integer of IF]["R"/"L"] and values -
D-terms.
"""
    import collections.abc
d_dict = dict()
for ant in list(uvdata.antenna_mapping.values()):
d_dict[ant] = dict()
for band in range(uvdata.nif):
d_dict[ant][band] = dict()
for pol in ("R", "L"):
# Generating random complex number near (0, 0)
phase = np.random.uniform(-np.pi, np.pi, size=1)[0]
                if isinstance(amp_D, collections.abc.Mapping):
if per_antenna:
amp = amp_D[ant]
else:
amp = amp_D[ant][pol][band]
else:
amp = amp_D
d_dict[ant][band][pol] = amp*(np.cos(phase)+1j*np.sin(phase))
return d_dict
def create_const_D_dict(uvdata, amp_D, phase_D):
"""
Create dictionary with random D-terms for each antenna/IF/polarization.
:param uvdata:
Instance of ``UVData`` to generate D-terms.
:param amp_D:
D-terms amplitude.
:return:
Dictionary with keys [antenna name][integer of IF]["R"/"L"]
"""
d_dict = dict()
for baseline in uvdata.baselines:
print(baseline)
ant1, ant2 = baselines_2_ants([baseline])
antname1 = uvdata.antenna_mapping[ant1]
antname2 = uvdata.antenna_mapping[ant2]
d_dict[antname1] = dict()
d_dict[antname2] = dict()
for band in range(uvdata.nif):
d_dict[antname1][band] = dict()
d_dict[antname2][band] = dict()
for pol in ("R", "L"):
# Generating random complex number near (0, 0)
d_dict[antname1][band][pol] = amp_D*(np.cos(phase_D)+1j*np.sin(phase_D))
d_dict[antname2][band][pol] = amp_D*(np.cos(phase_D)+1j*np.sin(phase_D))
return d_dict
# TODO: Add 0.632-estimate of extra-sample error.
class Bootstrap(object):
"""
Basic class for bootstrapping data using specified model.
:param models:
        Iterable of ``Model`` subclass instances that represent the model used
        for bootstrapping. There should be only one (or zero) model for each
        Stokes parameter. If there are two, say I-stokes models, then sum them
        first using ``Model.__add__``.
:param uvdata:
Instance of ``UVData`` class.
"""
def __init__(self, models, uvdata):
self.models = models
self.model_stokes = [model.stokes for model in models]
self.data = uvdata
self.model_data = copy.deepcopy(uvdata)
self.model_data.substitute(models)
self.residuals = self.get_residuals()
self.noise_residuals = None
# Dictionary with keys - baseline, #IF, #Stokes and values - instances
# of ``sklearn.neighbors.KernelDensity`` class fitted on the residuals
# (Re&Im) of key baselines
self._residuals_fits = nested_ddict()
# Dictionary with keys - baseline, #IF, #Stokes and values - instances
# of ``sklearn.neighbors.KernelDensity`` class fitted on the residuals
# (Re&Im) of key baselines
self._residuals_fits_2d = nested_ddict()
# Dictionary with keys - baseline, #scan, #IF, #Stokes and values -
# instances of ``sklearn.neighbors.KernelDensity`` class fitted on the
# residuals (Re&Im)
self._residuals_fits_scans = nested_ddict()
# Dictionary with keys - baselines & values - tuples with centers of
# real & imag residuals for that baseline
self._residuals_centers = nested_ddict()
self._residuals_centers_scans = nested_ddict()
# Dictionary with keys - baseline, #IF, #Stokes and value - boolean
# numpy array with outliers
self._residuals_outliers = nested_ddict()
# Dictionary with keys - baseline, #scan, #IF, #Stokes and value -
# boolean numpy array with outliers
self._residuals_outliers_scans = nested_ddict()
def get_residuals(self):
"""
Implements different residuals calculation.
:return:
Residuals between model and data.
"""
raise NotImplementedError
def plot_residuals_trio(self, outname, split_scans=True, freq_average=False,
IF=None, stokes=['RR']):
if IF is None:
IF = range(self.residuals.nif)
if stokes is None:
stokes = range(self.residuals.nstokes)
else:
stokes_list = list()
for stoke in stokes:
print("Parsing {}".format(stoke))
print(self.residuals.stokes)
stokes_list.append(self.residuals.stokes.index(stoke))
stokes = stokes_list
print("Plotting IFs {}".format(IF))
print("Plotting Stokes {}".format(stokes))
for baseline in self.residuals.baselines:
print(baseline)
ant1, ant2 = baselines_2_ants([baseline])
if split_scans:
try:
for i, indxs in enumerate(self.residuals._indxs_baselines_scans[baseline]):
# Complex (#, #IF, #stokes)
data = self.residuals.uvdata[indxs]
# weights = self.residuals.weights[indxs]
if freq_average:
raise NotImplementedError
# # FIXME: Aberage w/o outliers
# # Complex (#, #stokes)
# data = np.mean(data, axis=1)
# for stoke in stokes:
# # Complex 1D array to plot
# data_ = data[:, stoke]
# fig, axes = matplotlib.pyplot.subplots(nrows=2,
# ncols=2)
# matplotlib.pyplot.rcParams.update({'axes.titlesize':
# 'small'})
# axes[1, 0].plot(data_.real, data_.imag, '.k')
# axes[1, 0].axvline(0.0, lw=0.2, color='g')
# axes[1, 0].axhline(0.0, lw=0.2, color='g')
# axes[0, 0].hist(data_.real, bins=10,
# label="Re {}-{}".format(ant1, ant2),
# color="#4682b4")
# legend = axes[0, 0].legend(fontsize='small')
# axes[0, 0].axvline(0.0, lw=1, color='g')
# axes[1, 1].hist(data_.imag, bins=10, color="#4682b4",
# orientation='horizontal',
# label="Im {}-{}".format(ant1, ant2))
# legend = axes[1, 1].legend(fontsize='small')
# axes[1, 1].axhline(0.0, lw=1, color='g')
# fig.savefig("res_2d_bl{}_st{}_scan_{}".format(baseline, stoke, i),
# bbox_inches='tight', dpi=400)
# matplotlib.pyplot.close()
else:
for IF_ in IF:
for stoke in stokes:
# Complex 1D array to plot
data_ = data[:, IF_, stoke]
# weigths_ = weights[:, IF_, stoke]
# data_pw = data_[weigths_ > 0]
                                    data_pw = data_[self.residuals._pw_indxs[indxs, IF_, stoke]]
                                    data_nw = data_[self.residuals._nw_indxs[indxs, IF_, stoke]]
data_out = data_pw[self._residuals_outliers_scans[baseline][i][IF_][stoke]]
# data_nw = data_[weigths_ <= 0]
fig, axes = matplotlib.pyplot.subplots(nrows=2,
ncols=2)
matplotlib.pyplot.rcParams.update({'axes.titlesize':
'small'})
axes[1, 0].plot(data_.real, data_.imag, '.k')
axes[1, 0].plot(data_nw.real, data_nw.imag, '.', color='orange')
axes[1, 0].plot(data_out.real, data_out.imag, '.r')
try:
x_c, y_c = self._residuals_centers_scans[baseline][i][IF_][stoke]
axes[1, 0].plot(x_c, y_c, '.y')
except ValueError:
x_c, y_c = 0., 0.
axes[1, 0].axvline(0.0, lw=0.2, color='g')
axes[1, 0].axhline(0.0, lw=0.2, color='g')
axes[0, 0].hist(data_.real, bins=10,
label="Re "
"{}-{}".format(ant1,
ant2),
color="#4682b4",
histtype='stepfilled',
alpha=0.3,
normed=True)
try:
clf_re = self._residuals_fits_scans[baseline][i][IF_][stoke][0]
sample = np.linspace(np.min(data_.real) - x_c,
np.max(data_.real) - x_c,
1000)
pdf = np.exp(clf_re.score_samples(sample[:, np.newaxis]))
axes[0, 0].plot(sample + x_c, pdf, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[0, 0].legend(fontsize='small')
axes[0, 0].axvline(0.0, lw=1, color='g')
axes[1, 1].hist(data_.imag, bins=10,
color="#4682b4",
orientation='horizontal',
histtype='stepfilled',
alpha=0.3, normed=True,
label="Im "
"{}-{}".format(ant1,
ant2))
try:
clf_im = self._residuals_fits_scans[baseline][i][IF_][stoke][1]
sample = np.linspace(np.min(data_.imag) + y_c,
np.max(data_.imag) + y_c,
1000)
pdf = np.exp(clf_im.score_samples(sample[:, np.newaxis]))
axes[1, 1].plot(pdf, sample - y_c, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[1, 1].legend(fontsize='small')
axes[1, 1].axhline(0.0, lw=1, color='g')
fig.savefig("{}_ant1_{}_ant2_{}_stokes_{}_IF_{}_scan_{}.png".format(outname,
ant1, ant2, self.residuals.stokes[stoke],
IF_, i), bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
# If ``self.residuals._indxs_baselines_scans[baseline] = None``
except TypeError:
continue
else:
indxs = self.residuals._indxs_baselines[baseline]
# Complex (#, #IF, #stokes)
data = self.residuals.uvdata[indxs]
# weights = self.residuals.weights[indxs]
if freq_average:
raise NotImplementedError
else:
for IF_ in IF:
for stoke in stokes:
print("Stokes {}".format(stoke))
# Complex 1D array to plot
data_ = data[:, IF_, stoke]
# weigths_ = weights[:, IF_, stoke]
# data_pw = data_[weigths_ > 0]
data_pw = data_[self.residuals._pw_indxs[indxs, IF_, stoke]]
data_nw = data_[self.residuals._nw_indxs[indxs, IF_, stoke]]
data_out = data_pw[self._residuals_outliers[baseline][IF_][stoke]]
# data_nw = data_[weigths_ <= 0]
fig, axes = matplotlib.pyplot.subplots(nrows=2,
ncols=2)
matplotlib.pyplot.rcParams.update({'axes.titlesize':
'small'})
axes[1, 0].plot(data_.real, data_.imag, '.k')
axes[1, 0].plot(data_out.real, data_out.imag, '.r')
axes[1, 0].plot(data_nw.real, data_nw.imag, '.', color='orange')
try:
x_c, y_c = self._residuals_centers[baseline][IF_][stoke]
axes[1, 0].plot(x_c, y_c, '.y')
except ValueError:
x_c, y_c = 0., 0.
axes[1, 0].axvline(0.0, lw=0.2, color='g')
axes[1, 0].axhline(0.0, lw=0.2, color='g')
axes[0, 0].hist(data_.real, bins=20,
label="Re {}-{}".format(ant1, ant2),
color="#4682b4",
histtype='stepfilled', alpha=0.3,
normed=True)
try:
clf_re = self._residuals_fits[baseline][IF_][stoke][0]
sample = np.linspace(np.min(data_.real) - x_c,
np.max(data_.real) - x_c,
1000)
pdf = np.exp(clf_re.score_samples(sample[:, np.newaxis]))
axes[0, 0].plot(sample + x_c, pdf, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[0, 0].legend(fontsize='small')
axes[0, 0].axvline(0.0, lw=1, color='g')
axes[1, 1].hist(data_.imag, bins=20,
color="#4682b4",
orientation='horizontal',
histtype='stepfilled', alpha=0.3,
normed=True,
label="Im {}-{}".format(ant1, ant2))
try:
clf_im = self._residuals_fits[baseline][IF_][stoke][1]
sample = np.linspace(np.min(data_.imag) + y_c,
np.max(data_.imag) + y_c,
1000)
pdf = np.exp(clf_im.score_samples(sample[:, np.newaxis]))
axes[1, 1].plot(pdf, sample - y_c, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[1, 1].legend(fontsize='small')
axes[1, 1].axhline(0.0, lw=1, color='g')
fig.savefig("{}_ant1_{}_ant2_{}_stokes_{}_IF_{}.png".format(outname,
ant1, ant2, self.residuals.stokes[stoke], IF_),
bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
def find_outliers_in_residuals(self, split_scans=False):
"""
        Method that searches for outliers in the residuals.
:param split_scans:
Boolean. Find outliers on each scan separately?
"""
print("Searching for outliers in residuals...")
for baseline in self.residuals.baselines:
indxs = self.residuals._indxs_baselines[baseline]
baseline_data = self.residuals.uvdata[indxs]
# If searching outliers in baseline data
if not split_scans:
for if_ in range(baseline_data.shape[1]):
for stokes in range(baseline_data.shape[2]):
# Complex array with visibilities for given baseline,
# #IF, Stokes
data = baseline_data[:, if_, stokes]
# weigths = self.residuals.weights[indxs, if_, stokes]
# Use only valid data with positive weight
data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
data_nw = data[self.residuals._nw_indxs[indxs, if_, stokes]]
print("NW {}".format(np.count_nonzero(data_nw)))
# If data are zeros
if not np.any(data_pw):
continue
print("Baseline {}, IF {}, Stokes {}".format(baseline,
if_,
stokes))
outliers_re = find_outliers_dbscan(data_pw.real, 1., 5)
outliers_im = find_outliers_dbscan(data_pw.imag, 1., 5)
outliers_1d = np.logical_or(outliers_re, outliers_im)
outliers_2d = find_outliers_2d_dbscan(data_pw, 1.5, 5)
self._residuals_outliers[baseline][if_][stokes] =\
np.logical_or(outliers_1d, outliers_2d)
# If searching outliers on each scan
else:
# Searching each scan on current baseline
# FIXME: Use zero centers for shitty scans?
if self.residuals.scans_bl[baseline] is None:
continue
for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
scan_uvdata = self.residuals.uvdata[scan_indxs]
for if_ in range(scan_uvdata.shape[1]):
for stokes in range(scan_uvdata.shape[2]):
# Complex array with visibilities for given
# baseline, #scan, #IF, Stokes
data = scan_uvdata[:, if_, stokes]
# weigths = self.residuals.weights[scan_indxs, if_,
# stokes]
# Use only valid data with positive weight
data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
data_nw = data[self.residuals._nw_indxs[scan_indxs, if_, stokes]]
print("NW {}".format(np.count_nonzero(data_nw)))
# If data are zeros
if not np.any(data_pw):
continue
print("Baseline {}, scan {}, IF {}," \
" Stokes {}".format(baseline, i, if_, stokes))
outliers_re = find_outliers_dbscan(data_pw.real, 1., 5)
outliers_im = find_outliers_dbscan(data_pw.imag, 1., 5)
outliers_1d = np.logical_or(outliers_re, outliers_im)
outliers_2d = find_outliers_2d_dbscan(data_pw, 1.5, 5)
self._residuals_outliers_scans[baseline][i][if_][stokes] = \
np.logical_or(outliers_1d, outliers_2d)
# TODO: Use only data without outliers
def find_residuals_centers(self, split_scans):
"""
Calculate centers of residuals for each baseline[/scan]/IF/stokes.
"""
print("Finding centers")
for baseline in self.residuals.baselines:
# Find centers for baselines only
if not split_scans:
indxs = self.residuals._indxs_baselines[baseline]
baseline_data = self.residuals.uvdata[indxs]
for if_ in range(baseline_data.shape[1]):
for stokes in range(baseline_data.shape[2]):
data = baseline_data[:, if_, stokes]
# weigths = self.residuals.weights[indxs, if_, stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
# data_nw = data[self.residuals._nw_indxs[indxs, if_, stokes]]
# If data are zeros
if not np.any(data_pw):
continue
print("Baseline {}, IF {}, Stokes {}".format(baseline, if_,
stokes))
outliers = self._residuals_outliers[baseline][if_][stokes]
x_c = np.sum(data_pw.real[~outliers]) / np.count_nonzero(~outliers)
y_c = np.sum(data_pw.imag[~outliers]) / np.count_nonzero(~outliers)
print("Center: ({:.4f}, {:.4f})".format(x_c, y_c))
self._residuals_centers[baseline][if_][stokes] = (x_c, y_c)
# Find residuals centers on each scan
else:
# Searching each scan on current baseline
# FIXME: Use zero centers for shitty scans?
if self.residuals.scans_bl[baseline] is None:
continue
for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
scan_uvdata = self.residuals.uvdata[scan_indxs]
for if_ in range(scan_uvdata.shape[1]):
for stokes in range(scan_uvdata.shape[2]):
data = scan_uvdata[:, if_, stokes]
# weigths = self.residuals.weights[scan_indxs, if_,
# stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
# If data are zeros
if not np.any(data_pw):
continue
print("Baseline {}, #scan {}, IF {}," \
" Stokes {}".format(baseline, i, if_, stokes))
outliers = self._residuals_outliers_scans[baseline][i][if_][stokes]
x_c = np.sum(data_pw.real[~outliers]) / np.count_nonzero(~outliers)
y_c = np.sum(data_pw.imag[~outliers]) / np.count_nonzero(~outliers)
print("Center: ({:.4f}, {:.4f})".format(x_c, y_c))
self._residuals_centers_scans[baseline][i][if_][stokes] = (x_c, y_c)
# FIXME: Use real Stokes parameters as keys.
def fit_residuals_gmm(self):
"""
Fit residuals with Gaussian Mixture Model.
:note:
At each baseline residuals are fitted with Gaussian Mixture Model
where number of mixture components is chosen based on BIC.
"""
for baseline in self.residuals.baselines:
baseline_data, _ = \
self.residuals._choose_uvdata(baselines=[baseline])
for if_ in range(baseline_data.shape[1]):
for stokes in range(baseline_data.shape[2]):
data = baseline_data[:, if_, stokes]
# If data are zeros
if not np.any(data):
continue
print("Baseline {}, IF {}, Stokes {}".format(baseline, if_,
stokes))
print("Shape: {}".format(baseline_data.shape))
try:
clf = fit_2d_gmm(data)
# This occurs when baseline has 1 point only
except ValueError:
continue
self._residuals_fits[baseline][if_][stokes] = clf
# FIXME: Use real Stokes parameters as keys.
def fit_residuals_kde(self, split_scans, combine_scans, recenter):
"""
Fit residuals with Gaussian Kernel Density.
:param split_scans:
Boolean. Fit to each scan of baseline independently?
:param combine_scans:
Boolean. Combine re-centered scans on each baseline before fit?
:param recenter:
Boolean. Recenter residuals before fit?
:note:
At each baseline/scan residuals are fitted with Kernel Density
Model.
"""
print("Fitting residuals")
if combine_scans:
raise NotImplementedError
for baseline in self.residuals.baselines:
# If fitting baseline data
if not split_scans:
indxs = self.residuals._indxs_baselines[baseline]
baseline_data = self.residuals.uvdata[indxs]
for if_ in range(baseline_data.shape[1]):
for stokes in range(baseline_data.shape[2]):
data = baseline_data[:, if_, stokes]
# weigths = self.residuals.weights[indxs, if_, stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
# If data are zeros
if not np.any(data_pw):
continue
# Don't count outliers
data_pw = data_pw[~self._residuals_outliers[baseline][if_][stokes]]
print("Baseline {}, IF {}, Stokes {}".format(baseline, if_,
stokes))
if recenter:
x_c, y_c = self._residuals_centers[baseline][if_][stokes]
data_pw -= x_c + 1j * y_c
try:
clf_re = fit_kde(data_pw.real)
clf_im = fit_kde(data_pw.imag)
# This occurs when baseline has 1 point only
except ValueError:
continue
self._residuals_fits[baseline][if_][stokes] = (clf_re,
clf_im)
# If fitting each scan independently
else:
if self.residuals.scans_bl[baseline] is None:
continue
for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
scan_uvdata = self.residuals.uvdata[scan_indxs]
for if_ in range(scan_uvdata.shape[1]):
for stokes in range(scan_uvdata.shape[2]):
data = scan_uvdata[:, if_, stokes]
# weigths = self.residuals.weights[scan_indxs, if_, stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
# If data are zeros
if not np.any(data_pw):
continue
# Don't count outliers
data_pw = data_pw[~self._residuals_outliers_scans[baseline][i][if_][stokes]]
print("Baseline {}, Scan {}, IF {}, Stokes" \
" {}".format(baseline, i, if_, stokes))
if recenter:
x_c, y_c = self._residuals_centers_scans[baseline][i][if_][stokes]
data_pw -= x_c - 1j * y_c
try:
clf_re = fit_kde(data_pw.real)
clf_im = fit_kde(data_pw.imag)
# This occurs when scan has 1 point only
except ValueError:
continue
self._residuals_fits_scans[baseline][i][if_][stokes] = (clf_re, clf_im)
# # FIXME: Use real Stokes parameters as keys.
# def fit_residuals_kde_2d(self, split_scans, combine_scans, recenter):
# """
# Fit residuals with Gaussian Kernel Density.
# :param split_scans:
# Boolean. Fit to each scan of baseline independently?
# :param combine_scans:
# Boolean. Combine re-centered scans on each baseline before fit?
# :param recenter:
# Boolean. Recenter residuals before fit?
# :note:
# At each baseline/scan residuals are fitted with Kernel Density
# Model.
# """
# print "Fitting residuals"
# if combine_scans:
# raise NotImplementedError
# for baseline in self.residuals.baselines:
# # If fitting baseline data
# if not split_scans:
# indxs = self.residuals._indxs_baselines[baseline]
# baseline_data = self.residuals.uvdata[indxs]
# for if_ in range(baseline_data.shape[1]):
# for stokes in range(baseline_data.shape[2]):
# data = baseline_data[:, if_, stokes]
# # weigths = self.residuals.weights[indxs, if_, stokes]
# # Use only valid data with positive weight
# # data_pw = data[weigths > 0]
# data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
# # If data are zeros
# if not np.any(data_pw):
# continue
# # Don't count outliers
# data_pw = data_pw[~self._residuals_outliers[baseline][if_][stokes]]
# print "Baseline {}, IF {}, Stokes {}".format(baseline, if_,
# stokes)
# if recenter:
# x_c, y_c = self._residuals_centers[baseline][if_][stokes]
# data_pw -= x_c - 1j * y_c
# try:
# clf = fit_2d_kde(data_pw)
# # This occurs when baseline has 1 point only
# except ValueError:
# continue
# self._residuals_fits[baseline][if_][stokes] = clf
# # If fitting each scan independently
# else:
# if self.residuals.scans_bl[baseline] is None:
# continue
# for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
# scan_uvdata = self.residuals.uvdata[scan_indxs]
# for if_ in range(scan_uvdata.shape[1]):
# for stokes in range(scan_uvdata.shape[2]):
# data = scan_uvdata[:, if_, stokes]
# # weigths = self.residuals.weights[scan_indxs, if_, stokes]
# # Use only valid data with positive weight
# # data_pw = data[weigths > 0]
# data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
# # If data are zeros
# if not np.any(data_pw):
# continue
# # Don't count outliers
# data_pw = data_pw[~self._residuals_outliers_scans[baseline][i][if_][stokes]]
# print "Baseline {}, Scan {}, IF {}, Stokes" \
# " {}".format(baseline, i, if_, stokes)
# if recenter:
# x_c, y_c = self._residuals_centers_scans[baseline][i][if_][stokes]
# data_pw -= x_c - 1j * y_c
# try:
# clf = fit_2d_kde(data_pw)
# # This occurs when scan has 1 point only
# except ValueError:
# continue
# self._residuals_fits_scans[baseline][i][if_][stokes] = clf_re
def get_residuals_noise(self, split_scans, use_V):
"""
Estimate noise of the residuals using stokes V or successive
differences approach. For each baseline or even scan.
:param split_scans:
Boolean. Estimate noise std for each baseline scan individually?
:param use_V:
Boolean. Use Stokes V visibilities to estimate noise std?
:return:
Dictionary with keys - baseline numbers & values - arrays of shape
([#scans], #IF, #stokes). First dimension is #scans if option
``split_scans=True`` is used.
"""
# Dictionary with keys - baseline numbers & values - arrays of shape
# ([#scans], #IF, [#stokes]). It means (#scans, #IF) if
# ``split_scans=True`` & ``use_V=True``, (#IF, #stokes) if
# ``split_scans=False`` & ``use_V=False`` etc.
noise_residuals = self.residuals.noise(split_scans=split_scans,
use_V=use_V)
print("Getting noise residuals ", noise_residuals)
# To make ``noise_residuals`` shape ([#scans], #IF, #stokes) for
# ``use_V=True`` option.
if use_V:
nstokes = self.residuals.nstokes
for key, value in noise_residuals.items():
print("key", key)
print("value", np.shape(value))
shape = list(np.shape(value))
shape.extend([nstokes])
value = np.tile(value, nstokes)
value = value.reshape(shape)
noise_residuals[key] = value
return noise_residuals
def plot_residuals(self, save_file, vis_range=None, ticks=None,
stokes='I'):
"""
Plot histograms of the residuals.
:param save_file:
File to save plot.
:param vis_range: (optional)
Iterable of min & max range for plotting residuals Re & Im.
Eg. ``[-0.15, 0.15]``. If ``None`` then choose one from data.
(default: ``None``)
:param ticks: (optional)
Iterable of X-axis ticks to plot. Eg. ``[-0.1, 0.1]``. If ``None``
then choose one from data. (default: ``None``)
:param stokes:
Stokes parameter to plot. (default: ``I``)
"""
uvdata_r = self.residuals
nrows = int(np.ceil(np.sqrt(2. * len(uvdata_r.baselines))))
# Optionally choose range & ticks
if vis_range is None:
res = uvdata_r._choose_uvdata(stokes=stokes, freq_average=True)
range_ = min(abs(np.array([max(res.real), max(res.imag),
min(res.real), min(res.imag)])))
range_ = float("{:.3f}".format(range_))
vis_range = [-range_, range_]
print("vis_range", vis_range)
if ticks is None:
tick = min(abs(np.array(vis_range)))
tick = float("{:.3f}".format(tick / 2.))
ticks = [-tick, tick]
print("ticks", ticks)
fig, axes = matplotlib.pyplot.subplots(nrows=nrows, ncols=nrows,
sharex=True, sharey=True)
fig.set_size_inches(18.5, 18.5)
matplotlib.pyplot.rcParams.update({'axes.titlesize': 'small'})
i, j = 0, 0
for baseline in uvdata_r.baselines:
try:
res = uvdata_r._choose_uvdata(baselines=[baseline],
freq_average=True,
stokes=stokes)
bins = min([10, np.sqrt(len(res.imag))])
ant1, ant2 = baselines_2_ants([baseline])
axes[i, j].hist(res.real, range=vis_range, color="#4682b4",
label="Re {}-{}".format(ant1, ant2))
axes[i, j].axvline(0.0, lw=1, color='r')
axes[i, j].set_xticks(ticks)
legend = axes[i, j].legend(fontsize='small')
j += 1
# Plot first row first
if j // nrows > 0:
# Then second row, etc...
i += 1
j = 0
bins = min([10, np.sqrt(len(res.imag))])
axes[i, j].hist(res.imag, range=vis_range, color="#4682b4",
label="Im {}-{}".format(ant1, ant2))
legend = axes[i, j].legend(fontsize='small')
axes[i, j].axvline(0.0, lw=1, color='r')
axes[i, j].set_xticks(ticks)
j += 1
# Plot first row first
if j // nrows > 0:
# Then second row, etc...
i += 1
j = 0
except IndexError:
break
fig.savefig("{}".format(save_file), bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
def plot_residuals_2d(self, vis_range=None, ticks=None):
"""
Plot 2D distribution of complex residuals.
:param vis_range: (optional)
Iterable of min & max range for plotting residuals Re & Im.
Eg. ``[-0.15, 0.15]``. If ``None`` then choose one from data.
(default: ``None``)
:param ticks: (optional)
Iterable of X-axis ticks to plot. Eg. ``[-0.1, 0.1]``. If ``None``
then choose one from data. (default: ``None``)
"""
uvdata_r = self.residuals
for baseline in uvdata_r.baselines:
# n_if = self._residuals_fits[baseline]
# n_stokes = self._residuals_fits[baseline]
nrows = 4
fig, axes = matplotlib.pyplot.subplots(nrows=4,
ncols=4,
sharex=True,
sharey=True)
i, j = 0, 0
fig.set_size_inches(18.5, 18.5)
matplotlib.pyplot.rcParams.update({'axes.titlesize':
'small'})
n_if = len(self._residuals_fits[baseline].keys())
for if_ in self._residuals_fits[baseline].keys():
n_stokes = len([val for val in
self._residuals_fits[baseline][if_].values() if
val is not None])
for stoke in self._residuals_fits[baseline][if_].keys():
stoke_par = uvdata_r.stokes[stoke]
try:
clf = self._residuals_fits[baseline][if_][stoke]
if clf is None:
# No fitted residuals for this IF/Stokes
continue
res = uvdata_r._choose_uvdata(baselines=[baseline],
IF=if_+1,
stokes=stoke_par)[0][:, 0]
print("Baseline {}, IF {}, Stokes {}".format(baseline,
if_,
stoke))
print("Shape: {}".format(res.shape))
re = res.real
im = res.imag
reim = np.vstack((re, im)).T
y = clf.predict(reim)
for i_mix in range(clf.n_components):
color = "rgbyk"[i_mix]
re_ = re[np.where(y == i_mix)]
im_ = im[np.where(y == i_mix)]
axes[i, j].scatter(re_, im_, color=color)
make_ellipses(clf, axes[i, j])
# axes[i, j].set_xticks(ticks)
# axes[i, j].set_xlim(vis_range)
# axes[i, j].set_ylim(vis_range)
# axes[i, j].set_xticks(ticks)
# axes[i, j].set_yticks(ticks)
j += 1
# Plot first row first
if j // nrows > 0:
# Then second row, etc...
i += 1
j = 0
except IndexError:
break
fig.savefig("res_2d_{}_{}_{}".format(baseline, if_, stoke),
bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
def resample(self, outname, nonparametric, split_scans, recenter, use_kde,
use_v, combine_scans):
"""
        Sample from the residuals with replacement, or sample from normal
        random noise fitted to the residuals, and add the samples to the model
        to form bootstrapped realizations of the data.
        :param outname:
            Output file name to save bootstrapped data.
        :param nonparametric: (optional)
            If ``True`` then use actual residuals between model and data. If
            ``False`` then use gaussian noise fitted to actual residuals for
            parametric bootstrapping. (default: ``False``)
:return:
Just save bootstrapped data to file with specified ``outname``.
"""
raise NotImplementedError
# FIXME: Implement arbitrary output directory for bootstrapped data
def run(self, n, nonparametric, split_scans, recenter, use_kde, use_v,
combine_scans, outname=['bootstrapped_data', '.FITS'],
pairs=False):
"""
Generate ``n`` data sets.
:note:
Several steps are made before re-sampling ``n`` times:
            * First, outliers are found for each baseline or, optionally, each
            scan (using the DBSCAN clustering algorithm).
            * Centers of the residuals for each baseline or, optionally, each
            scan (when ``split_scans=True``) are found with outliers excluded.
            * In the parametric bootstrap (when ``nonparametric=False``) noise
            density estimates for each baseline/scan are made using
            ``sklearn.neighbors.KernelDensity`` fits to Re & Im re-centered
            visibility data with a gaussian kernel and bandwidth optimized by
            ``sklearn.grid_search.GridSearchCV`` with 5-fold CV.
            This is when ``use_kde=True``. Otherwise residuals are assumed to
            be gaussian-distributed and their std is estimated directly.
            Then, in the parametric bootstrap, re-sampling is done by adding
            samples from the fitted KDE (for ``use_kde=True``) or a zero-mean
            Gaussian distribution with the std of the residuals to the model
            visibility data ``n`` times. In the non-parametric case re-sampling
            is done by sampling with replacement from the re-centered residuals
            (with outliers excluded).
"""
# Find outliers in baseline/scan data
if not split_scans:
if not self._residuals_outliers:
print("Finding outliers in baseline's data...")
self.find_outliers_in_residuals(split_scans=False)
else:
print("Already found outliers in baseline's data...")
else:
if not self._residuals_centers_scans:
print("Finding outliers in scan's data...")
self.find_outliers_in_residuals(split_scans=True)
else:
print("Already found outliers in scan's data...")
# Find residuals centers
if recenter:
self.find_residuals_centers(split_scans=split_scans)
if not pairs:
# Fit residuals for parametric case
if not nonparametric:
# Using KDE estimate of residuals density
if use_kde:
print("Using parametric bootstrap")
if not split_scans and not self._residuals_fits:
print("Fitting residuals with KDE for each" \
" baseline/IF/Stokes...")
self.fit_residuals_kde(split_scans=split_scans,
combine_scans=combine_scans,
recenter=recenter)
if split_scans and not self._residuals_fits_scans:
print("Fitting residuals with KDE for each" \
" baseline/scan/IF/Stokes...")
self.fit_residuals_kde(split_scans=split_scans,
combine_scans=combine_scans,
recenter=recenter)
if not split_scans and self._residuals_fits:
print("Residuals were already fitted with KDE on each" \
" baseline/IF/Stokes")
if split_scans and self._residuals_fits_scans:
print("Residuals were already fitted with KDE on each" \
" baseline/scan/IF/Stokes")
# Use parametric gaussian estimate of residuals density
else:
# FIXME: This is needed only for cycle after!!!
self.fit_residuals_kde(split_scans=split_scans,
combine_scans=combine_scans,
recenter=recenter)
print("only for cycle")
if not self.noise_residuals:
print("Estimating gaussian STDs on each baseline[/scan]...")
self.noise_residuals = self.get_residuals_noise(split_scans,
use_v)
else:
print("Gaussian STDs for each baseline[/scan] are already" \
" estimated")
# Resampling is done in subclasses
for i in range(n):
outname_ = outname[0] + '_' + str(i + 1).zfill(3) + outname[1]
self.resample(outname=outname_, nonparametric=nonparametric,
split_scans=split_scans, recenter=recenter,
use_kde=use_kde, use_v=use_v,
combine_scans=combine_scans, pairs=pairs)
class CleanBootstrap(Bootstrap):
"""
Class that implements bootstrapping of uv-data using model and residuals
between data and model. Data are self-calibrated visibilities.
:param models:
Iterable of ``Model`` subclass instances that represent model used for
        bootstrapping. There should be at most one model for each Stokes
        parameter. If there are two, say two I-stokes models, then sum them
        first using ``Model.__add__``.
:param data:
Path to FITS-file with uv-data (self-calibrated or not).
"""
def __init__(self, models, uvdata, sigma_ampl_scale=None,
additional_noise=None, sigma_dterms=None, sigma_evpa=None):
"""
:param sigma_ampl_scale:
Uncertainty of the overall flux calibration.
:param additional_noise:
            Sigma of the additional noise added to all baselines/IFs/Stokes.
            I don't remember why I did that.
        :param sigma_dterms:
            RMS of the residual D-terms. This is not the proper way to account
            for D-terms in the bootstrap; it is just an MC estimate of the
            corresponding error.
:param sigma_evpa:
RMS of the EVPA calibration.
"""
super(CleanBootstrap, self).__init__(models, uvdata)
self.sigma_ampl_scale = sigma_ampl_scale
self.additional_noise = additional_noise
if sigma_dterms is not None and 'I' not in self.model_stokes:
raise Exception("To account for D-terms error we need Stokes I to be"
" present in ``models``!")
if sigma_dterms is not None:
self._d_dict = create_random_D_dict(self.data, sigma_dterms)
self.sigma_dterms = sigma_dterms
self.sigma_evpa = sigma_evpa
def get_residuals(self):
return self.data - self.model_data
def resample_baseline_pairs(self, baseline, copy_of_model_data):
# Boolean array that defines indexes of current baseline data
baseline_indxs = self.data._indxs_baselines[baseline]
# FIXME: Here iterate over keys with not None values
for if_ in range(self.data.nif):
for stokes in range(self.data.nstokes):
baseline_indxs_ = baseline_indxs.copy()
# Boolean array that defines indexes of outliers in indexes of
# current baseline data
outliers = self._residuals_outliers[baseline][if_][stokes]
pw_indxs = self.data._pw_indxs[baseline_indxs, if_, stokes]
# If some Stokes parameter has no outliers calculation - pass it
if isinstance(outliers, dict):
continue
# Baseline indexes of inliers
                indxs = np.where(baseline_indxs_)
from sklearn.model_selection import KFold
from sklearn.utils import shuffle
from sklearn import preprocessing
import pandas as pd
import numpy as np
def KFold_df(df, folds=3):
# kf = KFold(n_splits=folds)
# df = shuffle(df)
#
# for train, test in kf.split(df.index):
# trainData = df.iloc[train]
# testData = df.iloc[test]
# yield trainData, testData
trainData = df.iloc[:-1]
testData = df.iloc[-1:]
yield trainData, testData
def normalize(df):
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(df)
df_normalized = pd.DataFrame(np_scaled, columns=df.columns, index=df.index)
lst_col = df.columns[-1]
df_normalized[lst_col] = df[lst_col]
return df_normalized
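# A minimal sketch of how ``normalize`` is meant to be used (the column names
# are hypothetical); all feature columns are min-max scaled while the last
# column, treated as the target, is left untouched:
#
#     df = pd.DataFrame({"loc": [10, 20, 30], "cc": [1.0, 2.0, 4.0], "effort": [5, 7, 9]})
#     df_norm = normalize(df)
#     assert (df_norm["effort"] == df["effort"]).all()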
def mre_calc(y_predict, y_actual):
mre = []
for predict, actual in zip(y_predict, y_actual):
if actual == 0:
if predict == 0:
mre.append(0)
elif abs(predict) <= 1:
mre.append(1)
else:
                mre.append(round((abs(predict - actual) + 1) / (actual + 1), 3))
else:
mre.append(round(abs(predict - actual) / (actual), 3))
    mMRE = np.median(mre)
    return mMRE
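# A small worked example (hypothetical numbers): with predictions [10, 0]
# against actuals [8, 0], the magnitudes of relative error are [0.25, 0] and
# the returned median MRE is 0.125:
#
#     print(mre_calc([10, 0], [8, 0]))   # -> 0.125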
#!/usr/bin python3
import numpy as np
import scipy as sp
import casadi as ca
import pathlib
import os
import copy
import shutil
import pdb
import warnings
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
from typing import List, Dict
from DGSQP.types import VehicleState, VehiclePrediction
from DGSQP.dynamics.dynamics_models import CasadiDecoupledMultiAgentDynamicsModel
from DGSQP.solvers.abstract_solver import AbstractSolver
from DGSQP.solvers.solver_types import IBRParams
class IBR(AbstractSolver):
def __init__(self, joint_dynamics: CasadiDecoupledMultiAgentDynamicsModel,
costs: List[List[ca.Function]],
agent_constraints: List[ca.Function],
shared_constraints: List[ca.Function],
bounds: Dict[str, VehicleState],
params=IBRParams()):
self.joint_dynamics = joint_dynamics
self.M = self.joint_dynamics.n_a
self.N = params.N
self.line_search_iters = params.line_search_iters
self.ibr_iters = params.ibr_iters
self.verbose = params.verbose
self.code_gen = params.code_gen
self.jit = params.jit
self.opt_flag = params.opt_flag
self.solver_name = params.solver_name
if params.solver_dir is not None:
self.solver_dir = os.path.join(params.solver_dir, self.solver_name)
if not params.enable_jacobians:
jac_opts = dict(enable_fd=False, enable_jacobian=False, enable_forward=False, enable_reverse=False)
else:
jac_opts = dict()
if self.code_gen:
if self.jit:
self.options = dict(jit=True, jit_name=self.solver_name, compiler='shell', jit_options=dict(compiler='gcc', flags=['-%s' % self.opt_flag], verbose=self.verbose), **jac_opts)
else:
self.options = dict(jit=False, **jac_opts)
self.c_file_name = self.solver_name + '.c'
self.so_file_name = self.solver_name + '.so'
if params.solver_dir is not None:
self.solver_dir = pathlib.Path(params.solver_dir).expanduser().joinpath(self.solver_name)
else:
self.options = dict(jit=False, **jac_opts)
self.num_qa_d = [int(self.joint_dynamics.dynamics_models[a].n_q) for a in range(self.M)]
self.num_ua_d = [int(self.joint_dynamics.dynamics_models[a].n_u) for a in range(self.M)]
self.num_ua_el = [int(self.N*self.joint_dynamics.dynamics_models[a].n_u) for a in range(self.M)]
# The costs should be a dict of casadi functions with keys 'stage' and 'terminal'
if len(costs) != self.M:
raise ValueError('Number of agents: %i, but %i cost functions were provided' % (self.M, len(costs)))
self.costs_sym = costs
# The constraints should be a list (of length N+1) of casadi functions such that constraints[i] <= 0
# if len(constraints) != self.N+1:
# raise ValueError('Horizon length: %i, but %i constraint functions were provided' % (self.N+1, len(constraints)))
self.constraints_sym = agent_constraints
self.shared_constraints_sym = shared_constraints
# Process box constraints
self.state_ub, self.state_lb, self.input_ub, self.input_lb = [], [], [], []
self.state_ub_idxs, self.state_lb_idxs, self.input_ub_idxs, self.input_lb_idxs = [], [], [], []
for a in range(self.M):
su, iu = self.joint_dynamics.dynamics_models[a].state2qu(bounds['ub'][a])
sl, il = self.joint_dynamics.dynamics_models[a].state2qu(bounds['lb'][a])
self.state_ub.append(su)
self.state_lb.append(sl)
self.input_ub.append(iu)
self.input_lb.append(il)
self.state_ub_idxs.append(np.where(su < np.inf)[0])
self.state_lb_idxs.append(np.where(sl > -np.inf)[0])
self.input_ub_idxs.append(np.where(iu < np.inf)[0])
self.input_lb_idxs.append(np.where(il > -np.inf)[0])
self.n_ca = [[0 for _ in range(self.N+1)] for _ in range(self.M)]
self.n_cbr = [[0 for _ in range(self.N+1)] for _ in range(self.M)]
self.n_cs = [0 for _ in range(self.N+1)]
self.n_c = [0 for _ in range(self.N+1)]
self.state_input_predictions = [VehiclePrediction() for _ in range(self.M)]
self.n_u = self.joint_dynamics.n_u
self.n_q = self.joint_dynamics.n_q
# Convergence tolerance for SQP
self.p_tol = params.p_tol
self.d_tol = params.d_tol
self.alpha = 0.3
self.use_ps = params.use_ps
self.debug_plot = params.debug_plot
self.pause_on_plot = params.pause_on_plot
self.local_pos = params.local_pos
if self.debug_plot:
matplotlib.use('TkAgg')
plt.ion()
self.fig = plt.figure(figsize=(10,5))
self.ax_xy = self.fig.add_subplot(1,2,1)
self.ax_a = self.fig.add_subplot(2,2,2)
self.ax_s = self.fig.add_subplot(2,2,4)
# self.joint_dynamics.dynamics_models[0].track.remove_phase_out()
self.joint_dynamics.dynamics_models[0].track.plot_map(self.ax_xy, close_loop=False)
self.colors = ['b', 'g', 'r', 'm', 'c']
self.l_xy, self.l_a, self.l_s = [], [], []
for i in range(self.M):
self.l_xy.append(self.ax_xy.plot([], [], f'{self.colors[i]}o')[0])
self.l_a.append(self.ax_a.plot([], [], f'-{self.colors[i]}o')[0])
self.l_s.append(self.ax_s.plot([], [], f'-{self.colors[i]}o')[0])
self.ax_a.set_ylabel('accel')
self.ax_s.set_ylabel('steering')
self.fig.canvas.draw()
self.fig.canvas.flush_events()
self.q_pred = np.zeros((self.N+1, self.n_q))
self.u_pred = np.zeros((self.N, self.n_u))
self.q_new = np.zeros((self.N+1, self.n_q))
self.u_new = np.zeros((self.N+1, self.n_u))
self.debug = False
self.u_prev = np.zeros(self.n_u)
if params.solver_dir:
self._load_solver()
else:
self._build_solver()
self.u_ws = [np.zeros((self.N, self.num_ua_d[a])) for a in range(self.M)]
if self.use_ps and self.alpha > 0:
self.l_ws = [np.zeros(np.sum(self.n_c)) if a == 0 else np.zeros(np.sum(self.n_cbr[a])) for a in range(self.M)]
else:
self.l_ws = [np.zeros(np.sum(self.n_cbr[a])) for a in range(self.M)]
self.l_pred = copy.copy(self.l_ws)
self.initialized = True
def initialize(self):
pass
def set_warm_start(self, u_ws: np.ndarray, l_ws: np.ndarray = None):
self.u_ws = u_ws
if l_ws is None:
if self.use_ps and self.alpha > 0:
self.l_ws = [np.zeros(np.sum(self.n_c)) if a == 0 else np.zeros(np.sum(self.n_cbr[a])) for a in range(self.M)]
else:
self.l_ws = [np.zeros(np.sum(self.n_cbr[a])) for a in range(self.M)]
else:
self.l_ws = l_ws
def step(self, states: List[VehicleState], env_state=None):
info = self.solve(states)
self.joint_dynamics.qu2state(states, None, self.u_pred[0])
self.joint_dynamics.qu2prediction(self.state_input_predictions, self.q_pred, self.u_pred)
for q in self.state_input_predictions:
q.t = states[0].t
self.u_prev = self.u_pred[0]
u_ws = np.vstack((self.u_pred[1:], self.u_pred[-1]))
u = []
for a in range(self.M):
si = int(np.sum(self.num_ua_d[:a]))
ei = si + int(self.num_ua_d[a])
u.append(u_ws[:,si:ei].ravel())
self.set_warm_start(u)
return info
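    # A hedged sketch of the intended receding-horizon loop (``dyn``, ``costs``,
    # ``constraints``, ``shared_constraints``, ``bounds``, ``params`` and the
    # measured ``states`` are assumed to be set up elsewhere with the types this
    # class expects; ``n_u_per_agent`` is a hypothetical list of input sizes):
    #
    #     solver = IBR(dyn, costs, constraints, shared_constraints, bounds, params)
    #     solver.set_warm_start([np.zeros((params.N, n_u)) for n_u in n_u_per_agent])
    #     info = solver.step(states)        # writes the first input into ``states``
    #     preds = solver.state_input_predictions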
def solve(self, states: List[VehicleState]):
solve_info = {}
solve_start = datetime.now()
u_i = []
for a in range(self.M):
u_i.append(self.u_ws[a].ravel())
l_i = copy.copy(self.l_ws)
x0 = self.joint_dynamics.state2q(states)
up = copy.copy(self.u_prev)
u_im1 = copy.copy(u_i)
if self.verbose:
J = self.f_J(np.concatenate(u_i), x0, up)
print(f'ego cost: {J[0]}, tar cost: {J[1]}')
if self.debug_plot:
self._update_debug_plot(u_i, x0, up)
if self.pause_on_plot:
pdb.set_trace()
ibr_converged = False
ibr_it = 0
iter_sols = []
while True:
ibr_it_start = datetime.now()
iter_sols.append(u_i)
if self.verbose:
print('===================================================')
print(f'IBR iteration: {ibr_it}')
cond = None
for a in range(self.M):
# for a in range(self.M-1, -1, -1):
# if ibr_it == 0 or not self.use_ps:
if self.use_ps and a == 0 and self.alpha > 0:
# Compute policy gradient
Duo_ubr_v = []
for b in range(self.M):
if b != a:
uo = np.concatenate([u_i[c] for c in range(self.M) if c != b])
try:
Duo_ubr = self.f_Duo_ubr[b](u_i[b], l_i[b], uo, x0, up).toarray()
Duo_ubr_v.append(Duo_ubr.ravel(order='F'))
except Exception as e:
print(e)
pdb.set_trace()
p = np.concatenate((x0,
up,
np.concatenate(u_i),
np.concatenate(Duo_ubr_v),
np.array([self.alpha])))
solver_args = {}
solver_args['x0'] = u_i[a]
solver_args['lam_g0'] = l_i[a]
solver_args['lbx'] = -np.inf*np.ones(self.N*self.num_ua_d[a])
solver_args['ubx'] = np.inf*np.ones(self.N*self.num_ua_d[a])
solver_args['lbg'] = -np.inf*np.ones(np.sum(self.n_c))
solver_args['ubg'] = np.zeros(np.sum(self.n_c))
solver_args['p'] = p
sol = self.ps_br_solvers[a](**solver_args)
if self.verbose:
print(self.ps_br_solvers[a].stats()['return_status'])
if self.ps_br_solvers[a].stats()['success'] or self.ps_br_solvers[a].stats()['return_status'] == 'Maximum_Iterations_Exceeded':
u_i[a] = sol['x'].toarray().squeeze()
l_i[a] = sol['lam_g'].toarray().squeeze()
else:
pdb.set_trace()
# G_i[a] = self.f_Dua_Lps[a](np.concatenate(u_i), l_i[a], np.concatenate(u_im1), g, x0, up)
else:
uo = np.concatenate([u_i[b] for b in range(self.M) if b != a])
p = np.concatenate((x0, up, uo))
solver_args = {}
solver_args['x0'] = u_i[a]
solver_args['lam_g0'] = l_i[a]
solver_args['lbx'] = -np.inf*np.ones(self.N*self.num_ua_d[a])
solver_args['ubx'] = np.inf*np.ones(self.N*self.num_ua_d[a])
solver_args['lbg'] = -np.inf*np.ones(np.sum(self.n_cbr[a]))
solver_args['ubg'] = np.zeros(np.sum(self.n_cbr[a]))
solver_args['p'] = p
sol = self.br_solvers[a](**solver_args)
if self.verbose:
print(self.br_solvers[a].stats()['return_status'])
if self.br_solvers[a].stats()['success'] or self.br_solvers[a].stats()['return_status'] == 'Maximum_Iterations_Exceeded':
u_i[a] = sol['x'].toarray().squeeze()
l_i[a] = sol['lam_g'].toarray().squeeze()
else:
pdb.set_trace()
if self.debug_plot:
u_bar = copy.deepcopy(u_i)
if self.use_ps and a == 0 and self.alpha > 0:
u_bar[1] += Duo_ubr @ (u_bar[0] - u_im1[0])
self._update_debug_plot(u_bar, x0, up)
if self.pause_on_plot:
pdb.set_trace()
du = [np.linalg.norm(u_i[a]-u_im1[a]) for a in range(self.M)]
if self.verbose:
print('Delta strategy:', du)
if np.amax(du) < self.p_tol:
ibr_converged = True
if self.verbose: print('IBR converged')
break
u_im1 = copy.deepcopy(u_i)
ibr_it_dur = (datetime.now()-ibr_it_start).total_seconds()
if self.verbose:
print(f'IBR iteration {ibr_it} time: {ibr_it_dur}')
# print(f'SQP step size primal: {ps:.4e}, dual: {ds:.4e}')
# print('SQP iterate: ', u)
print('===================================================')
if self.verbose:
J = self.f_J(np.concatenate(u_i), x0, up)
print(f'ego cost: {J[0]}, tar cost: {J[1]}')
ibr_it += 1
if ibr_it >= self.ibr_iters:
if self.verbose: print('Max IBR iterations reached')
break
x_bar = np.array(self.f_state_rollout(np.concatenate(u_i), x0)).squeeze()
u_bar = []
for a in range(self.M):
u_bar.append(u_i[a].reshape((self.N, self.num_ua_d[a])))
self.q_pred = x_bar
self.u_pred = np.hstack(u_bar)
self.l_pred = l_i
solve_dur = (datetime.now()-solve_start).total_seconds()
print(f'Solve time: {solve_dur}')
J = self.f_J(np.concatenate(u_i), x0, up)
print(f'ego cost: {J[0]}, tar cost: {J[1]}')
solve_info['time'] = solve_dur
solve_info['num_iters'] = ibr_it
solve_info['status'] = ibr_converged
solve_info['cost'] = J
solve_info['cond'] = cond
solve_info['iter_sols'] = iter_sols
if self.debug_plot:
plt.ioff()
return solve_info
def solve_br(self, state: List[VehicleState], agent_id: int, params: np.ndarray):
if not self.initialized:
            raise RuntimeError('IBR controller is not initialized, run IBR.initialize() before calling IBR.solve_br()')
x = self.joint_dynamics.state2q(state)
n_u = self.num_ua_d[agent_id]
if self.u_ws[agent_id] is None:
warnings.warn('Initial guess of open loop input sequence not provided, using zeros')
self.u_ws[agent_id] = np.zeros((self.N, n_u))
# Construct initial guess for the decision variables and the runtime problem data
p = np.concatenate((x, self.u_prev, *params))
solver_args = {}
solver_args['x0'] = self.u_ws[agent_id].ravel()
solver_args['lbx'] = -np.inf*np.ones(self.N*n_u)
solver_args['ubx'] = np.inf*np.ones(self.N*n_u)
solver_args['lbg'] = -np.inf*np.ones(np.sum(self.n_cbr[agent_id]))
solver_args['ubg'] = np.zeros(np.sum(self.n_cbr[agent_id]))
solver_args['p'] = p
# if self.lam_g_ws is not None:
# solver_args['lam_g0'] = self.lam_g_ws
sol = self.br_solvers[agent_id](**solver_args)
if self.br_solvers[agent_id].stats()['success']:
# Unpack solution
u_sol = sol['x'].toarray().squeeze()
u_joint = []
i = 0
for a in range(self.M):
if a == agent_id:
u_joint.append(u_sol)
else:
u_joint.append(params[i])
i += 1
x_pred = np.array(self.f_state_rollout(np.concatenate(u_joint), x)).squeeze()
u_pred = np.reshape(u_sol, (self.N, n_u))
# slack_sol = sol['x'][(self.n_q+self.n_u)*self.N:]
# lam_g_ws = sol['lam_g'].toarray()
self.x_pred = x_pred
self.u_pred = u_pred
else:
u_joint = []
i = 0
for a in range(self.M):
if a == agent_id:
u_joint.append(self.u_pred[-1])
else:
u_joint.append(params[i][-self.num_ua_d[a]:])
i += 1
self.x_pred = np.vstack((x, self.x_pred[2:], self.joint_dynamics.fd(self.x_pred[-1], np.concatenate(u_joint)).toarray().squeeze()))
self.u_pred = np.vstack((self.u_pred[1:], self.u_pred[-1]))
# lam_g_ws = np.zeros(np.sum(self.n_ca[agent_id]))
return {'status': self.br_solvers[agent_id].stats()['success'],
'stats': self.br_solvers[agent_id].stats(),
'sol': sol}
def _evaluate_br(self, u, l, x0, up):
u = np.concatenate(u)
c = [ca.vertcat(*self.f_Cbr[a](u, x0, up)).toarray().squeeze() for a in range(self.M)]
G = [self.f_Dua_Lbr[a](u, l[a], x0, up).toarray().squeeze() for a in range(self.M)]
return c, G
def _evaluate_ps(self, u, l, x0, up):
u = np.concatenate(u)
c = ca.vertcat(*self.f_C(u, x0, up)).toarray().squeeze()
# G = [self.f_Dua_Lps[a](u, l[a], um, g, x0, up).toarray().squeeze() for a in range(self.M)]
return c
def _build_solver(self):
# Build best response OCPs
# Put optimal control problem in batch form
x_ph = [ca.MX.sym('x_ph_0', self.n_q)] # Initial state
# u_0, ..., u_N-1, u_-1
u_ph = [[ca.MX.sym(f'u{a}_ph_{k}', self.num_ua_d[a]) for k in range(self.N+1)] for a in range(self.M)] # Agent inputs
ua_ph = [ca.vertcat(*u_ph[a][:-1]) for a in range(self.M)] # [u_0^1, ..., u_{N-1}^1, u_0^2, ..., u_{N-1}^2]
uk_ph = [ca.vertcat(*[u_ph[a][k] for a in range(self.M)]) for k in range(self.N+1)] # [[u_0^1, u_0^2], ..., [u_{N-1}^1, u_{N-1}^2]]
for k in range(self.N):
x_ph.append(self.joint_dynamics.fd(x_ph[k], uk_ph[k]))
self.f_state_rollout = ca.Function('f_state_rollout', [ca.vertcat(*ua_ph), x_ph[0]], x_ph, self.options)
# Agent cost functions
J = [ca.DM.zeros(1) for _ in range(self.M)]
for a in range(self.M):
for k in range(self.N):
J[a] += self.costs_sym[a][k](x_ph[k], u_ph[a][k], u_ph[a][k-1])
J[a] += self.costs_sym[a][-1](x_ph[-1])
self.f_J = ca.Function('f_J', [ca.vertcat(*ua_ph), x_ph[0], uk_ph[-1]], J, self.options)
Cs = [[] for _ in range(self.N+1)] # Shared constraints
Ca = [[[] for _ in range(self.N+1)] for _ in range(self.M)] # Agent specific constraints
for k in range(self.N):
# Add shared constraints
if self.shared_constraints_sym[k] is not None:
Cs[k].append(self.shared_constraints_sym[k](x_ph[k], uk_ph[k], uk_ph[k-1]))
if len(Cs[k]) > 0:
Cs[k] = ca.vertcat(*Cs[k])
self.n_cs[k] = Cs[k].shape[0]
else:
Cs[k] = ca.DM()
# Add agent constraints
for a in range(self.M):
if self.constraints_sym[a][k] is not None:
Ca[a][k].append(self.constraints_sym[a][k](x_ph[k], u_ph[a][k], u_ph[a][k-1]))
# Add agent box constraints
if len(self.input_ub_idxs[a]) > 0:
Ca[a][k].append(u_ph[a][k][self.input_ub_idxs[a]] - self.input_ub[a][self.input_ub_idxs[a]])
if len(self.input_lb_idxs[a]) > 0:
Ca[a][k].append(self.input_lb[a][self.input_lb_idxs[a]] - u_ph[a][k][self.input_lb_idxs[a]])
if k > 0:
if len(self.state_ub_idxs[a]) > 0:
                        Ca[a][k].append(x_ph[k][self.state_ub_idxs[a]+int(np.sum(self.num_qa_d[:a]))] - self.state_ub[a][self.state_ub_idxs[a]])
"""Plotting methods."""
from collections import Counter
from itertools import cycle
from itertools import islice
import os
import pickle
import sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import LinearLocator
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import AutoMinorLocator
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .helpers import identify_peaks
from .helpers import load_pickle
from .helpers import millify
from .helpers import round_to_nearest
from .helpers import set_xrotation
__FRAME_COLORS__ = ["#1b9e77", "#d95f02", "#7570b3"]
__FRAME_COLORS__ = ["#fc8d62", "#66c2a5", "#8da0cb"]
DPI = 300
def setup_plot():
"""Setup plotting defaults"""
plt.rcParams["savefig.dpi"] = 120
plt.rcParams["figure.dpi"] = 120
plt.rcParams["figure.autolayout"] = False
plt.rcParams["figure.figsize"] = 12, 8
plt.rcParams["axes.labelsize"] = 18
plt.rcParams["axes.titlesize"] = 20
plt.rcParams["font.size"] = 10
plt.rcParams["lines.linewidth"] = 2.0
plt.rcParams["lines.markersize"] = 8
plt.rcParams["legend.fontsize"] = 14
sns.set_style("white")
sns.set_context("paper", font_scale=2)
def setup_axis(ax, axis="x", majorticks=5, minorticks=1, xrotation=45, yrotation=0):
"""Setup axes defaults
Parameters
----------
ax : matplotlib.Axes
axis : str
Setup 'x' or 'y' axis
majorticks : int
Length of interval between two major ticks
minorticks : int
Length of interval between two major ticks
xrotation : int
Rotate x axis labels by xrotation degrees
yrotation : int
Rotate x axis labels by xrotation degrees
"""
major_locator = MultipleLocator(majorticks)
major_formatter = FormatStrFormatter("%d")
minor_locator = MultipleLocator(minorticks)
if axis == "x":
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(major_formatter)
ax.xaxis.set_minor_locator(minor_locator)
elif axis == "y":
ax.yaxis.set_major_locator(major_locator)
ax.yaxis.set_major_formatter(major_formatter)
ax.yaxis.set_minor_locator(minor_locator)
elif axis == "both":
setup_axis(ax, "x", majorticks, minorticks, xrotation, yrotation)
setup_axis(ax, "y", majorticks, minorticks, xrotation, yrotation)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# ax.yaxis.set_minor_locator(AutoMinorLocator())#integer=True))
ax.tick_params(which="major", width=2, length=10)
ax.tick_params(which="minor", width=1, length=6)
ax.tick_params(axis="x", labelrotation=xrotation)
ax.tick_params(axis="y", labelrotation=yrotation)
# ax.yaxis.set_major_locator(LinearLocator(10))
# ax.yaxis.set_minor_locator(LinearLocator(10))
# set_xrotation(ax, xrotation)
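# A minimal sketch of ``setup_axis`` on a fresh axis (tick spacing values are
# illustrative):
#
#     fig, ax = plt.subplots()
#     ax.plot(range(50), range(50))
#     setup_axis(ax, axis="x", majorticks=10, minorticks=2, xrotation=0)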
def plot_read_length_dist(
read_lengths,
ax=None,
millify_labels=True,
input_is_stream=False,
title=None,
saveto=None,
ascii=False,
**kwargs
):
"""Plot read length distribution.
Parameters
----------
read_lengths : array_like
Array of read lengths
ax : matplotlib.Axes
Axis object
millify_labels : bool
True if labels should be formatted to
read millions/trillions etc
input_is_stream : bool
True if input is sent through stdin
saveto : str
Path to save output file to (<filename>.png/<filename>.pdf)
"""
if input_is_stream:
counter = {}
for line in read_lengths:
splitted = list([int(x) for x in line.strip().split("\t")])
counter[splitted[0]] = splitted[1]
read_lengths = Counter(counter)
elif isinstance(read_lengths, six.string_types):
if ".pickle" in str(read_lengths):
# Try opening as a pickle first
read_lengths = load_pickle(read_lengths)
elif isinstance(read_lengths, pd.Series):
pass
else:
# Some random encoding error
try:
read_lengths = pd.read_table(read_lengths)
read_lengths = pd.Series(
read_lengths["count"].tolist(),
index=read_lengths.read_length.tolist(),
)
except KeyError:
pass
fig = None
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
if "majorticks" not in kwargs:
kwargs["majorticks"] = 5
if "minorticks" not in kwargs:
kwargs["minorticks"] = 1
if "xrotation" not in kwargs:
kwargs["xrotation"] = 0
if isinstance(read_lengths, Counter) or isinstance(read_lengths, pd.Series):
read_lengths = pd.Series(read_lengths)
read_lengths_counts = read_lengths.values
else:
read_lengths = pd.Series(read_lengths)
read_lengths_counts = read_lengths.value_counts().sort_index()
ax.set_ylim(
min(read_lengths_counts), round_to_nearest(max(read_lengths_counts), 5) + 0.5
)
ax.set_xlim(
min(read_lengths.index) - 0.5,
round_to_nearest(max(read_lengths.index), 10) + 0.5,
)
ax.bar(read_lengths.index, read_lengths_counts)
setup_axis(ax, **kwargs)
reads_total = millify(read_lengths_counts.sum())
if title:
ax.set_title("{}\n Total reads = {}".format(title, reads_total))
else:
ax.set_title("Total reads = {}".format(reads_total))
if millify_labels:
ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
# sns.despine(trim=True, offset=20)
if saveto:
fig.tight_layout()
if ".dat" in saveto:
fig.savefig(saveto, format="png", dpi=DPI)
else:
fig.savefig(saveto, dpi=DPI)
if ascii:
import gnuplotlib as gp
sys.stdout.write(os.linesep)
gp.plot(
(read_lengths.index, read_lengths.values, {"with": "boxes"}),
terminal="dumb 160, 40",
unset="grid",
)
sys.stdout.write(os.linesep)
return ax, fig
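# A hedged usage sketch: ``read_lengths`` may be a Counter mapping read length
# to count (the numbers and file name here are made up):
#
#     lengths = Counter({28: 1200, 29: 3400, 30: 2100})
#     ax, fig = plot_read_length_dist(lengths, millify_labels=True, saveto="lengths.png")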
def plot_framewise_counts(
counts,
frames_to_plot="all",
ax=None,
title=None,
millify_labels=False,
position_range=None,
saveto=None,
ascii=False,
input_is_stream=False,
**kwargs
):
"""Plot framewise distribution of reads.
Parameters
----------
counts : Series
A series with position as index and value as counts
frames_to_plot : str or range
        A comma-separated list of frames to highlight or a range
ax : matplotlib.Axes
Default none
saveto : str
Path to save output file to (<filename>.png/<filename>.pdf)
"""
# setup_plot()
if input_is_stream:
counts_counter = {}
for line in counts:
splitted = list([int(x) for x in line.strip().split("\t")])
counts_counter[splitted[0]] = splitted[1]
counts = Counter(counts_counter)
elif isinstance(counts, six.string_types):
try:
# Try opening as a pickle first
counts = load_pickle(counts)
except KeyError:
pass
if isinstance(counts, Counter):
counts = pd.Series(counts)
# TODO
if isinstance(frames_to_plot, six.string_types) and frames_to_plot != "all":
frames_to_plot = list([int(x) for x in frames_to_plot.rstrip().split(",")])
if isinstance(position_range, six.string_types):
splitted = list([int(x) for x in position_range.strip().split(":")])
position_range = list(range(splitted[0], splitted[1] + 1))
if position_range:
counts = counts[list(position_range)]
fig = None
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
if "majorticks" not in kwargs:
kwargs["majorticks"] = 10
if "minorticks" not in kwargs:
kwargs["minorticks"] = 5
if "xrotation" not in kwargs:
kwargs["xrotation"] = 90
setup_axis(ax, **kwargs)
ax.set_ylabel("Number of reads")
# ax.set_xlim(
# min(counts.index) - 0.6,
# round_to_nearest(max(counts.index), 10) + 0.6)
barlist = ax.bar(counts.index, counts.values)
barplot_colors = list(islice(cycle(__FRAME_COLORS__), None, len(counts.index)))
for index, cbar in enumerate(barlist):
cbar.set_color(barplot_colors[index])
ax.legend(
(barlist[0], barlist[1], barlist[2]),
("Frame 1", "Frame 2", "Frame 3"),
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
loc=3,
ncol=3,
mode="expand",
borderaxespad=0.0,
)
if title:
ax.set_title(title)
if millify_labels:
ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
if ascii:
sys.stdout.write(os.linesep)
import gnuplotlib as gp
gp.plot(
np.array(counts.index.tolist()),
np.array(counts.values.tolist()),
_with="boxes", # 'points pointtype 0',
terminal="dumb 200,40",
unset="grid",
)
sys.stdout.write(os.linesep)
set_xrotation(ax, kwargs["xrotation"])
fig.tight_layout()
if saveto:
fig.tight_layout()
fig.savefig(saveto, dpi=DPI)
return ax
def plot_read_counts(
counts,
ax=None,
marker=None,
color="royalblue",
title=None,
label=None,
millify_labels=False,
identify_peak=True,
saveto=None,
position_range=None,
ascii=False,
input_is_stream=False,
ylabel="Normalized RPF density",
**kwargs
):
"""Plot RPF density aro und start/stop codons.
Parameters
----------
counts : Series/Counter
A series with coordinates as index and counts as values
ax : matplotlib.Axes
Axis to create object on
marker : string
'o'/'x'
color : string
Line color
label : string
Label (useful only if plotting multiple objects on same axes)
millify_labels : bool
True if labels should be formatted to
read millions/trillions etc
saveto : str
Path to save output file to (<filename>.png/<filename>.pdf)
"""
# setup_plot()
if input_is_stream:
counts_counter = {}
for line in counts:
splitted = list([int(x) for x in line.strip().split("\t")])
counts_counter[splitted[0]] = splitted[1]
counts = Counter(counts_counter)
elif isinstance(counts, six.string_types):
try:
# Try opening as a pickle first
counts = load_pickle(counts)
except IndexError:
counts_pd = pd.read_table(counts)
counts = pd.Series(
counts_pd["count"].tolist(), index=counts_pd["position"].tolist()
)
except KeyError:
pass
if not isinstance(counts, pd.Series):
counts = pd.Series(counts)
if isinstance(position_range, six.string_types):
splitted = list([int(x) for x in position_range.strip().split(":")])
position_range = np.arange(splitted[0], splitted[1] + 1)
if position_range is not None:
counts = counts[position_range]
fig = None
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
if "majorticks" not in kwargs:
kwargs["majorticks"] = 10
if "minorticks" not in kwargs:
kwargs["minorticks"] = 5
if "xrotation" not in kwargs:
kwargs["xrotation"] = 0
if "yrotation" not in kwargs:
kwargs["yrotation"] = 0
if not marker:
ax.plot(
counts.index,
counts.values,
color=color,
linewidth=1,
markersize=1.5,
label=label,
)
else:
ax.plot(
counts.index,
counts.values,
color=color,
marker="o",
linewidth=1,
markersize=1.5,
label=label,
)
# ax.set_xlim(round_to_nearest(ax.get_xlim()[0], 50) - 0.6,
# round_to_nearest(ax.get_xlim()[1], 50) + 0.6)
peak = None
if identify_peak:
peak = identify_peaks(counts)
ax.axvline(x=peak, color="r", linestyle="dashed")
ax.text(peak + 0.5, ax.get_ylim()[1] * 0.9, "{}".format(peak), color="r")
if millify_labels:
ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
setup_axis(ax, **kwargs)
ax.set_xlim(
round_to_nearest(min(counts.index), 10) - 1,
round_to_nearest(max(counts.index), 10) + 1,
)
if ylabel:
ax.set_ylabel(ylabel)
if title:
ax.set_title(title)
# sns.despine(trim=True, offset=10)
if saveto:
fig.tight_layout()
fig.savefig(saveto, dpi=DPI)
if ascii:
sys.stdout.write(os.linesep)
import gnuplotlib as gp
gp.plot(
np.array(counts.index.tolist()),
np.array(counts.values.tolist()),
_with="lines", # 'points pointtype 0',
terminal="dumb 200,40",
unset="grid",
)
sys.stdout.write(os.linesep)
return ax, fig, peak
def plot_featurewise_barplot(
utr5_counts, cds_counts, utr3_counts, ax=None, saveto=None, **kwargs
):
"""Plot barplots for 5'UTR/CDS/3'UTR counts.
Parameters
----------
utr5_counts : int or dict
Total number of reads in 5'UTR region
or alternatively a dictionary/series with
genes as key and 5'UTR counts as values
cds_counts : int or dict
Total number of reads in CDs region
or alternatively a dictionary/series with
genes as key and CDS counts as values
utr3_counts : int or dict
Total number of reads in 3'UTR region
or alternatively a dictionary/series with
genes as key and 3'UTR counts as values
saveto : str
Path to save output file to (<filename>.png/<filename>.pdf)
"""
fig = None
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
barlist = ax.bar([0, 1, 2], [utr5_counts, cds_counts, utr3_counts])
barlist[0].set_color("#1b9e77")
barlist[1].set_color("#d95f02")
barlist[2].set_color("#7570b3")
ax.set_xticks([0, 1, 2])
ax.set_xticklabels(["5'UTR", "CDS", "3'UTR"])
max_counts = np.max(np.hstack([utr5_counts, cds_counts, utr3_counts]))
setup_axis(
ax=ax, axis="y", majorticks=max_counts // 10, minorticks=max_counts // 20
)
ax.set_ylabel("# RPFs")
# sns.despine(trim=True, offset=10)
if saveto:
fig.tight_layout()
fig.savefig(saveto, dpi=DPI)
return ax, fig
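# A minimal sketch with made-up read totals for the three regions (file name is
# illustrative):
#
#     ax, fig = plot_featurewise_barplot(1500, 92000, 4300, saveto="features.png")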
def create_wavelet(data, ax):
import pycwt as wavelet
t = data.index
N = len(data.index)
p = np.polyfit(data.index, data, 1)
data_notrend = data - np.polyval(p, data.index)
std = data_notrend.std() # Standard deviation
var = std ** 2 # Variance
data_normalized = data_notrend / std # Normalized dataset
mother = wavelet.Morlet(6)
dt = 1
s0 = 2 * dt # Starting scale, in this case 2 * 0.25 years = 6 months
dj = 1 / 12 # Twelve sub-octaves per octaves
J = 7 / dj # Seven powers of two with dj sub-octaves
alpha, _, _ = wavelet.ar1(data) # Lag-1 autocorrelation for red noise
wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
data_normalized, dt=dt, dj=dj, s0=s0, J=J, wavelet=mother
)
iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std
power = (np.abs(wave)) ** 2
fft_power = np.abs(fft) ** 2
period = 1 / freqs
power /= scales[:, None]
signif, fft_theor = wavelet.significance(
1.0, dt, scales, 0, alpha, significance_level=0.95, wavelet=mother
)
sig95 = np.ones([1, N]) * signif[:, None]
sig95 = power / sig95
glbl_power = power.mean(axis=1)
dof = N - scales # Correction for padding at edges
glbl_signif, tmp = wavelet.significance(
var, dt, scales, 1, alpha, significance_level=0.95, dof=dof, wavelet=mother
)
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
ax.contourf(
t,
np.log2(period),
        np.log2(power),
        np.log2(levels),
        extend="both",
    )
#!/usr/bin/env python
"""
This file is part of IMSIS
Licensed under the MIT license:
http://www.opensource.org/licenses/MIT-license
This module contains image processing methods
"""
import os
import sys
import cv2 as cv
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.misc
from matplotlib import pyplot as plt
import numpy.random as random
from matplotlib.colors import hsv_to_rgb
from datetime import datetime
class Image(object):
@staticmethod
def load(filename, verbose=True):
"""Load image
Supported file formats: PNG, TIF, BMP
        note: the image is loaded unchanged (``cv.imread(filename, -1)``), so the original bit depth and channels are preserved.
        :Parameters: filename, verbose=True
:Returns: image
"""
img = None
if (os.path.isfile(filename)):
img = cv.imread(filename, -1)
if (verbose == True):
print("load file ", filename, img.shape, img.dtype)
else:
print('Error, file does not exist. ', filename)
sys.exit()
try:
q = img.shape
except:
print('Error, File could not be read. ', filename)
sys.exit()
return img
@staticmethod
def crop_rectangle(img, rect):
"""Crop an image using rectangle shape as input [(x0,y0),(x1,y1)]
:Parameters: image, rectangle
:Returns: image
"""
if len(rect) > 0:
out = Image.crop(img, rect[0][0], rect[0][1], rect[1][0], rect[1][1])
else:
print("Error: rectangle not defined.")
out = img
return out
@staticmethod
def crop(img, x0, y0, x1, y1):
"""Crop an image using pixels at x0,y0,x1,y1
:Parameters: image, x0, y0, x1, y1
:Returns: image
"""
res = img[y0:y1, x0:x1] # Crop from y0:y1,x0:x1
# print("Cropped region: (" , x0,y0,x1,y1,")")
return res
@staticmethod
def crop_percentage(img, scale=1.0):
"""Crop an image centered
:Parameters: image, scale=1.0
:Returns: image
"""
center_x, center_y = img.shape[1] / 2, img.shape[0] / 2
width_scaled, height_scaled = img.shape[1] * scale, img.shape[0] * scale
left_x, right_x = center_x - width_scaled / 2, center_x + width_scaled / 2
top_y, bottom_y = center_y - height_scaled / 2, center_y + height_scaled / 2
img_cropped = img[int(top_y):int(bottom_y), int(left_x):int(right_x)]
return img_cropped
@staticmethod
def resize(img, factor=0.5):
"""Resize image
:Parameters: image, factor
:Returns: image
"""
small = cv.resize(img, (0, 0), fx=factor, fy=factor)
return small
@staticmethod
def _blur_edge(img, d=31):
"""blur edge
:Parameters: image, d
:Returns: image
"""
h, w = img.shape[:2]
img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP)
img_blur = cv.GaussianBlur(img_pad, (2 * d + 1, 2 * d + 1), -1)[d:-d, d:-d]
y, x = np.indices((h, w))
dist = np.dstack([x, w - x - 1, y, h - y - 1]).min(-1)
w = np.minimum(np.float32(dist) / d, 1.0)
return img * w + img_blur * (1 - w)
@staticmethod
def _motion_kernel(angle, d, sz=65):
"""determine motion kernel value
:Parameters: angle, d, size
:Returns: kernel
"""
kern = np.ones((1, d), np.float32)
c, s = np.cos(angle), np.sin(angle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = sz // 2
A[:, 2] = (sz2, sz2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0))
        kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC)
return kern
@staticmethod
def _defocus_kernel(d, sz=65):
"""determine defocus kernel value
:Parameters: d, size
:Returns: kernel
"""
kern = np.zeros((sz, sz), np.uint8)
cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1)
kern = np.float32(kern) / 255.0
return kern
@staticmethod
def _image_stats(image):
# compute the mean and standard deviation of each channel
(l, a, b) = cv.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return (lMean, lStd, aMean, aStd, bMean, bStd)
@staticmethod
def save(img, fn):
"""Save image (PNG,TIF)
:Parameters: image, filename
"""
try:
if (os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn), exist_ok=True) #mkdir if not empty
cv.imwrite(fn, img)
print("file saved. ", fn)
except:
print("Error: cannot save file {}".format(fn))
@staticmethod
def save_withuniquetimestamp(img):
"""Save PNG image with unique timestamp.
:Parameters: image
"""
path = "./output/"
os.makedirs(os.path.dirname(path), exist_ok=True)
sttime = datetime.now().strftime('Image_%Y%m%d%H%M%S')
fn = path + sttime + '.png'
print("file saved. ", fn)
cv.imwrite(fn, img)
'''
@staticmethod
def PSNR(img1, img2):
"""Return peaksignal to noise ratio
:Parameters: image1, image2
:Returns: float
"""
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return 100
PIXEL_MAX = 255.0
# print(np.sqrt(mse))
n = np.sqrt(mse)
# n=255/3.525
return 20 * np.log10(PIXEL_MAX / n)
'''
# implemented twice remove the 2nd one
@staticmethod
def cut(img, center=[0, 0], size=[0, 0]):
"""return a image cut out
:Parameters: image, center=[0, 0], size=[0, 0]
:Returns: image
"""
x0 = center[0] - round(size[0] * 0.5)
x1 = center[0] + round(size[0] * 0.5)
y0 = center[1] - round(size[1] * 0.5)
y1 = center[1] + round(size[1] * 0.5)
if x0 < 0:
x0 = 0
if y0 < 0:
y0 = 0
template = Image.crop(img, int(x0), int(y0), int(x1), int(y1))
return template
@staticmethod
def _multipleof2(number):
"""Rounds the given number to the nearest multiple of two."""
remainder = number % 2
if remainder > 1:
number += (2 - remainder)
else:
number -= remainder
return int(number)
@staticmethod
def subtract(img0, img1):
"""subtract 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.subtract(img0, img1)
return out
'''
@staticmethod
def add(img0, img1):
"""add 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.addWeighted(img0, 0.5, img1, 0.5, 0.0)
return out
'''
@staticmethod
def add(img0, img1, alpha=0.5):
"""add 2 images weighted (default alpha=0.5)
:Parameters: image1, image2, alpha
:Returns: image
"""
a = img0
b = img1
beta = 1 - alpha
        out = cv.addWeighted(a, alpha, b, beta, 0.0)
return out
@staticmethod
def new(height, width):
"""Create a new blank image
:Parameters: height,width
:Returns: image
"""
img = np.zeros((height, width), np.uint8)
return img
@staticmethod
def gaussiankernel(kernlen=21, nsig=3):
"""returns a 2D gaussian kernel
:Parameters: kernelsize, nsig
:Returns: image
"""
        import scipy.stats as st  # local import: scipy.stats is not imported at module level
        x = np.linspace(-nsig, nsig, kernlen + 1)
        kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d / kern2d.sum()
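    # A quick sanity-check sketch for ``gaussiankernel``: the kernel is square,
    # normalized to sum to one, and peaks at the center:
    #
    #     k = Image.gaussiankernel(kernlen=21, nsig=3)
    #     assert k.shape == (21, 21)
    #     assert abs(k.sum() - 1.0) < 1e-9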
@staticmethod
def info(img):
"""get image properties
:Parameters: img
"""
print(img.shape)
print(img.size)
print(img.dtype)
@staticmethod
def unique_colours(image):
"""get number of unique colors in an image
:Parameters: img
"""
print(image.shape)
if (len(image.shape) == 3):
out = len(np.unique(image.reshape(-1, image.shape[2]), axis=0))
# b, g, r = cv.split(image)
# out_in_32U_2D = np.int32(b) << 16 + np.int32(g) << 8 + np.int32(r) # bit wise shift 8 for each channel.
# out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
# np.unique(out_in_32U_1D)
# out = len(np.unique(out_in_32U_1D))
else:
out_in_32U_2D = np.int32(image) # bit wise shift 8 for each channel.
out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
np.unique(out_in_32U_1D)
out = len(np.unique(out_in_32U_1D))
print(out)
return out
@staticmethod
def video_to_imagesondisk(file_in='video.avi', path_out='images'):
"""video to image
:Parameters: video_filename
:Returns: images
"""
video_file = file_in
output_folder = path_out
vidcap = cv.VideoCapture(video_file)
success, image = vidcap.read()
count = 0
success = True
while success:
fn = output_folder + "/" + "frame%d.png" % count
cv.imwrite(fn, image) # save frame as JPEG file
success, image = vidcap.read()
print('Read a new frame: ', success, fn)
count += 1
print("ready.")
@staticmethod
def imagesfromdisk_to_video(path_in, file_out='video.avi', framerate=15):
"""images from file to video
:Parameters: path with list of frames
:Returns: video
"""
image_folder = path_in
video_name = file_out
output_folder = "output"
fn = image_folder + "/" + output_folder + "/"
print(fn)
os.makedirs(os.path.dirname(fn), exist_ok=True)
images = [img for img in os.listdir(image_folder) if (img.endswith(".tif") or img.endswith(".png"))]
frame = cv.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(fn + video_name, 0, framerate, (width, height))
for image in images:
video.write(cv.imread(os.path.join(image_folder, image)))
cv.destroyAllWindows()
video.release()
'''
@staticmethod
def zoom(image0, factor=2):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0,factor)
x0 = int(factor*w/4)
y0 = int(factor*h/4)
x1 = x0+w
y1 = y0+h
print(x0,y0,x1,y1,w,h,img.shape[0],img.shape[1])
img = Image.crop(img,x0,y0,x1,y1)
return img
'''
@staticmethod
def zoom(image0, factor=2, cx=0.5, cy=0.5):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0, factor)
x0 = int(factor * w * cx * 0.5)
y0 = int(factor * h * cy * 0.5)
x1 = x0 + w
y1 = y0 + h
# print(x0, y0, x1, y1, w, h, img.shape[0], img.shape[1])
img = Image.crop(img, x0, y0, x1, y1)
return img
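    # A hedged sketch of ``zoom``: the result keeps the original size while
    # showing a region magnified by ``factor`` around the fractional center
    # (cx, cy); the file path is illustrative:
    #
    #     img = Image.load("input.png")
    #     zoomed = Image.zoom(img, factor=2, cx=0.5, cy=0.5)
    #     assert zoomed.shape[:2] == img.shape[:2]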
class Process:
@staticmethod
def directionalsharpness(img, ksize=-1):
"""
DirectionalSharpness
        Measure sharpness in X and Y separately
Note: Negative slopes are missed when converting to unaryint8, therefore convert to float
:Parameters: image, kernel
:Returns: gradientx , gradienty, gradientxy, theta
"""
sobelx64f = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=ksize)
sobely64f = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=ksize)
grad = np.power(np.power(sobelx64f, 2.0) + np.power(sobely64f, 2.0), 0.5)
theta = np.arctan2(sobely64f, sobelx64f)
Gx = np.absolute(sobelx64f)
Gy = np.absolute(sobely64f)
mx = cv.mean(Gx)[0]
my = cv.mean(Gy)[0]
return mx, my, grad, theta
@staticmethod
def gradient_image(img, kx=11, ky=3):
"""Create a gradient image
Method used: gradient by bi-directional sobel filter
:Parameters: image, blurkernelx, blurkernely
:Returns: image
"""
# Calculate gradient
gx = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=1)
gy = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=1)
# mag, angle = cv.cartToPolar(gx, gy, angleInDegrees=True)
blurredgx = cv.GaussianBlur(gx, (kx, ky), 1)
blurredgy = cv.GaussianBlur(gy, (kx, ky), 1)
mag, angle = cv.cartToPolar(blurredgx, blurredgy)
return mag, angle
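        # A brief sketch: magnitude and angle of the blurred bi-directional
        # Sobel gradient (the kernel sizes are illustrative, ``gray`` is a
        # grayscale image assumed to exist):
        #
        #     mag, angle = Image.Process.gradient_image(gray, kx=11, ky=3)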
@staticmethod
def gradient_image_nonmaxsuppressed(img, blur=5, threshold=40):
"""Apply non maximum suppressed gradient filter sequence
threshold not used??
:Parameters: image, blur=5, threshold=40
:Returns: image, angle
"""
def nonmaxsuppression(im, grad):
# Non-maximum suppression
gradSup = grad.copy()
for r in range(im.shape[0]):
for c in range(im.shape[1]):
# Suppress pixels at the image edge
if r == 0 or r == im.shape[0] - 1 or c == 0 or c == im.shape[1] - 1:
gradSup[r, c] = 0
continue
tq = thetaQ[r, c] % 4
if tq == 0: # 0 is E-W (horizontal)
if grad[r, c] <= grad[r, c - 1] or grad[r, c] <= grad[r, c + 1]:
gradSup[r, c] = 0
if tq == 1: # 1 is NE-SW
if grad[r, c] <= grad[r - 1, c + 1] or grad[r, c] <= grad[r + 1, c - 1]:
gradSup[r, c] = 0
if tq == 2: # 2 is N-S (vertical)
if grad[r, c] <= grad[r - 1, c] or grad[r, c] <= grad[r + 1, c]:
gradSup[r, c] = 0
if tq == 3: # 3 is NW-SE
if grad[r, c] <= grad[r - 1, c - 1] or grad[r, c] <= grad[r + 1, c + 1]:
gradSup[r, c] = 0
return gradSup
img = Image.Convert.toGray(img)
im = np.array(img, dtype=float) # Convert to float to prevent clipping values
# Gaussian Blur
im2 = cv.GaussianBlur(im, (blur, blur), 0)
# Find gradients
im3h = cv.filter2D(im2, -1, np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]))
im3v = cv.filter2D(im2, -1, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# Get gradient and direction
grad = np.power(np.power(im3h, 2.0) + np.power(im3v, 2.0), 0.5)
theta = np.arctan2(im3v, im3h)
thetaQ = (np.round(theta * (5.0 / np.pi)) + 5) % 5 # Quantize direction
gradSup = nonmaxsuppression(im, grad)
return gradSup, thetaQ
@staticmethod
def nonlocalmeans(img, h=10, templatewindowsize=7, searchwindowsize=21):
"""Apply a non-local-means filter with filtering strength (h), template windowsize (blocksize), searchwindowsize
:Parameters: image, h=10, templatewindowsize=7, searchwindowsize=21
:Returns: image
"""
# img = cv.pyrDown(img)
dst = cv.fastNlMeansDenoising(img, None, h, templatewindowsize, searchwindowsize)
return dst
@staticmethod
def deconvolution_wiener(img, d=3, noise=11):
"""Apply Wiener deconvolution
grayscale images only
:Parameters: image, d, noise
:Returns: kernel
"""
img = Image.Convert.toGray(img)
noise = 10 ** (-0.1 * noise)
img = np.float32(img) / 255.0
IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
psf = Image._defocus_kernel(d)
psf /= psf.sum()
psf_pad = np.zeros_like(img)
kh, kw = psf.shape
psf_pad[:kh, :kw] = psf
PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)
PSF2 = (PSF ** 2).sum(-1)
iPSF = PSF / (PSF2 + noise)[..., np.newaxis]
RES = cv.mulSpectrums(IMG, iPSF, 0)
res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
res = np.roll(res, -kh // 2, 0)
res = np.roll(res, -kw // 2, 1)
return res
@staticmethod
def median(image, kernel=5):
"""Apply a median filter
:Parameters: image
:Returns: image
"""
out = cv.medianBlur(image, kernel)
return out
@staticmethod
def cannyedge_auto(image, sigma=0.33):
"""Apply a Canny Edge filter automatically
:Parameters: image, sigma
:Returns: image
"""
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv.Canny(image, lower, upper)
return edged
# smooth, threshold
@staticmethod
def gaussian_blur(img, smooth=3):
"""Gaussian blur image with kernel n
:Parameters: image, kernel
:Returns: image
"""
# img = cv.pyrDown(img)
imout = cv.GaussianBlur(img, (smooth, smooth), 0)
return imout
@staticmethod
def unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.0, threshold=0):
"""Unsharp mask filter
:Parameters: image, kernel_size=5, sigma=1.0, amount=1.0, threshold=0
:Returns: image
"""
            blurred = cv.GaussianBlur(img, (kernel_size, kernel_size), sigma)
sharpened = float(amount + 1) * img - float(amount) * blurred
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
sharpened = sharpened.round().astype(np.uint8)
if threshold > 0:
low_contrast_mask = np.absolute(img - blurred) < threshold
np.copyto(sharpened, img, where=low_contrast_mask)
return sharpened
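        # A minimal sketch of ``unsharp_mask`` (parameter values and the file
        # path are illustrative):
        #
        #     img = Image.load("input.png")
        #     sharp = Image.Process.unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.5)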
@staticmethod
def FFT(img):
"""Apply a fourier transform
generate a discrete fourier transform shift matrix and a magnitude spectrum image for viewing
:Parameters: image
:Returns: dft_shift, specimage
"""
# img = Image.Convert.toGray(img)
# do dft saving as complex output
dft = np.fft.fft2(img, axes=(0, 1))
# apply shift of origin to center of image
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
# magnitude_spectrum[np.isneginf(magnitude_spectrum)] = 0
return dft_shift, spec
@staticmethod
def IFFT(fft_img):
"""Apply an inverse fourier transform
:Parameters: image_fft
:Returns: image
"""
back_ishift = np.fft.ifftshift(fft_img)
img_back = np.fft.ifft2(back_ishift, axes=(0, 1))
img_back = np.abs(img_back).clip(0, 255).astype(np.uint8)
return img_back
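        # A hedged round-trip sketch: FFT followed by IFFT on the unmodified
        # spectrum should reproduce the grayscale input up to uint8 rounding
        # (the file path is illustrative):
        #
        #     gray = Image.Convert.toGray(Image.load("input.png"))
        #     dft_shift, spec = Image.Process.FFT(gray)
        #     restored = Image.Process.IFFT(dft_shift)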
@staticmethod
def FD_bandpass_filter(img, D0=5, w=10, bptype=0):
gray = Image.Convert.toGray(img)
kernel = Image.FilterKernels.ideal_bandpass_kernel(gray, D0, w)
if bptype == 1:
kernel = Image.FilterKernels.gaussian_bandpass_kernel(gray, D0, w)
elif bptype == 2:
kernel = Image.FilterKernels.butterworth_bandpass_kernel(gray, D0, w)
gray = np.float64(gray)
gray_fft = np.fft.fft2(gray)
gray_fftshift = np.fft.fftshift(gray_fft)
dst_filtered = np.multiply(kernel, gray_fftshift)
dst_ifftshift = np.fft.ifftshift(dst_filtered)
dst_ifft = np.fft.ifft2(dst_ifftshift)
dst = np.abs(np.real(dst_ifft))
dst = np.clip(dst, 0, 255)
out = np.uint8(dst)
return out, kernel
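        # A minimal sketch of the frequency-domain band-pass filter; ``bptype``
        # selects the ideal (0), gaussian (1) or butterworth (2) kernel:
        #
        #     filtered, kernel = Image.Process.FD_bandpass_filter(img, D0=5, w=10, bptype=2)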
'''
def FFT_highpass(img, maskradius=8, maskblur=19):
dft = np.fft.fft2(img, axes=(0, 1))
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
radius = maskradius
mask = np.zeros_like(img, dtype=np.float32)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv.circle(mask, (cx, cy), radius, (1, 1, 1), -1)[0]
mask = 1 - mask
mask = 1 + 0.5 * mask # high boost filter (sharpening) = 1 + fraction of high pass filter
if maskblur > 0:
mask2 = cv.GaussianBlur(mask, (maskblur, maskblur), 0)
dft_shift_masked2 = np.multiply(dft_shift, mask2)
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
img_filtered2 = np.fft.ifft2(back_ishift_masked2, axes=(0, 1))
out = np.abs(img_filtered2).clip(0, 255).astype(np.uint8)
else:
dft_shift_masked = np.multiply(dft_shift, mask)
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
img_filtered = np.fft.ifft2(back_ishift_masked, axes=(0, 1))
out = np.abs(img_filtered).clip(0, 255).astype(np.uint8)
mask2= mask
return out, mask2
def FFT_lowpass(img, maskradius=8, maskblur=19):
dft = np.fft.fft2(img, axes=(0, 1))
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
radius = maskradius
mask = np.zeros_like(img, dtype=np.float32)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv.circle(mask, (cx, cy), radius, (255, 255, 255), -1)[0]
if maskblur > 0:
mask2 = cv.GaussianBlur(mask, (maskblur, maskblur), 0)
dft_shift_masked2 = np.multiply(dft_shift, mask2)/ 255
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
img_filtered2 = np.fft.ifft2(back_ishift_masked2, axes=(0, 1))
out = np.abs(img_filtered2).clip(0, 255).astype(np.uint8)
else:
dft_shift_masked = np.multiply(dft_shift, mask)/ 255
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
img_filtered = np.fft.ifft2(back_ishift_masked, axes=(0, 1))
out = np.abs(img_filtered).clip(0, 255).astype(np.uint8)
mask2 = mask
return out,mask2
'''
'''
@staticmethod
def FFT_lowpass(img, radius=16, lpType=2, n=2):
"""Lowpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, radius, lptype, n
:Returns: image, mask
"""
def createLPFilter(shape, center, radius, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.power(c, 2.0) + np.power(r, 2.0)
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # ideal low-pass filter
lpFilter = np.copy(d)
lpFilter[lpFilter < pow(radius, 2.0)] = 1
lpFilter[lpFilter >= pow(radius, 2.0)] = 0
elif lpType == 1: # Butterworth low-pass filter
lpFilter = 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
elif lpType == 2: # Gaussian low pass filter
lpFilter = np.exp(-d / (2 * pow(radius, 2.0)))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createLPFilter(dft_shift.shape, (cx, cy), radius=radius, lpType=lpType, n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_highpass(img, radius=16, lpType=2, n=2):
"""Highpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, radius, lptype, n
:Returns: image, mask
"""
def createHPFilter(shape, center, radius, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.power(c, 2.0) + np.power(r, 2.0)
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # Ideal high pass filter
lpFilter = np.copy(d)
lpFilter[lpFilter < pow(radius, 2.0)] = 0
lpFilter[lpFilter >= pow(radius, 2.0)] = 1
elif lpType == 1: # Butterworth Highpass Filters
lpFilter = 1.0 - 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
elif lpType == 2: # Gaussian Highpass Filter
lpFilter = 1.0 - np.exp(-d / (2 * pow(radius, 2.0)))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createHPFilter(dft_shift.shape, (cx, cy), radius=radius, lpType=lpType, n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_bandpass(img, bandcenter=32, bandwidth=16, lpType=2, n=2):
"""Bandpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, bandcenter, bandwidth, lptype, n
:Returns: image, mask
"""
def createBPFilter(shape, center, bandCenter, bandWidth, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.sqrt(np.power(c, 2.0) + np.power(r, 2.0))
lpFilter_matrix = np.zeros((rows,cols), np.float32)
if lpType == 0: # Ideal bandpass filter
lpFilter = np.copy(d)
lpFilter[:, :] = 1
lpFilter[d > (bandCenter + bandWidth / 2)] = 0
lpFilter[d < (bandCenter - bandWidth / 2)] = 0
elif lpType == 1: # Butterworth bandpass filter
if bandCenter ==0:
bandCenter=1
lpFilter = 1.0 - 1.0 / (1 + np.power(d * bandWidth / (d - pow(bandCenter, 2)), 2 * n))
elif lpType == 2: # Gaussian bandpass filter
if bandWidth ==0:
bandWidth=1
lpFilter = np.exp(-pow((d - pow(bandCenter, 2)) / (d * bandWidth), 2))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createBPFilter(dft_shift.shape, (cx, cy), bandCenter=bandcenter, bandWidth=bandwidth, lpType=lpType,
n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
#print(mask.dtype,dft_shift.dtype)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_bandstop(img, bandcenter=32, bandwidth=16, lpType=2, n=2):
"""Bandstop filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, bandcenter, bandwidth, lptype, n
:Returns: image, mask
"""
def createBRFilter(shape, center, bandCenter, bandWidth, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.sqrt(np.power(c, 2.0) + np.power(r, 2.0))
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # Ideal band stop filter
lpFilter = np.copy(d)
lpFilter[:, :] = 0
lpFilter[d > (bandCenter + bandWidth / 2)] = 1
lpFilter[d < (bandCenter - bandWidth / 2)] = 1
elif lpType == 1: # Butterworth band stop filter
lpFilter = 1.0 / (1 + np.power(d * bandWidth / (d - pow(bandCenter, 2)), 2 * n))
elif lpType == 2: # Gaussian band stop filter
lpFilter = 1 - np.exp(-pow((d - pow(bandCenter, 2)) / (d * bandWidth), 2))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createBRFilter(dft_shift.shape, (cx, cy), bandCenter=bandcenter, bandWidth=bandwidth, lpType=lpType,
n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
'''
@staticmethod
def pencilsketch(img):
"""Apply a pencil sketch filter to a grayscale image
:Parameters: image
:Returns: image
"""
def dodgeV2(image, mask):
return cv.divide(image, 255 - mask, scale=256)
def burnV2(image, mask):
return 255 - cv.divide(255 - image, 255 - mask, scale=256)
img_gray_inv = 255 - img
img_blur = cv.GaussianBlur(img_gray_inv, ksize=(21, 21),
sigmaX=0, sigmaY=0)
out = dodgeV2(img, img_blur)
return out
@staticmethod
def sepia(img):
"""Apply sepia filter
:Parameters: image
:Returns: image
"""
res = img.copy()
res = cv.cvtColor(res, cv.COLOR_BGR2RGB) # converting to RGB as sepia matrix is for RGB
res = np.array(res, dtype=np.float64)
res = cv.transform(res, np.matrix([[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]))
res[np.where(res > 255)] = 255 # clipping values greater than 255 to 255
res = np.array(res, dtype=np.uint8)
res = cv.cvtColor(res, cv.COLOR_RGB2BGR)
return res
@staticmethod
def gaussian_noise(img, prob=0.25):
""" Add gaussian noise
:Parameters: image, sigma=0.25
:Returns: image
"""
noise_img = img.astype(np.float64)
stddev = prob * 100.0
noise = np.random.randn(*img.shape) * stddev
noise_img += noise
noise_img = np.clip(noise_img, 0, 255).astype(np.uint8)
return noise_img
@staticmethod
def salt_and_pepper_noise(image, prob=0.01):
"""Add salt and pepper noise
:Parameters: image, sigma=0.01
:Returns: image
"""
output = np.zeros(image.shape, np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
output[i][j] = 0
elif rdn > thres:
output[i][j] = 255
else:
output[i][j] = image[i][j]
return output
@staticmethod
def poisson_noise(img, prob=0.25):
""" Induce poisson noise
:Parameters: image, lambda=0.25
:Returns: image
"""
# Noise range from 0 to 100
"""
seed = 42
data = np.float32(img / 255) #convert to float to add poisson noise
np.random.seed(seed=seed)
out = np.random.poisson(data * 256) / 256.
out = np.uint8(out*255)
out = np.clip(out, 0, 255).astype(np.uint8) #convert back to UINT8
"""
# data = np.float32(img) #convert to float to add poisson noise
data = img.astype(np.float64)
noise = prob
# peak = 256.0-noise*(256-32)
peak = 256.0 - noise * (256)
# print(noise,peak)
noise_image = np.random.poisson(data / 255.0 * peak) / peak * 255
out = np.clip(noise_image, 0, 255).astype(np.uint8)
return out
@staticmethod
def k_means(image, k=3):
""" k_means clustering
:Parameters: image, k=3
:Returns: image
"""
pixel_vals = image.reshape((-1, 3))
pixel_vals = np.float32(pixel_vals)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.85)
retval, labels, centers = cv.kmeans(pixel_vals, k, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
segmented_data = centers[labels.flatten()]
segmented_image = segmented_data.reshape((image.shape))
return segmented_image
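# Example call (hedged; assumes a BGR colour image): k is the number of colour clusters, and the
# criteria above stop after 100 iterations or when epsilon 0.85 is reached.
# segmented = Image.Process.k_means(img_bgr, k=4)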
class Falsecolor:
@staticmethod
def falsecolor_jet(img):
"""False color jet
:Parameters: image
:Returns: image
"""
im_color = cv.applyColorMap(img, cv.COLORMAP_JET)
return im_color
@staticmethod
def falsecolor_rainbow(img):
"""False color rainbow
:Parameters: image
:Returns: image
"""
im_color = cv.applyColorMap(img, cv.COLORMAP_RAINBOW)
return im_color
@staticmethod
def falsecolor_transfer(source, target):
""" convert RGB to LAB color space
:Parameters: source_image, target_image
:Returns: image
"""
# convert the images from the RGB to L*ab* color space, being
# sure to utilizing the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv.cvtColor(source, cv.COLOR_GRAY2BGR)
target = cv.cvtColor(target, cv.COLOR_GRAY2BGR)
source = cv.cvtColor(source, cv.COLOR_BGR2LAB).astype("float32")
target = cv.cvtColor(target, cv.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = _image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = _image_stats(target)
# subtract the means from the target image
(l, a, b) = cv.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
# scale by the standard deviations
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip the pixel intensities to [0, 255] if they fall outside
# this range
l = np.clip(l, 0, 255)
a = np.clip(a, 0, 255)
b = np.clip(b, 0, 255)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv.merge([l, a, b])
transfer = cv.cvtColor(transfer.astype("uint8"), cv.COLOR_LAB2BGR)
# return the color transferred image
return transfer
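# Illustrative sketch: both inputs are expected to be single-channel here (they are expanded with
# COLOR_GRAY2BGR above); the LAB statistics of `source` are transferred onto `target`.
# recolored = Image.Falsecolor.falsecolor_transfer(gray_reference, gray_target)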
@staticmethod
def falsecolor_merge2channels(img0, img1):
"""Merge 2 images using 2 colors
:Parameters: image1, image2
:Returns: image
"""
img0 = Image.Convert.toGray(img0)
img1 = Image.Convert.toGray(img1)
img0 = Image.Adjust.histostretch_clahe(img0)
img1 = Image.Adjust.histostretch_clahe(img1)
img0 = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
r0, g0, b0 = cv.split(img0)
r1, g1, b1 = cv.split(img1)
img3 = cv.merge([b1, g1, r0])
return img3
@staticmethod
def falsecolor_merge3channels(img0, img1, img2):
"""Merge 3 images using 3 colors
:Parameters: image1, image2, image3
:Returns: image
"""
img0 = Image.Adjust.histostretch_clahe(img0)
img1 = Image.Adjust.histostretch_clahe(img1)
img2 = Image.Adjust.histostretch_clahe(img2)
img0 = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
r0, g0, b0 = cv.split(img0)
r1, g1, b1 = cv.split(img1)
r2, g2, b2 = cv.split(img2)
img3 = cv.merge([b2, g1, r0])
return img3
class Adjust:
@staticmethod
def invert(img):
"""Invert image
:Parameters: image
:Returns: image
"""
img2 = cv.bitwise_not(img)
return img2
@staticmethod
def squared_and_bin(img):
"""First make image squared followed by binning to 256 pixels
:Parameters: image
:Returns: image
"""
img0 = Image.Tools.squared(img, leadingaxislargest=False)
scale = 256 / img0.shape[1]
img0 = cv.resize(img0, None, None, scale, scale, interpolation=cv.INTER_AREA)
return img0
@staticmethod
def bin(img, shrinkfactor=2):
"""bin image with shrinkfactor (default shrinkfactor= 2)
:Parameters: image, shrinkfactor
:Returns: image
"""
scale = 1 / shrinkfactor
img0 = cv.resize(img, None, None, scale, scale, interpolation=cv.INTER_AREA)
return img0
@staticmethod
def histogram(img):
"""create histogram of an image as an image
:Parameters: image
:Output: histogram image
"""
w = img.shape[1]
h = img.shape[0]
if (img.dtype == np.uint8):
rng = 256
else:
rng = 65535
# bitdepth = img.dtype
hist, bins = np.histogram(img.flatten(), 256, [0, rng])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max() / cdf.max() # this line not necessary.
fig = plt.figure()
plt.plot(cdf_normalized, color='b')
plt.hist(img.flatten(), 256, [0, rng], color='0.30')
plt.axis("off") # turns off axes
fig.tight_layout()
fig.canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
out = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close()
# cv.imwrite("test.png",out)
return out
@staticmethod
def histostretch_clahe(img):
"""Apply a CLAHE (Contrast Limited Adaptive Histogram Equalization) filter on a grayscale image
supports 8 and 16 bit images.
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
if (len(img.shape) < 3):
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)
img = cl1
else:
clahe = cv.createCLAHE(clipLimit=3., tileGridSize=(8, 8))
lab = cv.cvtColor(img, cv.COLOR_BGR2LAB) # convert from BGR to LAB color space
l, a, b = cv.split(lab) # split on 3 different channels
l2 = clahe.apply(l) # apply CLAHE to the L-channel
lab = cv.merge((l2, a, b)) # merge channels
img = cv.cvtColor(lab, cv.COLOR_LAB2BGR) # convert from LAB to BGR
return img
'''
@staticmethod
def histostretch_equalized(img):
"""Apply a equalize histogram filter (8-bit images only!)
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
equ = cv.equalizeHist(img)
return equ
'''
@staticmethod
def histostretch_equalized(img):
"""Apply a histogram equalization filter
8 and 16 bit
:Parameters: image
:Returns: image
#https://github.com/torywalker/histogram-equalizer/blob/master/HistogramEqualization.ipynb
"""
def get_histogram(image, bins):
# array with size of bins, set to zeros
histogram = np.zeros(bins)
# loop through pixels and sum up counts of pixels
for pixel in image:
histogram[pixel] += 1
# return our final result
return histogram
# create our cumulative sum function
def cumsum(a):
a = iter(a)
b = [next(a)]
for i in a:
b.append(b[-1] + i)
return np.array(b)
if (img.dtype == np.uint16):
flat = img.flatten()
hist = get_histogram(flat, 65536)
# plt.plot(hist)
#
cs = cumsum(hist)
# re-normalize cumsum values to be between 0-255
# numerator & denominator
nj = (cs - cs.min()) * 65535
N = cs.max() - cs.min()
# re-normalize the cdf
cs = nj / N
cs = cs.astype('uint16')
img_new = cs[flat]
# plt.hist(img_new, bins=65536)
# plt.show(block=True)
img_new = np.reshape(img_new, img.shape)
else:
if len(img.shape) == 2:
img_new = cv.equalizeHist(img)
else:
img_yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV) # equalize the histogram of the Y channel
img_yuv[:, :, 0] = cv.equalizeHist(img_yuv[:, :, 0]) # convert the YUV image back to RGB format
img_new = cv.cvtColor(img_yuv, cv.COLOR_YUV2BGR)
return img_new
@staticmethod
def histostretch_normalize(img):
"""Normalize histogram
8bit between 0 and 255
16bit between 0 and 65535
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
if (img.dtype == np.uint16):
norm = cv.normalize(img, None, alpha=0, beta=65535, norm_type=cv.NORM_MINMAX, dtype=cv.CV_16U)
else:
norm = cv.normalize(img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
return norm
# smooth, threshold
@staticmethod
def threshold(img, thresh=128):
"""Applies a fixed-level threshold to each array element. [0-255]
:Parameters: image, threshold
:Returns: image
"""
ret, imout = cv.threshold(img, thresh, 255, cv.THRESH_BINARY)
return imout
@staticmethod
def normalize(img):
"""Normalize image. [0-255]
:Parameters: image
:Returns: image
"""
imout = cv.normalize(img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_64F)
return imout
@staticmethod
def thresholdrange(img, threshmin=128, threshmax=255):
"""threshold image between min and max value
:Parameters: image, thresholdmin, thresholdmax
:Returns: image
"""
imout = cv.inRange(img, threshmin, threshmax)
return imout
@staticmethod
def threshold_otsu(img):
"""Applies an automatic threshold using the Otsu method for thresholding
:Parameters: image
:Returns: image
"""
ret, imout = cv.threshold(img, 0, 255, cv.THRESH_OTSU)
return imout
@staticmethod
def adjust_contrast_brightness(img, contrast=0, brightness=0):
"""adjust contrast and brightness
contrast range: -127..127
brightness range: -255..255
:Parameters: image
:Returns: image
"""
table = np.array([i * (contrast / 127 + 1) - contrast + brightness for i in range(0, 256)]).clip(0, 255).astype('uint8')
# if len(img.shape) == 3:
# out = cv.LUT(img, table)[:, :, np.newaxis]
# else:
out = cv.LUT(img, table)
return out
@staticmethod
def adjust_gamma(image, gamma=1.0):
"""adjust gamma [0..3.0], default = 1
gamma cannot be 0
:Parameters: image, gamma=1.0
:Returns: image
"""
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv.LUT(image, table)
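# Worked example of the lookup table (illustrative): with gamma=2.0 a mid-grey input of 128 maps to
# ((128/255) ** 0.5) * 255 ~= 180, so the image gets brighter; gamma=0.5 darkens it.
# brighter = Image.Adjust.adjust_gamma(img, gamma=2.0)
# darker = Image.Adjust.adjust_gamma(img, gamma=0.5)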
@staticmethod
def adjust_HSV(img, hval, sval, vval):
"""adjust Hue [0..179], Saturation [-255..255], lightness [-255..255]
:Parameters: image, hue, saturation, lightness
:Returns: image
"""
img = Image.Convert.toRGB(img) # changing channels for nicer image
hsv = Image.Convert.BGRtoHSV(img)
h = hsv[:, :, 0]
s = hsv[:, :, 1]
v = hsv[:, :, 2]
h = np.where(h <= 255.0 - hval, h + hval, 255)
if (sval > 0):
s = np.where(s <= 255.0 - sval, s + sval, 255)
else:
s = (s * ((255.0 + sval) / 255.0))
if (vval > 0):
v = np.where(v <= 255.0 - vval, v + vval, 255)
else:
v = v * ((255.0 + vval) / 255.0)
hsv[:, :, 0] = h
hsv[:, :, 1] = s
hsv[:, :, 2] = v
img1 = Image.Convert.HSVtoBGR(hsv)
return img1
@staticmethod
def adjust_HSL(img, hval, sval, lval):
"""adjust Hue [0..179], Saturation [0..255], lightness [0..255]
The definition HSL is most commonly used; occasionally this is called HLS
:Parameters: image, hue, saturation, lightness
:Returns: image
"""
img = Image.Convert.toRGB(img) # changing channels for nicer image
hls = cv.cvtColor(img, cv.COLOR_RGB2HLS)
h = hls[:, :, 0]
l = hls[:, :, 1]
s = hls[:, :, 2]
h = np.where(h <= 255.0 - hval, h + hval, 255)
if (sval > 0):
s = np.where(s <= 255.0 - sval, s + sval, 255)
else:
s = (s * ((255.0 + sval) / 255.0))
if (lval > 0):
l = np.where(l <= 255.0 - lval, l + lval, 255)
else:
l = l * ((255.0 + lval) / 255.0)
hls[:, :, 0] = h
hls[:, :, 1] = l
hls[:, :, 2] = s
img1 = cv.cvtColor(hls, cv.COLOR_HLS2RGB)
return img1
@staticmethod
def adjust_auto_whitebalance(img):
"""auto whitebalance
https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption
:Parameters: image
:Returns: image
"""
result = cv.cvtColor(img, cv.COLOR_BGR2LAB)
avg_a = np.average(result[:, :, 1])
avg_b = np.average(result[:, :, 2])
result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
result = cv.cvtColor(result, cv.COLOR_LAB2BGR)
return result
class Transform:
@staticmethod
def flip_horizontal(img):
"""Flip image around the horizontal axis (top-to-bottom flip)
:Parameters: image
:Returns: image
"""
horizontal_img = cv.flip(img, 0)  # flipCode=0 flips around the x-axis
return horizontal_img
@staticmethod
def flip_vertical(img):
"""Flip image around the vertical axis (left-to-right mirror)
:Parameters: image
:Returns: image
"""
vertical_img = cv.flip(img, 1)  # flipCode=1 flips around the y-axis
return vertical_img
@staticmethod
def translate(img, shiftx, shifty):
"""Shift image n x and y pixels
:Parameters: image, shiftx, shifty
:Returns: image
"""
w = img.shape[1]
h = img.shape[0]
M = np.float32([[1, 0, shiftx], [0, 1, shifty]])
img2 = cv.warpAffine(img, M, (w, h))
return img2
@staticmethod
def rotate(image, angle):
"""Rotate image
:Parameters: image, angle
:Returns: image
"""
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv.getRotationMatrix2D(image_center, angle, 1.0)
result = cv.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv.INTER_LINEAR)
return result
class Binary:
@staticmethod
def skeletonize(img):
"""skeletonize a thresholded image.
:Parameters: image
:Returns: image
"""
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)
element = cv.getStructuringElement(cv.MORPH_CROSS, (3, 3))
done = False
while (not done):
eroded = cv.erode(img, element)
temp = cv.dilate(eroded, element)
temp = cv.subtract(img, temp)
skel = cv.bitwise_or(skel, temp)
img = eroded.copy()
zeros = size - cv.countNonZero(img)
if zeros == size:
done = True
return skel
# Zhang-Suen Thinning Algorithm - https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
# note: slow filter
@staticmethod
def thinning(img):
"""Applies the Zhang-Suen thinning algorithm.
:Parameters: image
:Returns: image
"""
def neighbours(x, y, img):
"Return 8-neighbours of image point P1(x,y), in a clockwise order"
img = img
x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], # P2,P3,P4,P5
img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]] # P6,P7,P8,P9
def transitions(neighbours):
"No. of 0,1 patterns (transitions from 0 to 1) in the ordered sequence"
n = neighbours + neighbours[0:1] # P2, P3, ... , P8, P9, P2
return sum((n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:])) # (P2,P3), (P3,P4), ... , (P8,P9), (P9,P2)
ret, imout = cv.threshold(img, 0, 255, cv.THRESH_OTSU)
img = img < ret # must set object region as 1, background region as 0 !
print("the Zhang-Suen Thinning Algorithm")
img_Thinned = img.copy() # deepcopy to protect the original img
changing1 = changing2 = 1 # the points to be removed (set as 0)
while changing1 or changing2: # iterates until no further changes occur in the img
# Step 1
changing1 = []
rows, columns = img_Thinned.shape # x for rows, y for columns
for x in range(1, rows - 1): # No. of rows
for y in range(1, columns - 1): # No. of columns
P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(x, y, img_Thinned)
if (img_Thinned[x][y] == 1 and # Condition 0: Point P1 in the object regions
2 <= sum(n) <= 6 and # Condition 1: 2<= N(P1) <= 6
transitions(n) == 1 and # Condition 2: S(P1)=1
P2 * P4 * P6 == 0 and # Condition 3
P4 * P6 * P8 == 0): # Condition 4
changing1.append((x, y))
for x, y in changing1:
img_Thinned[x][y] = 0
# Step 2
changing2 = []
for x in range(1, rows - 1):
for y in range(1, columns - 1):
P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(x, y, img_Thinned)
if (img_Thinned[x][y] == 1 and # Condition 0
2 <= sum(n) <= 6 and # Condition 1
transitions(n) == 1 and # Condition 2
P2 * P4 * P8 == 0 and # Condition 3
P2 * P6 * P8 == 0): # Condition 4
changing2.append((x, y))
for x, y in changing2:
img_Thinned[x][y] = 0
return img_Thinned
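# Usage sketch (illustrative): the input is Otsu-thresholded inside the function and the returned
# array is boolean (object pixels True); convert before saving with OpenCV.
# thin = Image.Binary.thinning(binary_img)
# thin_u8 = Image.Convert.binarytogray(thin)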
@staticmethod
def morphology_erode(img, kernel=5):
"""Morphology filter - erode
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
erosion = cv.erode(img, kerneln, iterations=1)
return erosion
@staticmethod
def morphology_dilate(img, kernel=5):
"""Morphology filter - dilate
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
dilation = cv.dilate(img, kerneln, iterations=1)
return dilation
@staticmethod
def morphology_open(img, kernel=5):
"""Morphology filter - open
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
opening = cv.morphologyEx(img, cv.MORPH_OPEN, kerneln)
return opening
@staticmethod
def morphology_close(img, kernel=5):
"""Morphology filter - close
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
opening = cv.morphologyEx(img, cv.MORPH_CLOSE, kerneln)
return opening
@staticmethod
def morphology_fillholes(im_in):
"""Morphology filter - fillholes
:Parameters: image, kernel
:Returns: image
"""
im_floodfill = im_in.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_in.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_in | im_floodfill_inv
return im_in, im_floodfill, im_floodfill_inv, im_out
@staticmethod
def remove_isolated_pixels(img0):
"""Remove isolated pixels in an image
:Parameters: image
:Returns: image
"""
input_image = cv.threshold(img0, 254, 255, cv.THRESH_BINARY)[1]
input_image_comp = cv.bitwise_not(input_image) # could just use 255-img
kernel1 = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], np.uint8)
kernel2 = np.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]], np.uint8)
hitormiss1 = cv.morphologyEx(input_image, cv.MORPH_ERODE, kernel1)
hitormiss2 = cv.morphologyEx(input_image_comp, cv.MORPH_ERODE, kernel2)
hitormiss = cv.bitwise_and(hitormiss1, hitormiss2)
hitormiss_comp = cv.bitwise_not(hitormiss) # could just use 255-img
del_isolated = cv.bitwise_and(input_image, input_image, mask=hitormiss_comp)
return del_isolated
@staticmethod
def remove_islands(img0, min_size=150):
"""Remove islands in an image
:Parameters: image, min_size=150
:Returns: image
"""
# find all your connected components (white blobs in your image)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(img0, connectivity=8)
# connectedComponentsWithStats yields every separated component with information on each of them, such as size
# the following part is just taking out the background which is also considered a component, but most of the time we don't want that.
sizes = stats[1:, -1]
nb_components = nb_components - 1
# minimum size of features we want to keep (number of pixels)
# here, it's a fixed value, but you can set it as you want, eg the mean of the sizes or whatever
# your answer image
img2 = np.zeros((output.shape))
# for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
if sizes[i] >= min_size:
img2[output == i + 1] = 255
return img2
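# Example (hedged): keep only connected components of at least 150 pixels; note the result is a
# float array (np.zeros default), so cast to uint8 if a binary image is needed afterwards.
# cleaned = Image.Binary.remove_islands(binary_img, min_size=150).astype(np.uint8)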
class Convert:
@staticmethod
def to8bit(img):
"""Convert to 8 bit image
:Parameters: image
:Returns: image
"""
if (img.dtype == np.uint16):
img1 = (img / 256).astype('uint8') # updated this one on 20191216 for 16 bit imaging
else:
img1 = (img).astype('uint8')
# img1 = img.astype('uint8') # 16bit to 8bit
return img1
@staticmethod
def to16bit(img):
"""Convert to 16 bit image
:Parameters: image
:Returns: image
"""
if (img.dtype == np.uint8):
img1 = (img * 256).astype('uint16') # updated this one on 20191216 for 16 bit imaging
else:
img1 = (img).astype('uint16')
# img1 = img.astype('uint8') # 16bit to 8bit
return img1
@staticmethod
def toRGB(img):
"""Convert grayscale to RGB image
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels != 3):
img1 = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
# print('Image converted from Grayscale to RGB')
return img1
@staticmethod
def toGray(img):
"""Convert an RGB color image to grayscale
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
img1 = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
# print('Image converted from RGB to Grayscale')
return img1
@staticmethod
def BGRtoRGB(img):
"""Convert BGR to RGB
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
b, g, r = cv.split(img) # get b,g,r
img1 = cv.merge([r, g, b]) # switch it to rgb (OpenCV uses BGR)
return img1
@staticmethod
def RGBtoBGR(img):
"""Convert RGB to BGR
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
r, g, b = cv.split(img) # get b,g,r
img1 = cv.merge([b, g, r]) # switch it to rgb (OpenCV uses BGR)
return img1
@staticmethod
def BGRtoHSV(img):
"""Convert BGR to HSV
:Parameters: image
:Returns: image
"""
img1 = cv.cvtColor(img, cv.COLOR_BGR2HSV)
return img1
@staticmethod
def HSVtoBGR(img):
"""Convert HSV to BGR
:Parameters: image
:Returns: image
"""
img1 = cv.cvtColor(img, cv.COLOR_HSV2BGR)
return img1
@staticmethod
def binarytogray(img):
"""Convert binary image to grayscale (dtype=bool -> dtype=uint8)
:Parameters: image
:Returns: image
"""
img = img.astype('uint8') * 255
return img
class FilterKernels:
@staticmethod
def ideal_lowpass_kernel(img, radius=32):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = np.copy(d)
kernel[kernel < pow(radius, 2.0)] = 1
kernel[kernel >= pow(radius, 2.0)] = 0
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def gaussian_lowpass_kernel(img, radius=32):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = np.exp(-d / (2 * pow(radius, 2.0)))
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def butterworth_lowpass_kernel(img, radius=32, n=2):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def ideal_bandpass_kernel(img, D0=32, w=9):
rows, cols = img.shape
crow, ccol = int(rows / 2), int(cols / 2)
mask = np.ones((rows, cols), np.uint8)
for i in range(0, rows):
for j in range(0, cols):
d = np.sqrt(pow(i - crow, 2) + pow(j - ccol, 2))
if D0 - w / 2 < d < D0 + w / 2:
mask[i, j] = 1
else:
mask[i, j] = 0
kernel = mask
return kernel
@staticmethod
def ideal_bandstop_kernel(img, D0=32, W=9):
kernel = 1.0 - Image.FilterKernels.ideal_bandpass_kernel(img, D0, W)
return kernel
@staticmethod
def gaussian_bandstop_kernel(img, D0=32, W=9):
r, c = img.shape[1], img.shape[0]
u = np.arange(r)
v = np.arange(c)
u, v = np.meshgrid(u, v)
low_pass = np.sqrt((u - r / 2) ** 2 + (v - c / 2) ** 2)
kernel = 1.0 - np.exp(-0.5 * (((low_pass ** 2 - D0 ** 2) / (low_pass * W + 1.0e-5)) ** 2))
return kernel
@staticmethod
def gaussian_bandpass_kernel(img, D0=32, W=9):
assert img.ndim == 2
# kernel = Image.FilterKernels.gaussian_bandstop_kernel(img, D0, W)
kernel = 1.0 - Image.FilterKernels.gaussian_bandstop_kernel(img, D0, W)
return kernel
@staticmethod
def butterworth_bandstop_kernel(img, D0=32, W=9, n=1):
r, c = img.shape[1], img.shape[0]
u = np.arange(r)
v = np.arange(c)
u, v = np.meshgrid(u, v)
low_pass = np.sqrt((u - r / 2) ** 2 + (v - c / 2) ** 2)
kernel = (1 / (1 + ((low_pass * W) / (low_pass ** 2 - D0 ** 2)) ** (2 * n)))
return kernel
@staticmethod
def butterworth_bandpass_kernel(img, D0=5, W=10):
kernel = 1.0 - Image.FilterKernels.butterworth_bandstop_kernel(img, D0, W)
return kernel
'''
def convert_kernel_to_image(kernel):
out = np.dstack((kernel, np.zeros(kernel.shape[:-1])))
return out
'''
class Tools:
# combined sequences
@staticmethod
def image_with_2_closeups(img, t_size=[0.2, 0.2], t_center1=[0.3, 0.3], t_center2=[0.6, 0.6]):
"""image with 2 closeups, the output is a color image.
:Parameters: image, t_size=[0.2, 0.2], t_center1=[0.3, 0.3], t_center2=[0.6, 0.6]
:Returns: image
"""
w = img.shape[1]
h = img.shape[0]
rgb = Image.Convert.toRGB(img)
xt0 = Image._multipleof2((t_center1[0] - t_size[0] * 0.5) * w)
yt0 = Image._multipleof2((t_center1[1] - t_size[1] * 0.5) * h)
xt1 = Image._multipleof2((t_center1[0] + t_size[0] * 0.5) * w)
yt1 = Image._multipleof2((t_center1[1] + t_size[1] * 0.5) * h)
# rgb = img
template1 = Image.crop(rgb, xt0, yt0, xt1, yt1)
w3 = np.abs(xt0 - xt1)
h3 = np.abs(yt0 - yt1)
xt0b = Image._multipleof2((t_center2[0] - t_size[0] * 0.5) * w)
yt0b = Image._multipleof2((t_center2[1] - t_size[1] * 0.5) * h)
# rgb = img
template2 = Image.crop(rgb, xt0b, yt0b, xt0b + w3, yt0b + h3)
wt = template1.shape[1]
ht = template1.shape[0]
scalefactor = (w * 0.5) / wt
template1b = Image.resize(template1, scalefactor)
# print(template1b.shape)
wt2 = template1b.shape[1]
ht2 = template1b.shape[0]
template2b = Image.resize(template2, scalefactor)
# print(template2b.shape)
# print(w,h)
# print(wt2,ht2)
output = np.zeros((h + ht2, w, 3), np.uint8)
print(output.shape)
print(rgb.shape)
print(template1b.shape)
print(template2b.shape)
output[0:h, 0:w] = rgb
output[h:h + ht2, 0:wt2] = template1b
output[h:h + ht2, wt2:w] = template2b
output = cv.rectangle(output, (xt0, yt0), (xt1, yt1), (33, 145, 237), 3)
output = cv.rectangle(output, (xt0b, yt0b), (xt0b + w3, yt0b + h3), (240, 167, 41), 3)
output = cv.rectangle(output, (wt2 + 3, h), (w - 2, h + ht2 - 3), (240, 167, 41), 3)
output = cv.rectangle(output, (0 + 2, h), (wt2 - 2, h + ht2 - 3), (33, 145, 237), 3)
return output
@staticmethod
def anaglyph(img0, img1):
"""Create an anaglyph from 2 images (stereo image)
:Parameters: image1, image2
:Returns: image
"""
matrices = {
'true': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0.299, 0.587, 0.114]],
'mono': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0.299, 0.587, 0.114, 0.299, 0.587, 0.114]],
'color': [[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
'halfcolor': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
'optimized': [[0, 0.7, 0.3, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
}
# img1 = translate_image(img1,8,0)
width = img0.shape[1]
height = img0.shape[0]
leftImage = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
rightImage = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
m = matrices['optimized']
result = np.zeros((img0.shape[0], img0.shape[1], 3), np.uint8)
# split the left and right images into separate blue, green and red images
lb, lg, lr = cv.split(np.asarray(leftImage[:, :]))
rb, rg, rr = cv.split(np.asarray(rightImage[:, :]))
resultArray = np.asarray(result[:, :])
resultArray[:, :, 0] = lb * m[0][6] + lg * m[0][7] + lr * m[0][8] + rb * m[1][6] + rg * m[1][7] + rr * m[1][8]
resultArray[:, :, 1] = lb * m[0][3] + lg * m[0][4] + lr * m[0][5] + rb * m[1][3] + rg * m[1][4] + rr * m[1][5]
resultArray[:, :, 2] = lb * m[0][0] + lg * m[0][1] + lr * m[0][2] + rb * m[1][0] + rg * m[1][1] + rr * m[1][2]
return result
@staticmethod
def image2patches(img, patchsize, overlappx=0, verbose=False):
"""
Convert single image to a list of patches.
The size of a patch is determined by patchsize; be aware of rounding in case the image width or height cannot be divided evenly by the patchsize.
Works both for color and grayscale images.
overlap in pixels (default overlap=0)
:Parameters: image, patchsize, overlappx, verbose
:Returns: patches, cols
"""
h0, w0 = img.shape[0], img.shape[1]
# determine number of steps (rows and columns)
cols = int(np.round(w0 / patchsize, 0))
rows = int(np.round(h0 / patchsize, 0))
if (cols < 1):
cols = 1
if (rows < 1):
rows = 1
h0_size = int(h0 / rows + 0.5)
w0_size = int(w0 / cols + 0.5)
# add black border to image
bordersize = int(overlappx) # require bordersize of the patches
channels = len(img.shape)
if (channels == 3):
# color image
base_size = h0 + bordersize * 2, w0 + bordersize * 2, 3
base = np.zeros((base_size), np.uint8)
else:
base_size = h0 + bordersize * 2, w0 + bordersize * 2
base = np.zeros((base_size), np.uint8)
# base = np.zeros(base_size, dtype=np.uint8)
base[bordersize:h0 + bordersize, bordersize:w0 + bordersize] = img # this works
# make patches with overlap
patches = []
for row in range(rows):
for col in range(cols):
yc = int((row + 0.5) * h0_size) + bordersize
xc = int((col + 0.5) * w0_size) + bordersize
x0 = int(xc - (w0_size * 0.5) - bordersize)
y0 = int(yc - (h0_size * 0.5) - bordersize)
x1 = int(xc + (w0_size * 0.5) + bordersize)
y1 = int(yc + (h0_size * 0.5) + bordersize)
patch = base[y0:y1, x0:x1]
patches.append(patch)
if verbose == True:
print(
"image2patches: patches {}, source_width {}, source_height {},rows {}, columns {}, output: patches,cols".format(
len(patches), w0, h0, rows, cols))
return patches, cols
@staticmethod
def patches2image(images, cols=5, overlappx=0, whitebackground=True, verbose=False):
"""
Stitch a list of image patches to a single image. The number of columns determines the next line.
Works both for color and grayscale images.
overlap in pixels (default overlap=0)
Other definitions often used for this process: image montage or image stitching
when cols is set to 0 rows and cols will be equal.
:Parameters: imagelist, cols=5, overlap_perc=0, whitebackground=True
:Returns: image
"""
if (cols == 0):
cols = int(np.sqrt(len(images)))
rows = cols
if verbose == True:
print('patches2image equal rows and columns')
else:
if (cols > len(images)):
cols = len(images)
rows = int(len(images) / cols)
if (rows * cols) < len(images):
cols = cols + (len(images) - (rows * cols)) # number of total images should be correct
maxwidth = max(image.shape[1] for image in images)
maxheight = max(image.shape[0] for image in images)
gap = int(-overlappx * 2.)
# maxwidth = maxwidth
# maxheight = maxheight
height = maxheight * rows + (gap * (rows - 1))
width = maxwidth * cols + (gap * (cols - 1))
# output = np.zeros((height, width), np.uint8)
if verbose == True:
print("patches2image images {}, new_width {}, new_height {}, rows {}, cols {}, gap {}".format(
len(images), width, height, rows, cols, gap))
channels = len(images[0].shape)
if (channels == 3):
# color image
output = np.zeros((height, width, 3), np.uint8)
else:
output = np.zeros((height, width), np.uint8)
if (whitebackground == True):
cv.bitwise_not(output, output)
x = 0
y = 0
for image in images:
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # changing image to grayscale
h, w = image.shape[0], image.shape[1]
output[(y * h + gap * y):((y + 1) * h + gap * y), (x * w + gap * x):((x + 1) * w + gap * x)] = image
x += 1
if (x > (cols - 1)):
x = 0
y += 1
# out = cv2.cvtColor(output, cv2.COLOR_GRAY2RGB) # and back
h4, w4 = output.shape[0], output.shape[1]
out = output[overlappx:h4 - overlappx, overlappx:w4 - overlappx]
return out
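# Round-trip sketch (illustrative, names are placeholders): splitting an image into patches and
# stitching them back with the same overlap should roughly reproduce the original, apart from
# rounding at the borders.
# patches, cols = Image.Tools.image2patches(img, patchsize=256, overlappx=16)
# stitched = Image.Tools.patches2image(patches, cols=cols, overlappx=16, whitebackground=False)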
@staticmethod
def patches2disk(folder, patches):
"""
Save list of patches to disk
:Parameters: path, patches
"""
for t in range(0, len(patches)):
cv.imwrite(os.path.join(folder, "patch_{0}.png".format(t)), patches[t])
@staticmethod
def create_hsv_map():
"""
generate a HSV Map pattern
:Parameters: -
:Returns: image
"""
V, H = np.mgrid[0:1:100j, 0:1:300j]
S = np.ones_like(V)
HSV = np.dstack((H, S, V))
out = hsv_to_rgb(HSV)
# plt.imshow(out)
# out = Image.Convert.HSVtoBGR(np.float32(HSV))
# out = Image.Convert.BGRtoRGB(out)
return out
@staticmethod
def create_checkerboard(rows_num=10, columns_num=10, block_size=30, base_col=(255, 255, 255)):
"""
generate a checkerboard pattern
:Parameters: rows, columns, blocksize, base color
:Returns: image
"""
base_color = tuple(map(int, base_col))
block_size = block_size * 4
image_width = block_size * columns_num
image_height = block_size * rows_num
inv_color = tuple(255 - val for val in base_color)  # complementary colour of the base colour
checker_board = np.zeros((image_height, image_width, 3), np.uint8)
color_row = 0
color_column = 0
for i in range(0, image_height, block_size):
color_row = not color_row
color_column = color_row
for j in range(0, image_width, block_size):
checker_board[i:i + block_size, j:j +
block_size] = base_color if color_column else inv_color
color_column = not color_column
return checker_board
@staticmethod
def fisheye_calibrate(imagelist):
"""
find fisheye correction values from multiple images containing the checkerboard
:Parameters: imagelist
:Returns: image
"""
# https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
CHECKERBOARD = (10, 10)
subpix_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.1)
calibration_flags = cv.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv.fisheye.CALIB_CHECK_COND + cv.fisheye.CALIB_FIX_SKEW
objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
_img_shape = None
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
for img in imagelist:
_img_shape = img.shape[:2]
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv.findChessboardCorners(gray, CHECKERBOARD,
cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE)
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
cv.cornerSubPix(gray, corners, (3, 3), (-1, -1), subpix_criteria)
imgpoints.append(corners)
N_OK = len(objpoints)
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
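# The routine is truncated here in the source; a hedged sketch of the usual continuation
# (following the OpenCV fisheye calibration example referenced above) would be:
# tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
# rms, _, _, _, _ = cv.fisheye.calibrate(objpoints, imgpoints, gray.shape[::-1], K, D, rvecs, tvecs,
#     calibration_flags, (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 1e-6))
# return K, D  # camera matrix and distortion coefficients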
from numpy import *  # import numpy into the global namespace
import numpy as np
import operator  # operator module, used for sorting dictionary items by value
from os import listdir  # listdir returns the file names in a given directory
from sklearn.neighbors import KNeighborsClassifier as kNN  # scikit-learn's kNN classifier
def createDataSet():  # build a small toy data set
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
def classify0(inX, dataSet, labels, k):  # a simple kNN classifier
# first compute the distance between the query point and every sample of known class
dataSetSize = dataSet.shape[0]  # number of rows in the data set; shape[0] is the row count, shape[1] the column count
diffMat = tile(inX, (dataSetSize, 1)) - dataSet  # tile() repeats the query vector inX once per row,
# so that it matches the shape of the data set; subtracting gives the difference matrix diffMat
sqDiffMat = diffMat ** 2  # square element-wise, e.g. ([-1, -1.1], [-1, -1]) becomes ([1, 1.21], [1, 1])
sqDistances = sqDiffMat.sum(axis=1)  # sum along each row, e.g. ([1, 1.21], [1, 1]) becomes (2.21, 2)
distances = sqDistances ** 0.5  # square root gives the Euclidean distances
# sort by increasing distance
sortedDisIndicies = distances.argsort()  # argsort returns the indices that sort the array ascending,
# e.g. {2, 4, 1} returns {1, 2, 0}
classCount = {}  # dictionary used to count the votes per label
# take the k points closest to the query point
for i in range(k):  # count the labels in order of increasing distance
voteIlabel = labels[sortedDisIndicies[i]]  # label of the i-th nearest neighbour
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1  # get() returns 0 when the label is not yet in the
# dictionary, so each label starts at 0 votes and is incremented by 1 per neighbour
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
# sort the (label, votes) pairs by the vote count in descending order
return sortedClassCount[0][0]  # the most frequent label among the k neighbours is the predicted class
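# Usage sketch (illustrative): classify a new point against the toy data set with k=3.
# group, labels = createDataSet()
# print(classify0([0.2, 0.1], group, labels, 3))  # expected to print 'B'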
def file2matrix(filename):  # convert a text file of records into a NumPy matrix
fr = open(filename)  # open the file
arrayOLines = fr.readlines()  # read it line by line
numberOfLines = len(arrayOLines)  # number of lines in the file
returnMat = zeros((numberOfLines, 3))  # matrix with one row per line and 3 feature columns
classLabelVector = []  # single-column list that stores the class of every row
index = 0  # reset the row index
# read the text line by line and assign a label to each row
for line in arrayOLines:
line = line.strip()  # strip leading and trailing whitespace
listFromLine = line.split('\t')  # split the line on every tab character
returnMat[index, :] = listFromLine[0:3]  # the first three fields of each line become the features
# classLabelVector.append(int(float(listFromLine[-1])))
# the value of the last column determines the numeric class label
if (listFromLine[-1] == 'largeDoses'):
classLabelVector.append(3)
elif listFromLine[-1] == 'smallDoses':
classLabelVector.append(2)
elif listFromLine[-1] == 'didntLike':
classLabelVector.append(1)
index += 1  # move on to the next row
return returnMat, classLabelVector  # return the feature matrix and the class label vector
def autoNorm(dataSet):  # normalize every feature
minVals = dataSet.min(0)  # per-column minimum
maxVals = dataSet.max(0)  # per-column maximum
ranges = maxVals - minVals  # per-column range
normDataSet = zeros(np.shape(dataSet))  # zero matrix with the same shape as the data set for the result
m = dataSet.shape[0]  # number of rows in the data set
normDataSet = dataSet - np.tile(minVals, (m, 1))  # subtract the per-column minimum from every row
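# The function is cut off here in the source; a hedged sketch of the usual completion
# (min-max scaling of each feature to the 0..1 range) would be:
# normDataSet = normDataSet / np.tile(ranges, (m, 1))  # divide by the per-column range
# return normDataSet, ranges, minVals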
import os
import json
import numpy as np
from scipy.stats import truncnorm
from .light_action import TrafficLightAction, Acceleration
from .light_state import TrafficLightState
from .light_observation import TrafficLightObservation
from .light_data import TrafficLightData, Belief
from .util import Acceleration, LightColor
from .util import max_distance, state_to_color_index, calculate_trunc_norm_prob
from .util import MIN_DISTANCE_OBS, MAX_DISTANCE_OBS, MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS, INDEX_TO_ACTION
from pomdpy.pomdp import model
from pomdpy.discrete_pomdp import DiscreteActionPool
from pomdpy.discrete_pomdp import DiscreteObservationPool
class TrafficLightModel(model.Model):
def __init__(self, problem_name="TrafficLight"):
super().__init__(problem_name)
self.num_actions = len(Acceleration)
path = os.path.join(*__name__.split('.')[:-1], "config.json")
with open(path, "rt") as fp:
self.config = json.load(fp)
self.init_speed = self.config["init_speed"]
def start_scenario(self):
position = self.config["init_position"]
speed = self.config["init_speed"]
light = self.config["init_light"]
return TrafficLightState(position, speed, light)
''' --------- Abstract Methods --------- '''
def is_terminal(self, state):
return state.position >= self.road_length + self.intersection_length
def sample_an_init_state(self):
random_position = np.random.randint(self.config["road_length"] // 2)
speed = self.init_speed
random_light = np.random.randint(sum(self.config["light_cycle"]))
return TrafficLightState(random_position, speed, random_light)
def create_observation_pool(self, solver):
return DiscreteObservationPool(solver)
def sample_state_uninformed(self):
random_position = np.random.randint(self.config["road_length"] // 2)
random_speed = np.random.randint(self.config["speed_limit"])
random_light = np.random.randint(sum(self.config["light_cycle"]))
return TrafficLightState(random_position, random_speed, random_light)
def sample_state_informed(self, belief):
return belief.sample_particle()
def get_all_states(self):
states = []
for position in range(self.road_length):
for speed in range(self.max_speed):
for light in range(sum(self.light_cycle)):
states.append(TrafficLightState(position, speed, light))
return states
def get_all_actions(self):
return [TrafficLightAction(index) for index in INDEX_TO_ACTION]
def get_all_observations(self):
observations = []
for distance_measurement in range(MIN_DISTANCE_OBS, MAX_DISTANCE_OBS + 1):
for wavelength_measurement in range(MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS + 1):
for speed in range(self.config["max_speed"] + 1):
observations.append(TrafficLightObservation((distance_measurement, wavelength_measurement, speed)))
return observations
def get_legal_actions(self, state):
legal_actions = []
for index in INDEX_TO_ACTION:
if state.speed + INDEX_TO_ACTION[index] >= 0 and state.speed + INDEX_TO_ACTION[index] <= self.config["max_speed"]:
legal_actions.append(TrafficLightAction(index))
return legal_actions
def is_valid(self, state):
return state.position >= 0 and state.speed >= 0
def reset_for_simulation(self):
self.start_scenario()
def reset_for_epoch(self):
self.start_scenario()
def update(self, sim_data):
pass
def get_max_undiscounted_return(self):
return 10
def state_transition(self, state, action):
speed = state.speed + action
position = state.position + speed
light = (state.light + 1) % sum(self.config["light_cycle"])
new_state = TrafficLightState(position, speed, light)
return new_state
def get_transition_matrix(self):
"""
|A| x |S| x |S'| matrix, for tiger problem this is 3 x 2 x 2
:return:
"""
action_state_state_combos = []
for action in self.get_all_actions():
state_state_combos = []
for state in self.get_all_states():
transition_state = self.state_transition(state, action)
state_combos = []
for next_state in self.get_all_states():
value = 1 if next_state == transition_state else 0
state_combos.append(value)
state_state_combos.append(np.array(state_combos))
action_state_state_combos.append(np.array(state_state_combos))
return np.array(action_state_state_combos)
def get_observation_matrix(self):
"""
|A| x |S| x |O| matrix
:return:
"""
observations = []
for action in self.get_all_actions():
state_obs_probs = []
for state in self.get_all_states():
color = state_to_color_index(state)
observation_probs = []
for observation in self.get_all_observations():
if state.speed + INDEX_TO_ACTION[action.index] != observation.speed:
observation_probs.append(0)
continue
color_mean = self.config["color_means"][color]
color_std = self.config["color_stdev"]
color_probab = calculate_trunc_norm_prob(observation.wavelength_observed, color_mean, color_std, MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS)
dist_mean = state.position
dist_std = self.config["distance_stdev"]
distance_probab = calculate_trunc_norm_prob(observation.distance_observed, dist_mean, dist_std, MIN_DISTANCE_OBS, MAX_DISTANCE_OBS)
observation_probs.append(color_probab * distance_probab)
state_obs_probs.append(np.array(observation_probs))
observations.append(np.array(state_obs_probs))
return np.array(observations)
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 17:53:56 2020
@author: Leonard.<EMAIL>
"""
import numpy as _np
import copy as _copy
class Field:
"""
Lightpipes Field object, containing the field data and meta
parameters as well as helper functions to change data formats etc.
"""
@classmethod
def begin(cls, grid_size, wavelength, N):
"""
Initialize a new field object with the given parameters.
This method is preferred over direct calling of constructor.
Parameters
----------
grid_size : float
[m] physical size of the square grid
wavelength : float
[m] physical wavelength
N : int
number of grid points in each dimension (square)
Returns
-------
The initialized Field object.
"""
inst = cls(None, grid_size, wavelength, N)
return inst
@classmethod
def copy(cls, Fin):
"""
Create a copy of the input field with identical values but
no common references to numpy fields etc.
Parameters
----------
Fin : Field
Input field to copy/clone
Returns
-------
A new Field object with identical values as Fin.
"""
return _copy.deepcopy(Fin)
@classmethod
def shallowcopy(cls, Fin):
"""
Create a shallow copy of the input field, i.e. the parameters are
cloned but the reference to the numpy field is the same!
This may be useful if a function (e.g. Fresnel) returns a copied
field anyways, so a deep copy like Field.copy() would be redundant.
Parameters
----------
Fin : Field
Input field to copy (common reference to .field!)
Returns
-------
A new Field object with identical values as Fin and common reference
to .field
"""
return _copy.copy(Fin)
def __init__(self, Fin=None, grid_size=1.0, wavelength=1.0, N=0):
"""Private, use class method factories instead."""
if Fin is None:
if not N:
raise ValueError('Cannot create zero size field (N=0)')
Fin = _np.ones((N,N),dtype=complex)
else:
Fin = _np.asarray(Fin, dtype=complex)
self._field = Fin
self._lam = wavelength
self._siz = grid_size
self._int1 = 0 #remembers PipFFT direction
self._curvature = 0.0 #remembers field curvature or 0.0 for normal
def _get_grid_size(self):
"""Get or set the grid size in [m]."""
return self._siz
def _set_grid_size(self, gridsize):
self._siz = gridsize
grid_size = property(_get_grid_size, _set_grid_size)
siz = grid_size
def _get_wavelength(self):
"""Get or set the wavelength of the field. All units in [m]."""
return self._lam
def _set_wavelength(self, wavelength):
self._lam = wavelength
wavelength = property(_get_wavelength, _set_wavelength)
lam = wavelength
@property
def grid_dimension(self):
return self._field.shape[0] #assert square
N = grid_dimension
@property
def grid_step(self):
"""Distance in [m] between 2 grid points"""
return self.siz/self.N
dx = grid_step
@property
def field(self):
"""Get the complex E-field."""
return self._field
@field.setter
def field(self, field):
"""The field must be a complex 2d square numpy array.
"""
field = _np.asarray(field, dtype=complex)
#will not create a new instance if already good
self._field = field
@property
def xvalues(self):
"""
Return a 1d numpy array of the cartesian X coordinates for the pixels
of the field.
Following the matplotlib.pyplot.imshow convention:
- positive shift in x is right
- positive shift in y is down
- coords define pixel center, so extent will be
[xmin-1/2dx, xmax+1/2dx]
For an odd number of pixels this puts a pixel in the center as expected
for an even number, the "mid" pixel shifts right and down by 1
Returns
-------
A 1d numpy array of each pixels center x-coordinate
"""
w = self.N
cx = int(w/2)
xvals = self.dx * _np.arange(-cx, (w-cx))
return xvals
@property
def yvalues(self):
"""
Return a 1d numpy array of the cartesian Y coordinates for the pixels
of the field.
Following the matplotlib.pyplot.imshow convention:
- positive shift in x is right
- positive shift in y is down
- coords define pixel center, so extent will be
[xmin-1/2dx, xmax+1/2dx]
For an odd number of pixels this puts a pixel in the center as expected
for an even number, the "mid" pixel shifts right and down by 1
Returns
-------
A 1d numpy array of each pixels center y-coordinate
"""
h = self.N
cy = int(h/2)
yvals = self.dx * _np.arange(-cy, (h-cy))
return yvals
@property
def mgrid_cartesian(self):
"""Return a meshgrid tuple (Y, X) of cartesian coordinates for each
pixel of the field.
Following the matplotlib.pyplot.imshow convention:
- positive shift in x is right
- positive shift in y is down
- coords define pixel center, so extent will be
[xmin-1/2dx, xmax+1/2dx]
For an odd number of pixels this puts a pixel in the center as expected
for an even number, the "mid" pixel shifts right and down by 1
"""
"""LightPipes manual/ examples Matlab and Python version:
plotting the Intensity with imshow() yields coord sys:
positive shift in x is right
positive shift in y is down!!
-> stick to this convention where possible
Adapted from matplotlib.imshow convention: coords define pixel center,
so extent will be xmin-1/2dx, xmax+1/2dx
For an odd number of pixels this puts a pixel in the center as expected
for an even number, the "mid" pixel shifts right and down by 1
"""
h, w = self.N, self.N
cy, cx = int(h/2), int(w/2)
Y, X = _np.mgrid[:h, :w]
Y = (Y-cy)*self.dx
X = (X-cx)*self.dx
return (Y, X)
@property
def mgrid_Rsquared(self):
"""Return a meshgrid of radius R**2 in polar coordinates for each
pixel in the field."""
Y, X = self.mgrid_cartesian
return X**2+Y**2
@property
def mgrid_R(self):
"""Return a meshgrid of radius R in polar coordinates for each
pixel in the field."""
#often phi might not be required, no need to calc it
return _np.sqrt(self.mgrid_Rsquared)
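# Usage sketch (illustrative; grid size and wavelength values are placeholders): create a field
# and query the coordinate helpers defined above.
# F = Field.begin(grid_size=10e-3, wavelength=632.8e-9, N=256)
# Y, X = F.mgrid_cartesian  # pixel-centre coordinates in [m]
# R = F.mgrid_R  # radial distance per pixel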
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.agents.neural_linucb_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.agents import neural_linucb_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.drivers import driver_utils
from tf_agents.bandits.networks import global_and_arm_feature_network
from tf_agents.bandits.policies import policy_utilities
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step
from tf_agents.utils import common
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal
tfd = tfp.distributions
class DummyNet(network.Network):
def __init__(self, observation_spec, encoding_dim=10):
super(DummyNet, self).__init__(
observation_spec, state_spec=(), name='DummyNet')
context_dim = observation_spec.shape[0]
# Store custom layers that can be serialized through the Checkpointable API.
self._dummy_layers = [
tf.keras.layers.Dense(
encoding_dim,
kernel_initializer=tf.compat.v1.initializers.constant(
np.ones([context_dim, encoding_dim])),
bias_initializer=tf.compat.v1.initializers.constant(
np.zeros([encoding_dim])))
]
def call(self, inputs, step_type=None, network_state=()):
del step_type
inputs = tf.cast(inputs, tf.float32)
for layer in self._dummy_layers:
inputs = layer(inputs)
return inputs, network_state
def test_cases():
return parameterized.named_parameters(
{
'testcase_name': '_batch1_contextdim10',
'batch_size': 1,
'context_dim': 10,
}, {
'testcase_name': '_batch4_contextdim5',
'batch_size': 4,
'context_dim': 5,
})
def _get_initial_and_final_steps(batch_size, context_dim):
observation = np.array(range(batch_size * context_dim)).reshape(
[batch_size, context_dim])
reward = np.random.uniform(0.0, 1.0, [batch_size])
initial_step = time_step.TimeStep(
tf.constant(
time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],
name='step_type'),
tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
tf.constant(observation, dtype=tf.float32,
shape=[batch_size, context_dim], name='observation'))
final_step = time_step.TimeStep(
tf.constant(
time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],
name='step_type'),
tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
tf.constant(observation + 100.0, dtype=tf.float32,
shape=[batch_size, context_dim], name='observation'))
return initial_step, final_step
def _get_initial_and_final_steps_with_action_mask(batch_size,
context_dim,
num_actions=None):
observation = np.array(range(batch_size * context_dim)).reshape(
[batch_size, context_dim])
observation = tf.constant(observation, dtype=tf.float32)
mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)
reward = np.random.uniform(0.0, 1.0, [batch_size])
initial_step = time_step.TimeStep(
tf.constant(
time_step.StepType.FIRST,
dtype=tf.int32,
shape=[batch_size],
name='step_type'),
tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
(observation, mask))
final_step = time_step.TimeStep(
tf.constant(
time_step.StepType.LAST,
dtype=tf.int32,
shape=[batch_size],
name='step_type'),
tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
(observation + 100.0, mask))
return initial_step, final_step
def _get_action_step(action):
return policy_step.PolicyStep(
action=tf.convert_to_tensor(action),
info=policy_utilities.PolicyInfo())
def _get_experience(initial_step, action_step, final_step):
single_experience = driver_utils.trajectory_for_bandit(
initial_step, action_step, final_step)
# Adds a 'time' dimension.
return tf.nest.map_structure(
lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),
single_experience)
@test_util.run_all_in_graph_and_eager_modes
class NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NeuralLinUCBAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
@test_cases()
def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):
"""Check NeuralLinUCBAgent updates when behaving like LinUCB."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
loss_info = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(loss_info)
final_a = self.evaluate(agent.cov_matrix)
final_b = self.evaluate(agent.data_vector)
# Compute the expected updated estimates.
observations_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.observation, tf.float64),
[batch_size, context_dim]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
rewards_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
expected_a_updated_list = []
expected_b_updated_list = []
for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
observations_list, rewards_list)):
encoded_observations_for_arm, _ = encoder(observations_for_arm)
encoded_observations_for_arm = tf.cast(
encoded_observations_for_arm, dtype=tf.float64)
num_samples_for_arm_current = tf.cast(
tf.shape(rewards_for_arm)[0], tf.float64)
num_samples_for_arm_total = num_samples_for_arm_current
# pylint: disable=cell-var-from-loop
def true_fn():
a_new = tf.matmul(
encoded_observations_for_arm,
encoded_observations_for_arm,
transpose_a=True)
b_new = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, encoded_observations_for_arm)
return a_new, b_new
def false_fn():
return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
tf.zeros([encoding_dim], dtype=tf.float64))
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
true_fn,
false_fn)
expected_a_updated_list.append(self.evaluate(a_new))
expected_b_updated_list.append(self.evaluate(b_new))
# Check that the actual updated estimates match the expectations.
self.assertAllClose(expected_a_updated_list, final_a)
self.assertAllClose(expected_b_updated_list, final_b)
@test_cases()
def testNeuralLinUCBUpdateDistributed(self, batch_size=1, context_dim=10):
"""Same as above but with distributed LinUCB updates."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
# Call the distributed LinUCB training instead of agent.train().
train_fn = common.function_in_tf1()(
agent.compute_loss_using_linucb_distributed)
reward = tf.cast(experience.reward, agent._dtype)
loss_info = train_fn(
experience.observation, action, reward, weights=None)
self.evaluate(loss_info)
final_a = self.evaluate(agent.cov_matrix)
final_b = self.evaluate(agent.data_vector)
# Compute the expected updated estimates.
observations_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.observation, tf.float64),
[batch_size, context_dim]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
rewards_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
expected_a_updated_list = []
expected_b_updated_list = []
for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
observations_list, rewards_list)):
encoded_observations_for_arm, _ = encoder(observations_for_arm)
encoded_observations_for_arm = tf.cast(
encoded_observations_for_arm, dtype=tf.float64)
num_samples_for_arm_current = tf.cast(
tf.shape(rewards_for_arm)[0], tf.float64)
num_samples_for_arm_total = num_samples_for_arm_current
# pylint: disable=cell-var-from-loop
def true_fn():
a_new = tf.matmul(
encoded_observations_for_arm,
encoded_observations_for_arm,
transpose_a=True)
b_new = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, encoded_observations_for_arm)
return a_new, b_new
def false_fn():
return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
tf.zeros([encoding_dim], dtype=tf.float64))
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
true_fn,
false_fn)
expected_a_updated_list.append(self.evaluate(a_new))
expected_b_updated_list.append(self.evaluate(b_new))
# Check that the actual updated estimates match the expectations.
self.assertAllClose(expected_a_updated_list, final_a)
self.assertAllClose(expected_b_updated_list, final_b)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):
"""Check NeuralLinUCBAgent updates when behaving like eps-greedy."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions, encoding_dim)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
variable_collection=variable_collection,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(
self, batch_size=1, context_dim=10):
"""Check updates when behaving like eps-greedy and using masked actions."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
batch_size, context_dim, num_actions)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),
tensor_spec.TensorSpec([num_actions], tf.int32))
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec[0])
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
def testInitializeRestoreVariableCollection(self):
if not tf.executing_eagerly():
self.skipTest('Test only works in eager mode.')
num_actions = 5
encoding_dim = 7
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions=num_actions, encoding_dim=encoding_dim)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(variable_collection.num_samples_list)
checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')
checkpoint.save(file_prefix=checkpoint_prefix)
variable_collection.actions_from_reward_layer.assign(False)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint_load_status = checkpoint.restore(latest_checkpoint)
self.evaluate(checkpoint_load_status.initialize_or_restore())
self.assertEqual(
self.evaluate(variable_collection.actions_from_reward_layer), True)
def testTrainPerArmAgentWithMask(self):
num_actions = 5
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
2, 3, num_actions, add_action_mask=True)
time_step_spec = time_step.time_step_spec(obs_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoding_dim = 10
encoder = (
global_and_arm_feature_network.create_feed_forward_common_tower_network(
obs_spec[0], (4, 3), (3, 4), (4, 2), encoding_dim))
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]),
accepts_per_arm_features=True,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
observations = ({
bandit_spec_utils.GLOBAL_FEATURE_KEY:
tf.constant([[1, 2], [3, 4]], dtype=tf.float32),
bandit_spec_utils.PER_ARM_FEATURE_KEY:
tf.cast(
tf.reshape(tf.range(30), shape=[2, 5, 3]), dtype=tf.float32)
}, tf.ones(shape=(2, num_actions), dtype=tf.int32))
    actions = np.array([0, 3], dtype=np.int32)
# coding=utf-8
"""
audfprint_match.py
Fingerprint matching code for audfprint
2014-05-26 <NAME> <EMAIL>
"""
from __future__ import division, print_function
import os
import time
import psutil
# import matplotlib.pyplot as plt
# import librosa
# import librosa.display
import numpy as np
# import scipy.signal
import audfprint_analyze # for localtest and illustrate
# import audio_read
def process_info():
rss = usrtime = 0
p = psutil.Process(os.getpid())
if os.name == 'nt':
rss = p.memory_info()[0]
usrtime = p.cpu_times()[0]
else:
rss = p.get_memory_info()[0]
usrtime = p.get_cpu_times()[0]
return rss, usrtime
def log(message):
""" log info with stats """
    print('%s physmem=%s utime=%s %s' % ((time.ctime(),) + process_info() + (message,)))
def encpowerof2(val):
""" Return N s.t. 2^N >= val """
    return int(np.ceil(np.log(max(1, val)) / np.log(2)))
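# Worked example (illustrative): encpowerof2(5) returns 3 because 2**3 = 8 >= 5
# while 2**2 = 4 < 5; encpowerof2(8) returns exactly 3.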
""" dict_sbmat module
Helper functions to deal with sparse block matrices using a dictionary
with index tuples as keys. This is beneficial e.g. in assembling coupled
circuit/FEM systems in the matrix level.
E.g. if you have sparse matrices A B C D and you want to create a sparse
block matrix like
[[A, 0, 0],
[0, B, C],
[0,-D, 0]]
you can do the following:
> sm = {}
> sm[(0,0)] = A
> sm[(1,1)] = B
> sm[(1,2)] = C
> sm[(2,1)] = -D
Inspect the block structure with print_blocks
> dict_tools.print_blocks(sm)
Create a scipy bmat with tolist
> S = scipy.sparse.bmat(dict_tools.tolist(sm))
Pick subblocks corresponding to the block indices of the resulting sparse
matrix with 'submat' and 'mk_selector_builder'.
To e.g. pick blocks
S11 = [[A]]
S12 = [[0,0]]
S21 = [[0],
[0]]
S22 = [[B,C],
[-D,0]]
use
> builder = mk_selector_builder(sm)
> P11,Q11 = builder([0],[0])
> S11 = P11*S*Q11
> P12,Q12 = builder([0], [1,2])
    > S12 = P12*S*Q12
> P21,Q21 = builder([1,2], [0])
> S21 = P21*S*Q21
> P22,Q22 = builder([1,2], [1,2])
> S22 = P22*S*Q22
At first this seems terribly inefficient, but it really isn't. Using the
sparse matrix product * to pick rows and columns sacrifices some memory but
is extremely simple to use and e.g. utilizes the sparsity patterns of all
matrices efficiently. """
import numpy as np
import scipy.sparse as sps
from itertools import product
def tolist(dmat):
""" Convert dmat to a list format [[A,None,...], ...] where
empty blocks are filled with None. This can be given as an input to
scipy.sparse.bmat """
inds = np.array(list(dmat.keys()))
nrows = np.max(inds[:,0])+1
ncols = np.max(inds[:,1])+1
return [[dmat.get((row,col),None)
for col in range(0,ncols)]
for row in range(0,nrows)]
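# Minimal usage sketch (illustrative; assumes A, B, C, D are compatible scipy
# sparse blocks as described in the module docstring above):
#
#   sm = {(0, 0): A, (1, 1): B, (1, 2): C, (2, 1): -D}
#   S = sps.bmat(tolist(sm))  # -> [[A, 0, 0], [0, B, C], [0, -D, 0]] as one sparse matrix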
def print_blocks(dmat):
inds = np.array(list(dmat.keys()))
    xdim = np.max(inds[:,0])
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
def skew(x):
return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
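# Note: skew(x) is the cross-product matrix of x, i.e. skew(x).dot(y) == np.cross(x, y).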
def compute_fundamental_from_poses(K_src, K_dst, T_src, T_dst):
T_src2dst = T_dst.dot(np.linalg.inv(T_src))
R = T_src2dst[:3, :3]
t = T_src2dst[:3, 3]
tx = skew(t)
E = np.dot(tx, R)
return np.linalg.inv(K_dst).T.dot(E).dot(np.linalg.inv(K_src))
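# Sanity-check sketch (illustrative; u1, v1, u2, v2 are placeholder pixel coordinates
# of a matched point pair): the fundamental matrix returned above should satisfy the
# epipolar constraint x_dst^T F x_src ~ 0 for true correspondences, e.g.
#
#   F = compute_fundamental_from_poses(K_src, K_dst, T_src, T_dst)
#   x_src = np.array([u1, v1, 1.0])
#   x_dst = np.array([u2, v2, 1.0])
#   residual = x_dst @ F @ x_src   # ~ 0 up to noise and calibration error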
def detect_keypoints(im, detector, num_kpts=10000):
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
if detector == 'sift':
sift = cv2.xfeatures2d.SIFT_create(nfeatures=num_kpts)
kpts = sift.detect(gray)
elif detector == 'orb':
orb = cv2.ORB_create(nfeatures=num_kpts)
kpts = orb.detect(gray)
else:
raise NotImplementedError('Unknown keypoint detector.')
return kpts
def extract_feats(im, kpts, feature_type, model=None):
if feature_type == 'sift':
sift = cv2.xfeatures2d.SIFT_create()
kpts, feats = sift.compute(im, kpts)
elif feature_type == 'orb':
orb = cv2.ORB_create()
kpts, feats = orb.compute(im, kpts)
elif feature_type == 'caps':
assert model is not None
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
        kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
import numpy as np
import pytest
import gbpy.pad_dump_file as pad
import gbpy.util_funcs as uf
import byxtal.lattice as gbl
@pytest.mark.parametrize('filename0, element, num_GBregion, actual_min_z_gbreg, actual_max_z_gbreg,'
'actual_w_bottom_SC, actual_w_top_SC',
[("tests/data/dump_2", "Al", 51, -3.06795, 1.44512, 116.85, 118.462)])
# ("tests/data/dump_1", "Al", 138, -2.811127714, 2.811127714, 94, 91.5),
def test_GB_finder(filename0, element, num_GBregion, actual_min_z_gbreg, actual_max_z_gbreg,
actual_w_bottom_SC, actual_w_top_SC):
l1 = gbl.Lattice(str(element))
data = uf.compute_ovito_data(filename0)
non_p = uf.identify_pbc(data)
GbRegion, GbIndex, GbWidth, w_bottom_SC, w_top_SC = pad.GB_finder(data, l1, non_p, 'ptm', '.1')
assert np.abs((actual_w_bottom_SC - w_bottom_SC)/actual_w_bottom_SC) < .5
assert np.abs((actual_w_top_SC - w_top_SC)/actual_w_top_SC) < .5
assert np.abs(GbRegion[0] - actual_min_z_gbreg) < 1e-3
assert np.abs(GbRegion[1] - actual_max_z_gbreg) < 1e-3
    assert np.shape(GbIndex)[0] == num_GBregion
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script yields the values for the illustrative example in
.. seealso::
[1] <NAME>, <NAME>, <NAME>, "Assessing Transferability from Simulation to Reality for Reinforcement
Learning", PAMI, 2021
"""
import os
import os.path as osp
import numpy as np
from matplotlib import pyplot as plt
from scipy import special
import pyrado
from pyrado import set_seed
from pyrado.environments.one_step.catapult import CatapultExample
from pyrado.plotting.curve import draw_curve_from_data
from pyrado.utils.argparser import get_argparser
def calc_E_n_Jhat(n, th):
r"""
    Calculate $E_\xi[\hat{J}_n(\theta)]$, approximated by $\sum_{i=1}^n p(\xi_i) \hat{J}_n(\theta)$.
:param n: number of domains $n$ to approximate the expectation
:param th: (arbitrary) policy parameter, might be estimated using n domain parameters, but does not have to be
:return: approximation of $E_\\xi[ \hat{J}_n(\theta) ]$
"""
E_n_Jhat_th = 0
for i in range(n + 1):
# i is the number of Venus draws
binom_coeff = special.binom(n, i)
E_n_Jhat_th += binom_coeff * pow(psi, i) * pow(1 - psi, n - i) * env.est_expec_return(th, n - i, i)
return E_n_Jhat_th
def calc_E_n_Jhat_th_opt(n):
r"""
    Calculate $E_\xi[\hat{J}_n(\theta^*)]$, approximated by $\sum_{i=1}^n p(\xi_i) \hat{J}_n(\theta^*)$.
:param n: number of domains $n$ to approximate the expectation
:return: approximation of $E_\\xi[ \hat{J}_n(\theta^*) ]$
"""
E_n_Jhat_th_opt = 0
for i in range(n + 1):
# i is the number of Venus draws
binom_coeff = special.binom(n, i)
E_n_Jhat_th_opt += binom_coeff * pow(psi, i) * pow(1 - psi, n - i) * env.opt_est_expec_return(n - i, i)
return E_n_Jhat_th_opt
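# For example, with n=3 the binomial weights C(3, i) * psi**i * (1 - psi)**(n - i)
# are exactly the coefficients 1, 3, 3, 1 spelled out by hand in check_E_n_Jhat() below.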
def check_E_n_Jhat(th_n_opt, n):
"""
Check the influence of the number of domains $n$ used for the expectation operator.
:param th_n_opt: optimal policy parameter determined from n domains
:param n: number of domains $n$ used for determining the policy parameters
"""
# "Manual" expectation using n=3 domain parameters
E_3_Jhat_n_opt = (
1 * pow(psi, 3) * env.est_expec_return(th_n_opt, 0, 3)
+ 3 * pow(psi, 2) * pow(1 - psi, 1) * env.est_expec_return(th_n_opt, 1, 2)
+ 3 * pow(psi, 1) * pow(1 - psi, 2) * env.est_expec_return(th_n_opt, 2, 1)
+ 1 * pow(1 - psi, 3) * env.est_expec_return(th_n_opt, 3, 0)
)
print(f"E_3_Jhat_{n}_opt: {E_3_Jhat_n_opt}")
# Expectation using n=50 domain parameters
E_3_Jhat_n_opt = calc_E_n_Jhat(3, th_n_opt)
print(f"E_3_Jhat_{n}_opt: {E_3_Jhat_n_opt}")
# Expectation using n=50 domain parameters
E_50_Jhat_n_opt = calc_E_n_Jhat(50, th_n_opt)
print(f"E_50_Jhat_{n}_opt: {E_50_Jhat_n_opt}")
# Expectation using n=500 domain parameters
E_500_Jhat_n_opt = calc_E_n_Jhat(500, th_n_opt)
print(f"E_500_Jhat_{n}_opt: {E_500_Jhat_n_opt}")
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Set up the example
ex_dir = osp.join(pyrado.EVAL_DIR, "illustrative_example")
env = CatapultExample(m=1.0, g_M=3.71, k_M=1000.0, x_M=0.5, g_V=8.87, k_V=3000.0, x_V=1.5)
psi = 0.7 # true probability of drawing Venus
num_samples = 100
num_iter = 30
noise_th_scale = 0.15
set_seed(args.seed)
fig_size = tuple([0.75 * x for x in pyrado.figsize_thesis_1percol_18to10])
th_true_opt = env.opt_policy_param(1 - psi, psi) # true probabilities instead of counts
J_true_opt = env.opt_est_expec_return(1 - psi, psi) # true probabilities instead of counts
print(f"th_true_opt: {th_true_opt}")
print(f"J_true_opt: {J_true_opt}\n")
# Initialize containers
n_M_hist = np.empty((num_samples, num_iter))
n_V_hist = np.empty((num_samples, num_iter))
th_n_opt_hist = np.empty((num_samples, num_iter))
th_c_hist = np.empty((num_samples, num_iter))
Jhat_th_n_opt_hist = np.empty((num_samples, num_iter))
Jhat_th_c_hist = np.empty((num_samples, num_iter))
Jhat_th_true_opt_hist = np.empty((num_samples, num_iter))
G_n_hist = np.empty((num_samples, num_iter))
G_true_hist = np.empty((num_samples, num_iter))
b_Jhat_n_hist = np.empty((num_samples, num_iter))
for s in range(num_samples):
for n in range(1, num_iter + 1):
n_V = np.random.binomial(n, psi) # perform n Bernoulli trials
n_M = n - n_V
n_M_hist[s, n - 1], n_V_hist[s, n - 1] = n_M, n_V
# Compute the optimal policy parameters
th_n_opt = env.opt_policy_param(n_M, n_V)
th_n_opt_hist[s, n - 1] = th_n_opt
if args.verbose:
print(f"th_{n}_opt: {th_n_opt}")
# Compute the estimated optimal objective function value for the n domains
Jhat_th_n_opt = env.opt_est_expec_return(n_M, n_V)
Jhat_th_n_opt_hist[s, n - 1] = Jhat_th_n_opt
if args.verbose:
print(f"Jhat_{n}_opt: {Jhat_th_n_opt}")
Jhat_n_opt_check = env.est_expec_return(th_n_opt, n_M, n_V)
assert abs(Jhat_th_n_opt - Jhat_n_opt_check) < 1e-8
# Check if E_\xi[max_\theta \hat{J}_n(\theta)] == max_\theta \hat{J}_n(\theta)
if args.verbose:
check_E_n_Jhat(th_n_opt, n)
            # Compute the estimated objective function value for the true optimum
Jhat_th_true_opt = env.est_expec_return(th_true_opt, n_M, n_V)
Jhat_th_true_opt_hist[s, n - 1] = Jhat_th_true_opt
# Create (arbitrary) candidate solutions
noise_th = float(np.random.randn(1) * noise_th_scale) # parameter noise
th_c = th_true_opt + noise_th # G_n > G_true (it should be like this)
# th_c = th_n_opt + noise_th # G_n < G_true (it should not be like this)
th_c_hist[s, n - 1] = th_c
Jhat_th_c = env.est_expec_return(th_c, n_M, n_V)
Jhat_th_c_hist[s, n - 1] = Jhat_th_c
# Estimated optimality gap \hat{G}_n(\theta^c)
G_n = Jhat_th_n_opt - Jhat_th_c
G_n_hist[s, n - 1] = G_n
if args.verbose:
print(f"G_{n}(th_c):\t\t{G_n}")
# True optimality gap G(\theta^c) (use true probabilities instead of counts)
G_true = J_true_opt - env.est_expec_return(th_c, 1 - psi, psi)
G_true_hist[s, n - 1] = G_true
if args.verbose:
print(f"G_true(th_c):\t{G_true}")
# Compute the simulation optimization bias b[\hat{J}_n]
b_Jhat_n = calc_E_n_Jhat_th_opt(n) - J_true_opt
b_Jhat_n_hist[s, n - 1] = b_Jhat_n
if args.verbose:
print(f"b_Jhat_{n}:\t\t{b_Jhat_n}\n")
print(f"At the last iteration (n={num_iter})")
print(f"mean G_n: {np.mean(G_n_hist, axis=0)[-1]}")
print(f"mean G_true: {np.mean(G_true_hist, axis=0)[-1]}")
print(f"mean b_Jhat_n: {np.mean(b_Jhat_n_hist, axis=0)[-1]}\n")
# Plot
os.makedirs(ex_dir, exist_ok=True)
fig_n, ax = plt.subplots(1, figsize=fig_size, constrained_layout=True)
draw_curve_from_data(
"ci_on_mean",
ax,
n_M_hist,
        np.arange(1, num_iter + 1),
import os
import argparse
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import numpy as np
from dataset import potsdam, label_to_RGB
from seg_metric import SegmentationMetric
import cv2
from mutil_scale_test import MultiEvalModule
import logging
import warnings
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
def reduce_tensor(inp):
"""
Reduce the loss from all processes so that
process with rank 0 has the averaged results.
"""
world_size = get_world_size()
if world_size < 2:
return inp
with torch.no_grad():
reduced_inp = inp
torch.distributed.reduce(reduced_inp, dst=0)
return reduced_inp
class params():
def __init__(self, args2):
if args2.dataset in ['potsdam', 'vaihingen']:
self.number_of_classes = 6
models = args2.models
if models == 'HRNet_32':
"hrnet32"
self.STAGE2 = {'NUM_MODULES': 1,
'NUM_BRANCHES': 2,
'NUM_BLOCKS': [4, 4],
'NUM_CHANNELS': [32, 64],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
self.STAGE3 = {'NUM_MODULES': 4,
'NUM_BRANCHES': 3,
'NUM_BLOCKS': [4, 4, 4],
'NUM_CHANNELS': [32, 64, 128],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
self.STAGE4 = {'NUM_MODULES': 3,
'NUM_BRANCHES': 4,
'NUM_BLOCKS': [4, 4, 4, 4],
'NUM_CHANNELS': [32, 64, 128, 256],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
elif models == 'HRNet_48':
self.STAGE2 = {'NUM_MODULES': 1,
'NUM_BRANCHES': 2,
'NUM_BLOCKS': [4, 4],
'NUM_CHANNELS': [32, 64],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
self.STAGE3 = {'NUM_MODULES': 4,
'NUM_BRANCHES': 3,
'NUM_BLOCKS': [4, 4, 4],
'NUM_CHANNELS': [32, 64, 128],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
self.STAGE4 = {'NUM_MODULES': 3,
'NUM_BRANCHES': 4,
'NUM_BLOCKS': [4, 4, 4, 4],
'NUM_CHANNELS': [32, 64, 128, 256],
'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM'}
def parse_args():
parser = argparse.ArgumentParser(description='Train segmentation network')
parser.add_argument("--dataset", type=str, default='vaihingen', choices=['potsdam', 'vaihingen'])
parser.add_argument("--val_batchsize", type=int, default=16)
parser.add_argument("--crop_size", type=int, nargs='+', default=[512, 512], help='H, W')
parser.add_argument("--models", type=str, default='danet',
choices=['danet', 'bisenetv2', 'pspnet', 'segbase', 'swinT', 'deeplabv3', 'fcn', 'fpn', 'unet', 'resT'])
parser.add_argument("--head", type=str, default='uperhead')
parser.add_argument("--use_edge", type=int, default=0)
parser.add_argument("--save_dir", type=str, default='work_dir')
parser.add_argument("--base_dir", type=str, default='./')
parser.add_argument("--information", type=str, default='RS')
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--save_gpu_memory", type=int, default=0)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args2 = parser.parse_args()
return args2
def get_model():
models = args2.models
if models == 'swinT':
print(models, args2.head)
else:
print(models)
if args2.dataset in ['potsdam', 'vaihingen']:
nclass = 6
assert models in ['danet', 'bisenetv2', 'pspnet', 'segbase', 'swinT', 'deeplabv3', 'fcn', 'fpn', 'unet', 'resT']
if models == 'danet':
from models.danet import DANet
model = DANet(nclass=nclass, backbone='resnet50', pretrained_base=False)
if models == 'bisenetv2':
from models.bisenetv2 import BiSeNetV2
model = BiSeNetV2(nclass=nclass)
if models == 'pspnet':
from models.pspnet import PSPNet
model = PSPNet(nclass=nclass, backbone='resnet50', pretrained_base=False)
if models == 'segbase':
from models.segbase import SegBase
model = SegBase(nclass=nclass, backbone='resnet50', pretrained_base=False)
if models == 'swinT':
from models.swinT import swin_tiny as swinT
if args2.use_edge:
model = swinT(nclass=nclass, pretrained=False, aux=True, head=args2.head, edge_aux=args2.use_edge)
else:
model = swinT(nclass=nclass, pretrained=False, aux=True, head=args2.head)
if models == 'resT':
from models.resT import rest_tiny as resT
if args2.use_edge:
model = resT(nclass=nclass, pretrained=False, aux=True, head=args2.head, edge_aux=args2.use_edge)
else:
model = resT(nclass=nclass, pretrained=False, aux=True, head=args2.head)
if models == 'deeplabv3':
from models.deeplabv3 import DeepLabV3
model = DeepLabV3(nclass=nclass, backbone='resnet50', pretrained_base=False)
if models == 'fcn':
from models.fcn import FCN16s
model = FCN16s(nclass=nclass)
if models == 'fpn':
from models.fpn import FPN
model = FPN(nclass=nclass)
if models == 'unet':
from models.unet import UNet
model = UNet(nclass=nclass)
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(device)
model = nn.parallel.DistributedDataParallel(
model, device_ids=[args2.local_rank], output_device=args2.local_rank, find_unused_parameters=True)
return model
args2 = parse_args()
args = params(args2)
cudnn.benchmark = True
cudnn.deterministic = False
cudnn.enabled = True
distributed = True
device = torch.device(('cuda:{}').format(args2.local_rank))
if distributed:
torch.cuda.set_device(args2.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://",
)
data_dir = os.path.join(args2.base_dir, 'data')
potsdam_val = potsdam(base_dir=data_dir, train=False,
dataset=args2.dataset, crop_szie=args2.crop_size)
if distributed:
val_sampler = DistributedSampler(potsdam_val)
else:
val_sampler = None
dataloader_val = DataLoader(
potsdam_val,
batch_size=args2.val_batchsize,
shuffle=False,
num_workers=4,
pin_memory=True,
sampler=val_sampler)
potsdam_val_full = potsdam(base_dir=data_dir, train=False,
dataset=args2.dataset, crop_szie=args2.crop_size, val_full_img=True)
if distributed:
full_val_sampler = DistributedSampler(potsdam_val_full)
else:
full_val_sampler = None
dataloader_val_full = DataLoader(
potsdam_val_full,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=True,
sampler=full_val_sampler)
def val(model, weight_path):
if args2.dataset in ['potsdam', 'vaihingen']:
nclasses = 6
model.eval()
metric = SegmentationMetric(numClass=nclasses)
with torch.no_grad():
model_state_file = weight_path
if os.path.isfile(model_state_file):
print('loading checkpoint successfully')
logging.info("=> loading checkpoint '{}'".format(model_state_file))
checkpoint = torch.load(model_state_file, map_location=lambda storage, loc: storage)
checkpoint = {k: v for k, v in checkpoint.items() if not 'loss' in k}
checkpoint = {k.replace('model.', ''): v for k, v in checkpoint.items()}
model.load_state_dict(checkpoint)
else:
warnings.warn('weight is not existed !!!"')
for i, sample in enumerate(dataloader_val):
images, labels = sample['image'], sample['label']
images = images.cuda()
labels = labels.long().squeeze(1)
logits = model(images)
print("test:{}/{}".format(i, len(dataloader_val)))
logits = logits.argmax(dim=1)
logits = logits.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
metric.addBatch(logits, labels)
result_count(metric)
def mutil_scale_val(model, weight_path, object_path):
if args2.dataset in ['potsdam', 'vaihingen']:
nclasses = 6
model = MultiEvalModule(model, nclass=nclasses, flip=True, scales=[0.5, 0.75, 1.0, 1.25, 1.5], save_gpu_memory=args2.save_gpu_memory,
crop_size=args2.crop_size, stride_rate=1/2, get_batch=args2.val_batchsize)
model.eval()
metric = SegmentationMetric(nclasses)
with torch.no_grad():
model_state_file = weight_path
if os.path.isfile(model_state_file):
print('loading checkpoint successfully')
logging.info("=> loading checkpoint '{}'".format(model_state_file))
checkpoint = torch.load(model_state_file, map_location=lambda storage, loc: storage)
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
elif 'model' in checkpoint:
checkpoint = checkpoint['model']
else:
checkpoint = checkpoint
checkpoint = {k: v for k, v in checkpoint.items() if not 'n_averaged' in k}
checkpoint = {k.replace('model.', 'module.'): v for k, v in checkpoint.items()}
model.load_state_dict(checkpoint)
else:
warnings.warn('weight is not existed !!!"')
for i, sample in enumerate(dataloader_val_full):
images, labels, names = sample['image'], sample['label'], sample['name']
images = images.cuda()
labels = labels.long().squeeze(1)
logits = model(images)
print("test:{}/{}".format(i, len(dataloader_val_full)))
logits = logits.argmax(dim=1)
logits = logits.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
metric.addBatch(logits, labels)
vis_logits = label_to_RGB(logits.squeeze())[:, :, ::-1]
save_path = os.path.join(object_path, 'outputs', names[0] + '.png')
cv2.imwrite(save_path, vis_logits)
result_count(metric)
def result_count(metric):
iou = metric.IntersectionOverUnion()
miou = np.nanmean(iou[0:5])
acc = metric.Accuracy()
f1 = metric.F1()
mf1 = np.nanmean(f1[0:5])
precision = metric.Precision()
mprecision = np.nanmean(precision[0:5])
recall = metric.Recall()
mrecall = np.nanmean(recall[0:5])
iou = reduce_tensor(torch.from_numpy(np.array(iou)).to(device) / get_world_size()).cpu().numpy()
miou = reduce_tensor(torch.from_numpy(np.array(miou)).to(device) / get_world_size()).cpu().numpy()
acc = reduce_tensor(torch.from_numpy(np.array(acc)).to(device) / get_world_size()).cpu().numpy()
f1 = reduce_tensor(torch.from_numpy(np.array(f1)).to(device) / get_world_size()).cpu().numpy()
mf1 = reduce_tensor(torch.from_numpy(np.array(mf1)).to(device) / get_world_size()).cpu().numpy()
    precision = reduce_tensor(torch.from_numpy(np.array(precision)).to(device) / get_world_size()).cpu().numpy()
"""
@author: jens
@modifiers: hyatt, neergaard
Migrated from inf_hypnodensity on 12/6/2019
"""
import pickle
import numpy as np
import pywt # wavelet entropy
import itertools # for extracting feature combinations
import os # for opening os files for pickle.
from inf_tools import softmax
class HypnodensityFeatures(object): # <-- extract_features
num_features = 489
def __init__(self, app_config):
self.config = app_config
# Dictionaries, keyed by model names
self.meanV = {}
# Standard deviation of features.
self.stdV = {}
# range is calculated as difference between 15th and 85th percentile - this was previously the "scaleV".
self.rangeV = {}
self.medianV = {}
try:
self.selected = app_config.narco_prediction_selected_features
except:
self.selected = [] # [1, 11, 16, 22, 25, 41, 43, 49, 64, 65, 86, 87, 103, 119, 140, 147, 149, 166, 196, 201, 202, 220, 244, 245, 261, 276, 289, 296, 299, 390, 405, 450, 467, 468, 470, 474, 476, 477]
self.scale_path = app_config.hypnodensity_scale_path # 'scaling'
# self.select_features_path = appConfig.hypnodensity_select_features_path
# self.select_features_pickle_name = appConfig.hypnodensity_select_features_pickle_name # 'narcoFeatureSelect.p'
def extract(self, hyp):
eps = 1e-10
features = np.zeros([24 + 31 * 15])
hyp = hyp[~np.isnan(hyp[:, 0]), :] # or np.invert(np.isnan(hyp[:, 0])
# k = [i for i, v in enumerate(hyp[:, 0]) if np.isnan(v)]
# hyp[k[0] - 2:k[-1] + 2, :]
j = -1
for i in range(5):
for comb in itertools.combinations([0, 1, 2, 3, 4], i + 1): # 31 iterations and 15 features per iteration
j += 1
dat = np.prod(hyp[:, comb], axis=1) ** (1 / float(len(comb)))
features[j * 15] = np.log(np.mean(dat) + eps)
features[j * 15 + 1] = -np.log(1 - np.max(dat))
moving_av = np.convolve(dat, np.ones(10), mode='valid')
features[j * 15 + 2] = np.mean(np.abs(np.diff(moving_av))) # diff of raw data
# features[j * 15 + 2] = np.mean(np.abs(np.diff(dat))) # Alex's next version: moving average may smooth the transitions out too much - removing a hyper-parameter
features[j * 15 + 3] = self.wavelet_entropy(dat) # Shannon entropy - check if it is used as a feature - was not selected.
rate = np.cumsum(dat) / np.sum(dat)
# check at which point of the study the percentage of this combination of sleep stages is reached.
try:
I1 = (i for i, v in enumerate(rate) if v > 0.05).__next__()
except StopIteration:
I1 = len(hyp)
features[j * 15 + 4] = np.log(I1 * 2 + eps)
try:
I2 = (i for i, v in enumerate(rate) if v > 0.1).__next__()
except StopIteration:
I2 = len(hyp)
features[j * 15 + 5] = np.log(I2 * 2 + eps)
try:
I3 = (i for i, v in enumerate(rate) if v > 0.3).__next__()
except StopIteration:
I3 = len(hyp)
features[j * 15 + 6] = np.log(I3 * 2 + eps)
try:
I4 = (i for i, v in enumerate(rate) if v > 0.5).__next__()
# I4 = next(i for i, v in enumerate(rate) if v > 0.5) # for when we have to update python
except StopIteration:
I4 = len(hyp)
                features[j * 15 + 7] = np.log(I4 * 2 + eps)
import sys
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import codecs
DPI = 300
FIGSIZE = (12,4)
FONTSIZE_LABELS = 16
LINEWIDTH = 1
TICKINDEX_MAJOR_X = 10
TICKINDEX_MINOR_X = TICKINDEX_MAJOR_X / 5
# TICKINDEX_MAJOR_Y = 1
# TICKINDEX_MINOR_Y = TICKINDEX_MAJOR_Y / 5
COLORS = dict(bg_blue='#0B3C5D', bg_red='#B82601', bg_green='#1c6b0a',
bg_lightblue='#328CC1', bg_darkblue='#062F4F', bg_yellow='#D9B310',
bg_darkred='#984B43', bg_bordeaux='#76323F', bg_olivegreen='#626E60',
bg_yellowgrey='#AB987A', bg_brownorange='#C09F80')
COLOR = COLORS["bg_blue"]
def ras_to_csv_converter_plotter(ras_files):
for e in ras_files:
filename = e.stem
print(f"\t{filename}")
tt, int_exp = [], []
with codecs.open(e, 'r', 'charmap') as f:
lines = f.readlines()
for i in range(0, len(lines)):
if '*RAS_INT_START' in lines[i]:
start = i + 1
elif '*RAS_INT_END' in lines[i]:
end = i
for i in range(start, end):
tt.append(float(lines[i].split()[0]))
int_exp.append(float(lines[i].split()[1]))
tt, int_exp = np.array(tt), np.array(int_exp)
tt_int_exp = np.column_stack((tt, int_exp))
        np.savetxt(f"csv/{filename}.csv", tt_int_exp, delimiter=",", fmt="%.3f")
# -*- coding: utf-8 -*-
#try:
# from Numeric import *
#except ImportError:
from numpy import *
import copy
import numpy
outerproduct = outer
PI2 = pi*2.0
# for debuging set a seed
#random.seed(42)
def make_vec(l):
return array(l, "d")
def scal_prod(v1, v2):
return sum(v1*v2,axis=-1)
def length(v):
    return sqrt(sum(v*v, axis=-1))
def norm(v1):
return sqrt(scal_prod(v1,v1))
def normalize(v1):
n = norm(v1)
if isscalar(n):
if isclose(n,0):
return v1
else:
return v1/n
else:
return v1/n[:,newaxis]
def angle(v1, v2):
_nv1 = normalize(v1)
_nv2 = normalize(v2)
d = scal_prod(_nv1, _nv2)
if d < -1.0: d=-1.0
if d > 1.0 : d= 1.0
return arccos(d)
def project(v1, v2):
_nv2 = normalize(v2)
l = scal_prod(v1, _nv2)
return _nv2*l
def cross_prod(a, b):
return array( [a[1]*b[2] - a[2]*b[1], \
a[2]*b[0] - a[0]*b[2], \
a[0]*b[1] - a[1]*b[0]], "d")
def rotmat(v, theta):
Q = array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]], "d")
Q *= sin(theta)
uut = outerproduct(v,v)
Q += (identity(3,"d") - uut)*cos(theta)
Q += uut
return Q
def rotate(xyz, v, theta):
return dot(xyz, transpose(rotmat(v, theta)))
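# Quick illustrative check: rotating the x axis by 90 degrees about the z axis
# should give the y axis (up to floating point error):
#
#   rotate(make_vec([1.0, 0.0, 0.0]), make_vec([0.0, 0.0, 1.0]), pi/2)
#   # -> array([~0.0, 1.0, 0.0])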
def rotmat_from_euler(euler):
R = zeros([3,3],"d")
sa = sin(euler[0])
ca = cos(euler[0])
sb = sin(euler[1])
cb = cos(euler[1])
sg = sin(euler[2])
cg = cos(euler[2])
R[0, 0] = cb * cg
R[1, 0] = cb * sg
R[2, 0] = -sb
R[0, 1] = -ca * sg + sa * sb * cg
R[1, 1] = ca * cg + sa * sb * sg
R[2, 1] = sa * cb
R[0, 2] = sa * sg + ca * sb * cg
R[1, 2] = -sa * cg + ca * sb * sg
R[2, 2] = ca * cb
return R
def rotate_by_euler(xyz, euler):
return dot(xyz, transpose(rotmat_from_euler(euler)))
def random_quat():
rand = random.random(3)
r1 = sqrt(1.0 - rand[0])
r2 = sqrt(rand[0])
t1 = PI2 * rand[1]
t2 = PI2 * rand[2]
return array([cos(t2)*r2, sin(t1)*r1, cos(t1)*r1, sin(t2)*r2])
def rotation_quat(triple):
    # with an input of three numbers between zero and one we sample the rotational space uniformly
t0 = triple[0]
if t0>1.0:t0=1.0
if t0<0.0:t0=0.0
r1 = sqrt(1.0 - t0)
r2 = sqrt(t0)
t1 = PI2 * (triple[1]%1.0)
t2 = PI2 * (triple[2]%1.0)
return array([cos(t2)*r2, sin(t1)*r1, cos(t1)*r1, sin(t2)*r2])
def quat_to_mat(quat):
q = array(quat, copy=True)
n = dot(q, q)
if n < 1.0e-15:
return identity(3)
q *= sqrt(2.0 / n)
q = outer(q, q)
return array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0]],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0]],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2]]])
def apply_mat(m,v):
return dot(v,m)
def rotate_by_triple(xyz, triple):
rotmat = quat_to_mat(rotation_quat(triple))
return dot(xyz, rotmat)
def rotate_random(v):
return apply_mat(quat_to_mat(random_quat()),v)
def moi2(rs, ms=None):
"""Moment of inertia"""
if ms is None: ms = numpy.ones(len(rs))
else: ms = numpy.asarray(ms)
rs = numpy.asarray(rs)
N = rs.shape[1]
# Matrix is symmetric, so inner/outer loop doesn't matter
return [[(ms*rs[:,i]*rs[:,j]).sum()/ms.sum()
for i in range(N)] for j in range(N)]
def moi(rs,ms=None):
if ms is None: ms = numpy.ones(len(rs))
else: ms = numpy.asarray(ms)
rs = numpy.asarray(rs)
Ixx = (ms* (rs[:,1]*rs[:,1] + rs[:,2]*rs[:,2])).sum()
Iyy = (ms* (rs[:,0]*rs[:,0] + rs[:,2]*rs[:,2])).sum()
Izz = (ms* (rs[:,0]*rs[:,0] + rs[:,1]*rs[:,1])).sum()
Ixy =-(ms* rs[:,0] * rs[:,1]).sum()
Ixz =-(ms* rs[:,0] * rs[:,2]).sum()
Iyz =-(ms* rs[:,1] * rs[:,2]).sum()
    I = [[Ixx,Ixy,Ixz],[Ixy,Iyy,Iyz],[Ixz,Iyz,Izz]]
return numpy.array(I)/ms.sum()
def pax(rs,ms=None):
if ms is None: ms = numpy.ones(len(rs))
else: ms = numpy.asarray(ms)
rs = numpy.asarray(rs)
I = moi(rs,ms=ms)
#print(I)
eigval, eigvec = numpy.linalg.eigh(I)
return eigval,eigvec
def align_pax(xyz,masses=None):
eigval,eigvec = pax(xyz,ms=masses)
eigorder = numpy.argsort(eigval)
rotmat = eigvec[:,eigorder] # sort the column vectors in the order of the eigenvalues to have largest on x, second largest on y, ...
return apply_mat(rotmat,xyz)
def align_bond_to(m,bond,align_xyz):
""" (JK) align a bond to match the direction of the vector given by 'align_xyz'
bond (list of integers, len()=2) """
dxyz = m.xyz[bond[1]] - m.xyz[bond[0]]
import scipy.optimize as opt
def pen(rot,x1,x2):
x2t = x2.copy()
x2t = rotate_by_triple(x2t,rot%1.0)
''' calculate the angle between the vecotrs and return it'''
return numpy.arccos(numpy.dot(x1,x2t)/numpy.linalg.norm(x1)/numpy.linalg.norm(x2t))**2.0
t0 = numpy.array([0.5,0.5,0.5])
o = opt.minimize(pen,t0,args=(dxyz,align_xyz),method='SLSQP',)
m.set_xyz(rotate_by_triple(m.xyz,o.x % 1.0))
return o
def rec_walk_bond(m,ind,inds=[]):
for i,c in enumerate(m.conn[ind]):
if inds.count(c) == 0:
inds.append(c)
inds = rec_walk_bond(m,c,inds=inds)
else:
pass
return inds
def rotate_around_bond(m,atom1,atom2,degrees=5.0):
"""Rotates the xyz coordinates by n degrees around the distance vector between two atoms
let the situation be X-1-2-3-4-Y, either X,1 or Y,4 will be rotated accordingly
Arguments:
mol {molsys.mol} -- the mol obect to apply the operation
atom1 {integer} -- atom index 1
atom2 {integer} -- atom index 2
Keyword Arguments:
degrees {float} -- rotation in degrees (default: {5.0})
"""
### detect the atoms that are subject to the rotation
### rhs
#import pdb; pdb.set_trace()
inds = sorted(rec_walk_bond(m,atom1,[atom2]))
#print inds
xyz = m.xyz
xyz1 = xyz[atom1,:]
xyz2 = xyz[atom2,:]
vect = (xyz2-xyz1)
vect /= numpy.linalg.norm(vect)
    a, n1, n2, n3 = numpy.deg2rad(degrees), vect[0], vect[1], vect[2]
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, isdir
import getpass
import time
import struct
import hashlib
import warnings
from ...tmpdirs import InTemporaryDirectory
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_equal, assert_raises, dec, assert_allclose
from .. import (read_geometry, read_morph_data, read_annot, read_label,
write_geometry, write_morph_data, write_annot)
from ..io import _pack_rgb
from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data
from ...fileslice import strided_scalar
from ...testing import clear_and_catch_warnings
DATA_SDIR = 'fsaverage'
have_freesurfer = False
if 'SUBJECTS_DIR' in os.environ:
# May have Freesurfer installed with data
data_path = pjoin(os.environ["SUBJECTS_DIR"], DATA_SDIR)
have_freesurfer = isdir(data_path)
else:
# May have nibabel test data submodule checked out
nib_data = get_nibabel_data()
if nib_data != '':
data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR)
have_freesurfer = isdir(data_path)
freesurfer_test = dec.skipif(
not have_freesurfer,
'cannot find freesurfer {0} directory'.format(DATA_SDIR))
def _hash_file_content(fname):
hasher = hashlib.md5()
with open(fname, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
return hasher.hexdigest()
@freesurfer_test
def test_geometry():
"""Test IO of .surf"""
surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
coords, faces = read_geometry(surf_path)
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere"))
coords, faces, volume_info, create_stamp = read_geometry(
surf_path, read_metadata=True, read_stamp=True)
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
assert_equal(9, len(volume_info))
assert_equal([2, 0, 20], volume_info['head'])
assert_equal(u'created by greve on Thu Jun 8 19:17:51 2006',
create_stamp)
# Test equivalence of freesurfer- and nibabel-generated triangular files
# with respect to read_geometry()
with InTemporaryDirectory():
surf_path = 'test'
create_stamp = "created by %s on %s" % (getpass.getuser(),
time.ctime())
volume_info['cras'] = [1., 2., 3.]
write_geometry(surf_path, coords, faces, create_stamp, volume_info)
coords2, faces2, volume_info2 = \
read_geometry(surf_path, read_metadata=True)
for key in ('xras', 'yras', 'zras', 'cras'):
assert_allclose(volume_info2[key], volume_info[key],
rtol=1e-7, atol=1e-30)
assert_equal(volume_info2['cras'], volume_info['cras'])
with open(surf_path, 'rb') as fobj:
np.fromfile(fobj, ">u1", 3)
read_create_stamp = fobj.readline().decode().rstrip('\n')
# now write an incomplete file
write_geometry(surf_path, coords, faces)
with clear_and_catch_warnings() as w:
warnings.filterwarnings('always', category=DeprecationWarning)
read_geometry(surf_path, read_metadata=True)
assert_true(any('volume information contained' in str(ww.message)
for ww in w))
assert_true(any('extension code' in str(ww.message) for ww in w))
volume_info['head'] = [1, 2]
with clear_and_catch_warnings() as w:
write_geometry(surf_path, coords, faces, create_stamp, volume_info)
assert_true(any('Unknown extension' in str(ww.message) for ww in w))
volume_info['a'] = 0
assert_raises(ValueError, write_geometry, surf_path, coords,
faces, create_stamp, volume_info)
assert_equal(create_stamp, read_create_stamp)
np.testing.assert_array_equal(coords, coords2)
np.testing.assert_array_equal(faces, faces2)
# Validate byte ordering
coords_swapped = coords.byteswap().newbyteorder()
faces_swapped = faces.byteswap().newbyteorder()
np.testing.assert_array_equal(coords_swapped, coords)
np.testing.assert_array_equal(faces_swapped, faces)
@freesurfer_test
@needs_nibabel_data('nitest-freesurfer')
def test_quad_geometry():
"""Test IO of freesurfer quad files."""
new_quad = pjoin(get_nibabel_data(), 'nitest-freesurfer', 'subjects',
'bert', 'surf', 'lh.inflated.nofix')
coords, faces = read_geometry(new_quad)
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
with InTemporaryDirectory():
new_path = 'test'
write_geometry(new_path, coords, faces)
coords2, faces2 = read_geometry(new_path)
assert_equal(coords, coords2)
assert_equal(faces, faces2)
@freesurfer_test
def test_morph_data():
"""Test IO of morphometry data file (eg. curvature)."""
curv_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "curv"))
curv = read_morph_data(curv_path)
assert_true(-1.0 < curv.min() < 0)
assert_true(0 < curv.max() < 1.0)
with InTemporaryDirectory():
new_path = 'test'
write_morph_data(new_path, curv)
curv2 = read_morph_data(new_path)
assert_equal(curv2, curv)
def test_write_morph_data():
"""Test write_morph_data edge cases"""
values = np.arange(20, dtype='>f4')
okay_shapes = [(20,), (20, 1), (20, 1, 1), (1, 20)]
bad_shapes = [(10, 2), (1, 1, 20, 1, 1)]
big_num = np.iinfo('i4').max + 1
with InTemporaryDirectory():
for shape in okay_shapes:
write_morph_data('test.curv', values.reshape(shape))
# Check ordering is preserved, regardless of shape
assert_equal(values, read_morph_data('test.curv'))
assert_raises(ValueError, write_morph_data, 'test.curv',
np.zeros(shape), big_num)
# Windows 32-bit overflows Python int
if np.dtype(np.int) != np.dtype(np.int32):
assert_raises(ValueError, write_morph_data, 'test.curv',
strided_scalar((big_num,)))
for shape in bad_shapes:
assert_raises(ValueError, write_morph_data, 'test.curv',
values.reshape(shape))
@freesurfer_test
def test_annot():
"""Test IO of .annot against freesurfer example data."""
annots = ['aparc', 'aparc.a2005s']
for a in annots:
annot_path = pjoin(data_path, "label", "%s.%s.annot" % ("lh", a))
hash_ = _hash_file_content(annot_path)
labels, ctab, names = read_annot(annot_path)
assert_true(labels.shape == (163842, ))
assert_true(ctab.shape == (len(names), 5))
labels_orig = None
if a == 'aparc':
labels_orig, _, _ = read_annot(annot_path, orig_ids=True)
np.testing.assert_array_equal(labels == -1, labels_orig == 0)
# Handle different version of fsaverage
if hash_ == 'bf0b488994657435cdddac5f107d21e8':
assert_true(np.sum(labels_orig == 0) == 13887)
elif hash_ == 'd4f5b7cbc2ed363ac6fcf89e19353504':
assert_true(np.sum(labels_orig == 1639705) == 13327)
else:
raise RuntimeError("Unknown freesurfer file. Please report "
"the problem to the maintainer of nibabel.")
# Test equivalence of freesurfer- and nibabel-generated annot files
# with respect to read_annot()
with InTemporaryDirectory():
annot_path = 'test'
write_annot(annot_path, labels, ctab, names)
labels2, ctab2, names2 = read_annot(annot_path)
if labels_orig is not None:
labels_orig_2, _, _ = read_annot(annot_path, orig_ids=True)
np.testing.assert_array_equal(labels, labels2)
if labels_orig is not None:
np.testing.assert_array_equal(labels_orig, labels_orig_2)
np.testing.assert_array_equal(ctab, ctab2)
assert_equal(names, names2)
def test_read_write_annot():
"""Test generating .annot file and reading it back."""
# This annot file will store a LUT for a mesh made of 10 vertices, with
# 3 colours in the LUT.
nvertices = 10
nlabels = 3
names = ['label {}'.format(l) for l in range(1, nlabels + 1)]
# randomly generate a label for each vertex, making sure
# that at least one of each label value is present. Label
# values are in the range (0, nlabels-1) - they are used
# as indices into the lookup table (generated below).
labels = list(range(nlabels)) + \
list(np.random.randint(0, nlabels, nvertices - nlabels))
labels = np.array(labels, dtype=np.int32)
np.random.shuffle(labels)
# Generate some random colours for the LUT
rgbal = np.zeros((nlabels, 5), dtype=np.int32)
rgbal[:, :4] = np.random.randint(0, 255, (nlabels, 4))
# But make sure we have at least one large alpha, to make sure that when
# it is packed into a signed 32 bit int, it results in a negative value
# for the annotation value.
rgbal[0, 3] = 255
# Generate the annotation values for each LUT entry
rgbal[:, 4] = (rgbal[:, 0] +
rgbal[:, 1] * (2 ** 8) +
rgbal[:, 2] * (2 ** 16))
annot_path = 'c.annot'
with InTemporaryDirectory():
write_annot(annot_path, labels, rgbal, names, fill_ctab=False)
labels2, rgbal2, names2 = read_annot(annot_path)
names2 = [n.decode('ascii') for n in names2]
assert np.all(np.isclose(rgbal2, rgbal))
assert np.all(np.isclose(labels2, labels))
assert names2 == names
def test_write_annot_fill_ctab():
"""Test the `fill_ctab` parameter to :func:`.write_annot`. """
nvertices = 10
nlabels = 3
names = ['label {}'.format(l) for l in range(1, nlabels + 1)]
labels = list(range(nlabels)) + \
list(np.random.randint(0, nlabels, nvertices - nlabels))
labels = np.array(labels, dtype=np.int32)
    np.random.shuffle(labels)
"""
Module: LMR_verify_gridPRCP.py
Purpose: Generates spatial verification statistics of LMR gridded precipitation
against various gridded historical instrumental precipitation datasets
and precipitation from reanalyses.
Originator: <NAME>, U. of Washington, March 2016
Revisions:
"""
import matplotlib
# need to do this backend when running remotely or to suppress figures interactively
matplotlib.use('Agg')
# generic imports
import numpy as np
import glob, os, sys, calendar
from datetime import datetime, timedelta
from netCDF4 import Dataset, date2num, num2date
import mpl_toolkits.basemap as bm
import matplotlib.pyplot as plt
from matplotlib import ticker
from spharm import Spharmt, getspecindx, regrid
# LMR specific imports
sys.path.append('../')
from LMR_utils import global_hemispheric_means, assimilated_proxies, coefficient_efficiency
from load_gridded_data import read_gridded_data_CMIP5_model
from LMR_plot_support import *
# change default value of latlon kwarg to True.
bm.latlon_default = True
##################################
# START: set user parameters here
##################################
# option to suppress figures
iplot = True
iplot_individual_years = False
# centered time mean (nya must be odd! 3 = 3 yr mean; 5 = 5 year mean; etc 0 = none)
nya = 0
# option to print figures
fsave = True
#fsave = False
# set paths, the filename for plots, and global plotting preferences
# override datadir
#datadir_output = './data/'
#datadir_output = '/home/disk/kalman2/wperkins/LMR_output/archive'
datadir_output = '/home/disk/kalman3/rtardif/LMR/output'
#datadir_output = '/home/disk/ekman4/rtardif/LMR/output'
#datadir_output = '/home/disk/kalman3/hakim/LMR'
# Directories where precip and reanalysis data can be found
datadir_precip = '/home/disk/kalman3/rtardif/LMR/data/verification'
datadir_reanl = '/home/disk/kalman3/rtardif/LMR/data/model'
# file specification
#
# current datasets
# ---
#nexp = 'production_gis_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_ccsm4_pagesall_0.75'
#nexp = 'production_cru_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_era20c_pagesall_0.75'
#nexp = 'production_mlost_era20cm_pagesall_0.75'
# ---
nexp = 'test'
# ---
# perform verification using all recon. MC realizations ( MCset = None )
# or over a custom selection ( MCset = (begin,end) )
# ex. MCset = (0,0) -> only the first MC run
# MCset = (0,10) -> the first 11 MC runs (from 0 to 10 inclusively)
# MCset = (80,100) -> the 80th to 100th MC runs (21 realizations)
MCset = None
#MCset = (0,10)
# Definition of variables to verify
# kind name variable long name bounds units mult. factor
verif_dict = \
{
'pr_sfc_Amon' : ('anom', 'PRCP', 'Precipitation',-400.0,400.0,'(mm/yr)',1.0), \
}
# time range for verification (in years CE)
#trange = [1979,2000] #works for nya = 0
trange = [1880,2000] #works for nya = 0
#trange = [1900,2000] #works for nya = 0
#trange = [1885,1995] #works for nya = 5
#trange = [1890,1990] #works for nya = 10
# reference period over which mean is calculated & subtracted
# from all datasets (in years CE)
# NOTE: GPCP and CMAP data cover the 1979-2015 period
ref_period = [1979, 1999]
valid_frac = 0.0
# number of contours for plots
nlevs = 21
# plot alpha transparency
alpha = 0.5
# set the default size of the figure in inches. ['figure.figsize'] = width, height;
# aspect ratio appears preserved on smallest of the two
plt.rcParams['figure.figsize'] = 10, 10 # that's default image size for this interactive session
plt.rcParams['axes.linewidth'] = 2.0 # set the value globally
plt.rcParams['font.weight'] = 'bold' # set the font weight globally
plt.rcParams['font.size'] = 11 # set the font size globally
#plt.rc('text', usetex=True)
plt.rc('text', usetex=False)
##################################
# END: set user parameters here
##################################
verif_vars = list(verif_dict.keys())
workdir = datadir_output + '/' + nexp
print('working directory = ' + workdir)
print('\n getting file system information...\n')
# get number of mc realizations from directory count
# RT: modified way to determine list of directories with mc realizations
# get a listing of the iteration directories
dirs = glob.glob(workdir+"/r*")
# selecting the MC iterations to keep
if MCset:
dirset = dirs[MCset[0]:MCset[1]+1]
else:
dirset = dirs
mcdir = [item.split('/')[-1] for item in dirset]
niters = len(mcdir)
print('mcdir:' + str(mcdir))
print('niters = ' + str(niters))
# Loop over verif. variables
for var in verif_vars:
# read ensemble mean data
print('\n reading LMR ensemble-mean data...\n')
first = True
k = -1
for dir in mcdir:
k = k + 1
ensfiln = workdir + '/' + dir + '/ensemble_mean_'+var+'.npz'
npzfile = np.load(ensfiln)
print(dir, ':', npzfile.files)
tmp = npzfile['xam']
print('shape of tmp: ' + str(np.shape(tmp)))
if first:
first = False
recon_times = npzfile['years']
LMR_time = np.array(list(map(int,recon_times)))
lat = npzfile['lat']
lon = npzfile['lon']
nlat = npzfile['nlat']
nlon = npzfile['nlon']
lat2 = np.reshape(lat,(nlat,nlon))
lon2 = np.reshape(lon,(nlat,nlon))
years = npzfile['years']
nyrs = len(years)
xam = np.zeros([nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam_all = np.zeros([niters,nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam = xam + tmp
xam_all[k,:,:,:] = tmp
# this is the sample mean computed with low-memory accumulation
xam = xam/len(mcdir)
# this is the sample mean computed with numpy on all data
xam_check = xam_all.mean(0)
# check..
max_err = np.max(np.max(np.max(xam_check - xam)))
if max_err > 1e-4:
print('max error = ' + str(max_err))
raise Exception('sample mean does not match what is in the ensemble files!')
# sample variance
xam_var = xam_all.var(0)
print(np.shape(xam_var))
print('\n shape of the ensemble array: ' + str(np.shape(xam_all)) +'\n')
print('\n shape of the ensemble-mean array: ' + str(np.shape(xam)) +'\n')
# Convert units to match verif dataset: from kg m-2 s-1 to mm (per year)
rho = 1000.0
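    # Worked example of the conversion below: a flux of 1 kg m-2 s-1 equals
    # 1000*1*365*86400/1000 = 31,536,000 mm of water per (non-leap) year,
    # i.e. mm/yr = flux [kg m-2 s-1] * seconds_per_year, since 1 kg m-2 = 1 mm.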
for y in range(nyrs):
if calendar.isleap(int(years[y])):
xam[y,:,:] = 1000.*xam[y,:,:]*366.*86400./rho
else:
xam[y,:,:] = 1000.*xam[y,:,:]*365.*86400./rho
#################################################################
# BEGIN: load verification data #
#################################################################
print('\nloading verification data...\n')
# GPCP ----------------------------------------------------------
infile = datadir_precip+'/'+'GPCP/'+'GPCPv2.2_precip.mon.mean.nc'
verif_data = Dataset(infile,'r')
# Time
time = verif_data.variables['time']
time_obj = num2date(time[:],units=time.units)
time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))])
yrs_range = list(set(time_yrs))
# lat/lon
verif_lat = verif_data.variables['lat'][:]
verif_lon = verif_data.variables['lon'][:]
nlat_GPCP = len(verif_lat)
nlon_GPCP = len(verif_lon)
lon_GPCP, lat_GPCP = np.meshgrid(verif_lon, verif_lat)
# Precip
verif_precip_monthly = verif_data.variables['precip'][:]
[ntime,nlon_v,nlat_v] = verif_precip_monthly.shape
# convert mm/day monthly data to mm/year yearly data
GPCP_time = np.zeros(shape=len(yrs_range),dtype=np.int)
GPCP = np.zeros(shape=[len(yrs_range),nlat_GPCP,nlon_GPCP])
i = 0
for yr in yrs_range:
GPCP_time[i] = int(yr)
inds = np.where(time_yrs == yr)[0]
if calendar.isleap(yr):
nbdays = 366.
else:
nbdays = 365.
accum = np.zeros(shape=[nlat_GPCP, nlon_GPCP])
for k in range(len(inds)):
days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1]
accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month
GPCP[i,:,:] = accum # precip in mm
i = i + 1
# CMAP ----------------------------------------------------------
infile = datadir_precip+'/'+'CMAP/'+'CMAP_enhanced_precip.mon.mean.nc'
verif_data = Dataset(infile,'r')
# Time
time = verif_data.variables['time']
time_obj = num2date(time[:],units=time.units)
time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))])
yrs_range = list(set(time_yrs))
# lat/lon
verif_lat = verif_data.variables['lat'][:]
verif_lon = verif_data.variables['lon'][:]
nlat_CMAP = len(verif_lat)
nlon_CMAP = len(verif_lon)
lon_CMAP, lat_CMAP = np.meshgrid(verif_lon, verif_lat)
# Precip
verif_precip_monthly = verif_data.variables['precip'][:]
[ntime,nlon_v,nlat_v] = verif_precip_monthly.shape
# convert mm/day monthly data to mm/year yearly data
CMAP_time = np.zeros(shape=len(yrs_range),dtype=np.int)
CMAP = np.zeros(shape=[len(yrs_range),nlat_CMAP,nlon_CMAP])
i = 0
for yr in yrs_range:
CMAP_time[i] = int(yr)
inds = np.where(time_yrs == yr)[0]
if calendar.isleap(yr):
nbdays = 366.
else:
nbdays = 365.
accum = np.zeros(shape=[nlat_CMAP, nlon_CMAP])
for k in range(len(inds)):
days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1]
accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month
CMAP[i,:,:] = accum # precip in mm
i = i + 1
# ----------
# Reanalyses
# ----------
# Define month sequence for the calendar year
# (argument needed in upload of reanalysis data)
annual = list(range(1,13))
# 20th Century reanalysis (TCR) ---------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl +'/20cr'
datafile = vardef +'_20CR_185101-201112.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
TCR_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
    if len(latshape) == 2 and len(lonshape) == 2:
# stored in 2D arrays
lat_TCR = np.unique(lats)
lon_TCR = np.unique(lons)
nlat_TCR, = lat_TCR.shape
nlon_TCR, = lon_TCR.shape
else:
# stored in 1D arrays
lon_TCR = lons
lat_TCR = lats
nlat_TCR = len(lat_TCR)
nlon_TCR = len(lon_TCR)
lon2_TCR, lat2_TCR = np.meshgrid(lon_TCR, lat_TCR)
TCRfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
TCR = dd[vardef]['value'] # Anomalies
# Conversion from kg m-2 s-1
rho = 1000.0
i = 0
for y in TCR_time:
if calendar.isleap(y):
TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*366.*86400./rho
TCR[i,:,:] = 1000.*TCR[i,:,:]*366.*86400./rho
else:
TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*365.*86400./rho
TCR[i,:,:] = 1000.*TCR[i,:,:]*365.*86400./rho
i = i + 1
# ERA 20th Century reanalysis (ERA20C) ---------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl +'/era20c'
datafile = vardef +'_ERA20C_190001-201012.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
ERA_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
    if len(latshape) == 2 and len(lonshape) == 2:
# stored in 2D arrays
lat_ERA = np.unique(lats)
lon_ERA = np.unique(lons)
nlat_ERA, = lat_ERA.shape
nlon_ERA, = lon_ERA.shape
else:
# stored in 1D arrays
lon_ERA = lons
lat_ERA = lats
nlat_ERA = len(lat_ERA)
nlon_ERA = len(lon_ERA)
lon2_ERA, lat2_ERA = np.meshgrid(lon_ERA, lat_ERA)
ERAfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
ERA = dd[vardef]['value'] # Anomalies
# Conversion from kg m-2 s-1
rho = 1000.0
i = 0
for y in ERA_time:
if calendar.isleap(y):
ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*366.*86400./rho
ERA[i,:,:] = 1000.*ERA[i,:,:]*366.*86400./rho
else:
ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*365.*86400./rho
ERA[i,:,:] = 1000.*ERA[i,:,:]*365.*86400./rho
i = i + 1
# Plots of precipitation climatologies ---
# Climatology (annual accumulation)
GPCP_climo = np.nanmean(GPCP, axis=0)
CMAP_climo = np.nanmean(CMAP, axis=0)
TCR_climo = np.nanmean(TCRfull, axis=0)
ERA_climo = np.nanmean(ERAfull, axis=0)
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(GPCP_climo,lat_GPCP,lon_GPCP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,2)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(CMAP_climo,lat_CMAP,lon_CMAP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'CMAP '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,3)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(TCR_climo,lat2_TCR,lon2_TCR,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,4)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(ERA_climo,lat2_ERA,lon2_ERA,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
fig.tight_layout()
plt.savefig('GPCP_CMAP_20CR_ERA_climo.png')
plt.close()
###############################################################
# END: load verification data #
###############################################################
# ----------------------------------------------------------
# Adjust so that all anomaly data pertain to the mean over a
# common user-defined reference period (e.g. 20th century)
# ----------------------------------------------------------
print('Re-center on %s-%s period' % (str(ref_period[0]), str(ref_period[1])))
stime = ref_period[0]
etime = ref_period[1]
# LMR
LMR = xam
smatch, ematch = find_date_indices(LMR_time,stime,etime)
LMR = LMR - np.mean(LMR[smatch:ematch,:,:],axis=0)
# verif
smatch, ematch = find_date_indices(GPCP_time,stime,etime)
GPCP = GPCP - np.mean(GPCP[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(CMAP_time,stime,etime)
CMAP = CMAP - np.mean(CMAP[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(TCR_time,stime,etime)
TCR = TCR - np.mean(TCR[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(ERA_time,stime,etime)
ERA = ERA - np.mean(ERA[smatch:ematch,:,:],axis=0)
print('GPCP : Global: mean=', np.nanmean(GPCP), ' , std-dev=', np.nanstd(GPCP))
print('CMAP : Global: mean=', np.nanmean(CMAP), ' , std-dev=', np.nanstd(CMAP))
print('TCR : Global: mean=', np.nanmean(TCR), ' , std-dev=', np.nanstd(TCR))
print('ERA : Global: mean=', np.nanmean(ERA), ' , std-dev=', np.nanstd(ERA))
print('LMR : Global: mean=', np.nanmean(LMR), ' , std-dev=', np.nanstd(LMR))
# -----------------------------------
# Regridding the data for comparisons
# -----------------------------------
print('\n regridding data to a common grid...\n')
iplot_loc= False
#iplot_loc= True
# create instance of the spherical harmonics object for each grid
specob_lmr = Spharmt(nlon,nlat,gridtype='regular',legfunc='computed')
specob_gpcp = Spharmt(nlon_GPCP,nlat_GPCP,gridtype='regular',legfunc='computed')
specob_cmap = Spharmt(nlon_CMAP,nlat_CMAP,gridtype='regular',legfunc='computed')
specob_tcr = Spharmt(nlon_TCR,nlat_TCR,gridtype='regular',legfunc='computed')
specob_era = Spharmt(nlon_ERA,nlat_ERA,gridtype='regular',legfunc='computed')
# truncate to a lower resolution grid (common:21, 42, 62, 63, 85, 106, 255, 382, 799)
ntrunc_new = 42 # T42
ifix = np.remainder(ntrunc_new,2.0).astype(int)
nlat_new = ntrunc_new + ifix
nlon_new = int(nlat_new*1.5)
# lat, lon grid in the truncated space
dlat = 90./((nlat_new-1)/2.)
dlon = 360./nlon_new
veclat = np.arange(-90.,90.+dlat,dlat)
veclon = np.arange(0.,360.,dlon)
blank = np.zeros([nlat_new,nlon_new])
lat2_new = (veclat + blank.T).T
lon2_new = (veclon + blank)
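    # With ntrunc_new = 42: ifix = 0, nlat_new = 42, nlon_new = 63,
    # dlat = 90/20.5 ~ 4.39 deg and dlon = 360/63 ~ 5.71 deg, so all datasets
    # are compared on the same regular ~T42 verification grid.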
# create instance of the spherical harmonics object for the new grid
specob_new = Spharmt(nlon_new,nlat_new,gridtype='regular',legfunc='computed')
lmr_trunc = np.zeros([nyrs,nlat_new,nlon_new])
print('lmr_trunc shape: ' + str(np.shape(lmr_trunc)))
# loop over years of interest and transform...specify trange at top of file
iw = 0
if nya > 0:
iw = (nya-1)/2
cyears = list(range(trange[0],trange[1]))
lg_csave = np.zeros([len(cyears)])
lc_csave = np.zeros([len(cyears)])
lt_csave = np.zeros([len(cyears)])
le_csave = np.zeros([len(cyears)])
gc_csave = np.zeros([len(cyears)])
gt_csave = np.zeros([len(cyears)])
ge_csave = np.zeros([len(cyears)])
te_csave = np.zeros([len(cyears)])
lmr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
gpcp_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
cmap_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
tcr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
era_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
lmr_zm = np.zeros([len(cyears),nlat_new])
gpcp_zm = np.zeros([len(cyears),nlat_new])
cmap_zm = np.zeros([len(cyears),nlat_new])
tcr_zm = np.zeros([len(cyears),nlat_new])
era_zm = np.zeros([len(cyears),nlat_new])
k = -1
for yr in cyears:
k = k + 1
LMR_smatch, LMR_ematch = find_date_indices(LMR_time,yr-iw,yr+iw+1)
GPCP_smatch, GPCP_ematch = find_date_indices(GPCP_time,yr-iw,yr+iw+1)
CMAP_smatch, CMAP_ematch = find_date_indices(CMAP_time,yr-iw,yr+iw+1)
TCR_smatch, TCR_ematch = find_date_indices(TCR_time,yr-iw,yr+iw+1)
ERA_smatch, ERA_ematch = find_date_indices(ERA_time,yr-iw,yr+iw+1)
print('------------------------------------------------------------------------')
print('working on year... %5s' %(str(yr)))
print(' %5s LMR index = %5s : LMR year = %5s' %(str(yr), str(LMR_smatch), str(LMR_time[LMR_smatch])))
if GPCP_smatch:
print(' %5s GPCP index = %5s : GPCP year = %5s' %(str(yr), str(GPCP_smatch), str(GPCP_time[GPCP_smatch])))
if CMAP_smatch:
print(' %5s CMAP index = %5s : CMAP year = %5s' %(str(yr), str(CMAP_smatch), str(CMAP_time[CMAP_smatch])))
if TCR_smatch:
            print(' %5s TCR index = %5s : TCR year = %5s' %(str(yr), str(TCR_smatch), str(TCR_time[TCR_smatch])))
if ERA_smatch:
print(' %5s ERA index = %5s : ERA year = %5s' %(str(yr), str(ERA_smatch), str(ERA_time[ERA_smatch])))
# LMR
pdata_lmr = np.mean(LMR[LMR_smatch:LMR_ematch,:,:],0)
lmr_trunc = regrid(specob_lmr, specob_new, pdata_lmr, ntrunc=nlat_new-1, smooth=None)
# GPCP
if GPCP_smatch and GPCP_ematch:
pdata_gpcp = np.mean(GPCP[GPCP_smatch:GPCP_ematch,:,:],0)
else:
pdata_gpcp = np.zeros(shape=[nlat_GPCP,nlon_GPCP])
pdata_gpcp.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_gpcp).all():
gpcp_trunc = np.zeros(shape=[nlat_new,nlon_new])
gpcp_trunc.fill(np.nan)
else:
gpcp_trunc = regrid(specob_gpcp, specob_new, pdata_gpcp, ntrunc=nlat_new-1, smooth=None)
# CMAP
if CMAP_smatch and CMAP_ematch:
pdata_cmap = np.mean(CMAP[CMAP_smatch:CMAP_ematch,:,:],0)
else:
pdata_cmap = np.zeros(shape=[nlat_CMAP,nlon_CMAP])
pdata_cmap.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_cmap).all():
cmap_trunc = np.zeros(shape=[nlat_new,nlon_new])
cmap_trunc.fill(np.nan)
else:
cmap_trunc = regrid(specob_cmap, specob_new, pdata_cmap, ntrunc=nlat_new-1, smooth=None)
# TCR
if TCR_smatch and TCR_ematch:
pdata_tcr = np.mean(TCR[TCR_smatch:TCR_ematch,:,:],0)
else:
pdata_tcr = np.zeros(shape=[nlat_TCR,nlon_TCR])
pdata_tcr.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_tcr).all():
tcr_trunc = np.zeros(shape=[nlat_new,nlon_new])
tcr_trunc.fill(np.nan)
else:
tcr_trunc = regrid(specob_tcr, specob_new, pdata_tcr, ntrunc=nlat_new-1, smooth=None)
# ERA
if ERA_smatch and ERA_ematch:
pdata_era = np.mean(ERA[ERA_smatch:ERA_ematch,:,:],0)
else:
pdata_era = np.zeros(shape=[nlat_ERA,nlon_ERA])
pdata_era.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_era).all():
era_trunc = np.zeros(shape=[nlat_new,nlon_new])
era_trunc.fill(np.nan)
else:
era_trunc = regrid(specob_era, specob_new, pdata_era, ntrunc=nlat_new-1, smooth=None)
if iplot_individual_years:
# Precipitation products comparison figures (annually-averaged anomaly fields)
fmin = verif_dict[var][3]; fmax = verif_dict[var][4]; nflevs=41
fig = plt.figure()
ax = fig.add_subplot(5,1,1)
LMR_plotter(lmr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('LMR '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,2)
LMR_plotter(gpcp_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('GPCP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_gpcp*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,3)
LMR_plotter(cmap_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('CMAP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_cmap*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'CMAP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,4)
LMR_plotter(tcr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('20CR-V2 '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_tcr*verif_dict[var][6],lat_TCR,lon_TCR,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,5)
LMR_plotter(era_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('ERA20C '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_era*verif_dict[var][6],lat_ERA,lon_ERA,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
fig.tight_layout()
plt.savefig(nexp+'_LMR_GPCP_CMAP_TCR_ERA_'+verif_dict[var][1]+'anom_'+str(yr)+'.png')
plt.close()
# save the full grids
lmr_allyears[k,:,:] = lmr_trunc
gpcp_allyears[k,:,:] = gpcp_trunc
cmap_allyears[k,:,:] = cmap_trunc
tcr_allyears[k,:,:] = tcr_trunc
era_allyears[k,:,:] = era_trunc
# -----------------------
# zonal-mean verification
# -----------------------
# LMR
lmr_zm[k,:] = np.mean(lmr_trunc,1)
# GPCP
fracok = np.sum(np.isfinite(gpcp_trunc),axis=1,dtype=np.float16)/float(nlon_GPCP)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
gpcp_zm[k,i] = np.nanmean(gpcp_trunc[i,:],axis=1)
gpcp_zm[k,boolnotok] = np.NAN
# CMAP
fracok = np.sum(np.isfinite(cmap_trunc),axis=1,dtype=np.float16)/float(nlon_CMAP)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
cmap_zm[k,i] = np.nanmean(cmap_trunc[i,:],axis=1)
cmap_zm[k,boolnotok] = np.NAN
# TCR
tcr_zm[k,:] = np.mean(tcr_trunc,1)
# ERA
era_zm[k,:] = np.mean(era_trunc,1)
if iplot_loc:
ncints = 30
cmap = 'bwr'
nticks = 6 # number of ticks on the colorbar
# set contours based on GPCP
maxabs = np.nanmax(np.abs(gpcp_trunc))
# round the contour interval, and then set limits to fit
dc = np.round(maxabs*2/ncints,2)
cl = dc*ncints/2.
cints = np.linspace(-cl,cl,ncints,endpoint=True)
# compare LMR with GPCP, CMAP, TCR and ERA
fig = plt.figure()
ax = fig.add_subplot(3,2,1)
m1 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(lmr_trunc))
cs = m1.contourf(lon2_new,lat2_new,lmr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m1.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('LMR '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,3)
m2 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(gpcp_trunc))
cs = m2.contourf(lon2_new,lat2_new,gpcp_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m2.drawcoastlines()
            cb = m2.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('GPCP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,4)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(cmap_trunc))
cs = m3.contourf(lon2_new,lat2_new,cmap_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
            cb = m3.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('CMAP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,5)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(tcr_trunc))
cs = m3.contourf(lon2_new,lat2_new,tcr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
            cb = m3.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('20CR-V2 '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,6)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(era_trunc))
cs = m3.contourf(lon2_new,lat2_new,era_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
            cb = m3.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('ERA20C '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
plt.clim(-maxabs,maxabs)
# get these numbers by adjusting the figure interactively!!!
plt.subplots_adjust(left=0.05, bottom=0.45, right=0.95, top=0.95, wspace=0.1, hspace=0.0)
# plt.tight_layout(pad=0.3)
fig.suptitle(verif_dict[var][1] + ' for ' +str(nya) +' year centered average')
# anomaly correlation
lmrvec = np.reshape(lmr_trunc,(1,nlat_new*nlon_new))
gpcpvec = np.reshape(gpcp_trunc,(1,nlat_new*nlon_new))
cmapvec = np.reshape(cmap_trunc,(1,nlat_new*nlon_new))
tcrvec = np.reshape(tcr_trunc,(1,nlat_new*nlon_new))
eravec = np.reshape(era_trunc,(1,nlat_new*nlon_new))
# lmr <-> gpcp
indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
            lg_csave[k] = np.corrcoef(lmrvec[indok], gpcpvec[indok])[0, 1]
import numpy as np
import cv2
import imutils
import argparse
import os
from tensorflow import keras
from imutils import contours
from skimage.filters import threshold_local
from src.sudoku import *
from src.image_search import processing, sort
from src.model_prediction import model_prediction
def main():
# !! Select image path !!
path = "assets/samples/sample1.jpg"
# !! Load Model !!
model = keras.models.load_model('model/model_tf.h5')
# Load board
board = grid_operator(path, model)
# Solve it...
test()
solve_all([(board)], None, 0.0)
# Generate grid with given numbers using AI
def grid_operator(image, model):
# Filter contours and fix lines, output --> grid
sudoku_rows, row, gray = sort(image)
# Define local variables
board, pos_iD = [], 0
for row in sudoku_rows:
for c in row:
mask = np.zeros(gray.shape, dtype=np.uint8)
cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
# Extract out the object and place into output image
            image = np.zeros_like(gray)
# -*- coding: utf-8 -*-
import fire
from tqdm import tqdm
from arena_util import load_json
from arena_util import write_json
from arena_util import remove_seen
from arena_util import most_popular
import numpy as np
class MostPopular:
def _generate_answers(self, train, questions,song_meta):
song_infos = {}
for t in train:
song_infos[t['id']]=[song_meta[a] for a in t['songs']]
plylst_list = {}
for plylst, songs in song_infos.items():
plylst_list[plylst] = songs2vec(songs)
        # song_mp / tag_mp: global most-popular fallbacks used below (the
        # helper comes from arena_util; the 200/100 top-k counts are assumptions).
        _, song_mp = most_popular(train, "songs", 200)
        _, tag_mp = most_popular(train, "tags", 100)
        answers = []
for q in tqdm(questions):
answers.append({
"id": q["id"],
"songs": remove_seen(q["songs"], song_mp)[:100],
"tags": remove_seen(q["tags"], tag_mp)[:10],
})
return answers
def run(self, train_fname, question_fname, song_meta_fname):
print("Loading train file...")
train = load_json(train_fname)
print("Loading question file...")
questions = load_json(question_fname)
print("Loading song_meta file...")
song_meta = load_json(song_meta_fname)
print("Writing answers...")
answers = self._generate_answers(train, questions, song_meta)
write_json(answers, "results/results.json")
def one_hot_encode(song):
song_vec = np.zeros(30)
for genre in song['song_gn_gnr_basket']:
try:
song_vec[int(int(genre[2:])/100)-1] = 1
except:
pass
#print("error in : ",genre)
return song_vec
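# Illustrative example (the 'GNxx00' genre-code format is an assumption implied
# by the slicing above): a song whose song_gn_gnr_basket contains 'GN0900'
# sets index int(900/100) - 1 = 8 of the 30-dimensional genre vector to 1.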
def normalize(v):
#norm = np.linalg.norm(v)
norm = np.sum(v)
if norm == 0:
return v
return v / norm
def songs2vec(songs):
plylst_vec_list = np.zeros(30)
for i in range(len(songs)):
plylst_vec_list += one_hot_encode(songs[i])
    # Guard against an all-zero genre vector, then return the normalised
    # per-playlist genre distribution (the tail of this function is assumed).
    if np.linalg.norm(plylst_vec_list) == 0:
        return plylst_vec_list
    return normalize(plylst_vec_list)
# -*- coding: utf-8 -*-
"""
Pitch Spelling using the ps13 algorithm.
References
----------
"""
import numpy as np
from collections import namedtuple
__all__ = ['estimate_spelling']
ChromamorpheticPitch = namedtuple('ChromamorpheticPitch', 'chromatic_pitch morphetic_pitch')
STEPS = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
UND_CHROMA = np.array([0, 2, 3, 5, 7, 8, 10], dtype=np.int)
ALTER = np.array(['n', '#', 'b'])
def estimate_spelling(note_array, method='ps13s1', *args, **kwargs):
"""Estimate pitch spelling using the ps13 algorithm [4]_, [5]_.
Parameters
----------
note_array : structured array
Array with score information
method : str (default 'ps13s1')
Pitch spelling algorithm. More methods will be added.
*args
positional arguments for the algorithm specified in `method`.
**kwargs
Keyword arguments for the algorithm specified in `method`.
Returns
-------
spelling : structured array
Array with pitch spellings. The fields are 'step', 'alter' and
'octave'
References
----------
.. [4] <NAME>. (2006). "The ps13 Pitch Spelling Algorithm". Journal
of New Music Research, 35(2):121.
.. [5] <NAME>. (2019). "RecurSIA-RRT: Recursive translatable
point-set pattern discovery with removal of redundant translators".
12th International Workshop on Machine Learning and Music. Würzburg,
Germany.
"""
if method == 'ps13s1':
ps = ps13s1
step, alter, octave = ps(note_array, *args, **kwargs)
spelling = np.empty(len(step), dtype=[('step', 'U1'), ('alter', np.int), ('octave', np.int)])
spelling['step'] = step
spelling['alter'] = alter
spelling['octave'] = octave
return spelling
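# Minimal usage sketch (assumes a structured `note_array` with 'pitch' and
# 'onset' fields, as required below):
#
#     spelling = estimate_spelling(note_array, method='ps13s1')
#     steps, alters, octaves = spelling['step'], spelling['alter'], spelling['octave']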
def ps13s1(note_array, K_pre=10, K_post=40):
"""
ps13s1 Pitch Spelling Algorithm
"""
pitch_sort_idx = note_array['pitch'].argsort()
onset_sort_idx = np.argsort(note_array[pitch_sort_idx]['onset'], kind='mergesort')
sort_idx = pitch_sort_idx[onset_sort_idx]
re_idx = sort_idx.argsort() # o_idx[sort_idx]
sorted_ocp = np.column_stack(
(note_array[sort_idx]['onset'],
chromatic_pitch_from_midi(note_array[sort_idx]['pitch'])))
n = len(sorted_ocp)
# ChromaList
chroma_array = compute_chroma_array(sorted_ocp=sorted_ocp)
# ChromaVectorList
chroma_vector_array = compute_chroma_vector_array(chroma_array=chroma_array,
K_pre=K_pre,
K_post=K_post)
morph_array = compute_morph_array(chroma_array=chroma_array,
chroma_vector_array=chroma_vector_array)
morphetic_pitch = compute_morphetic_pitch(sorted_ocp, morph_array)
step, alter, octave = p2pn(sorted_ocp[:, 1], morphetic_pitch.reshape(-1, ))
# sort back pitch names
step = step[re_idx]
alter = alter[re_idx]
octave = octave[re_idx]
return step, alter, octave
def chromatic_pitch_from_midi(midi_pitch):
return midi_pitch - 21
def chroma_from_chromatic_pitch(chromatic_pitch):
return np.mod(chromatic_pitch, 12)
def pitch_class_from_chroma(chroma):
return np.mod(chroma - 3, 12)
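# Worked example of the conventions above: MIDI pitch 60 (middle C) maps to
# chromatic pitch 60 - 21 = 39, chroma mod(39, 12) = 3, and pitch class
# mod(3 - 3, 12) = 0.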
def compute_chroma_array(sorted_ocp):
return chroma_from_chromatic_pitch(sorted_ocp[:, 1]).astype(np.int)
def compute_chroma_vector_array(chroma_array, K_pre, K_post):
"""
Computes the chroma frequency distribution within the context surrounding
each note.
"""
n = len(chroma_array)
chroma_vector = np.zeros(12, dtype=np.int)
for i in range(np.minimum(n, K_post)):
chroma_vector[chroma_array[i]] = 1 + chroma_vector[chroma_array[i]]
chroma_vector_list = [chroma_vector.copy()]
for i in range(1, n):
if i + K_post <= n:
chroma_vector[chroma_array[i + K_post - 1]] = 1 + chroma_vector[chroma_array[i + K_post - 1]]
if i - K_pre > 0:
chroma_vector[chroma_array[i - K_pre - 1]] = chroma_vector[chroma_array[i - K_pre - 1]] - 1
chroma_vector_list.append(chroma_vector.copy())
return np.array(chroma_vector_list)
def compute_morph_array(chroma_array, chroma_vector_array):
n = len(chroma_array)
# Line 1: Initialize morph array
morph_array = np.empty(n, dtype=np.int)
# Compute m0
# Line 2
init_morph = np.array([0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6], dtype=np.int)
# Line 3
c0 = chroma_array[0]
# Line 4
m0 = init_morph[c0]
# Line 5
morph_int = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6], dtype=np.int)
# Lines 6-8
tonic_morph_for_tonic_chroma = np.mod(m0 - morph_int[np.mod(c0 - np.arange(12), 12)], 7)
# Line 10
tonic_chroma_set_for_morph = [[] for i in range(7)]
# Line 11
morph_strength = np.zeros(7, dtype=np.int)
# Line 12
for j in range(n):
# Lines 13-15 (skipped line 9, since we do not need to
# initialize morph_for_tonic_chroma)
morph_for_tonic_chroma = np.mod(morph_int[np.mod(chroma_array[j]
            - np.arange(12), 12)] + tonic_morph_for_tonic_chroma, 7)
# -*- coding: utf-8 -*-
"""This module implements the ITKrMM algorithm.
"""
import time
import logging
import functools
import multiprocessing as mp
import numpy as np
import numpy.random as rd
import numpy.linalg as lin
import scipy.sparse as sps
from ..tools.dico_learning import forward_patch_transform,\
inverse_patch_transform, CLS_init
from ..tools import PCA
from ..tools import sec2str
from ..tools import metrics
_logger = logging.getLogger(__name__)
class Dico_Learning_Executer:
"""Class to define execute dictionary learning algorithms.
The following class is a common code for most dictionary learning methods.
It performs the following tasks:
* reshapes the data in patch format,
* performs low-rank component estimation,
* starts the dictionary learning method,
* reshape output data,
* handle CLS initialization to speed-up computation.
Attributes
----------
Y: (m, n) or (m, n, l) numpy array
The input data.
Y_PCA: (m, n) or (m, n, PCA_th) numpy array
The input data in PCA space.
Its value is Y if Y is 2D.
mask: (m, n) numpy array
The acquisition mask.
P: int
The width (or height) of the patch.
K: int
The dictionary dimension. This dictionary is composed of L low-rank
components and K-L non-low-rank components.
L: int
The number of low rank components to learn.
S: int
The code sparsity level.
Nit_lr: int
The number of iterations for the low rank estimation.
Nit: int
The number of iterations.
CLS_init: dico
CLS initialization inofrmation.
verbose: bool
The verbose parameter. Default is True.
mean_std: 2-tuple
Tuple of size 2 which contains the data mean and std.
data: (N, D) numpy array
The Y data in patch format. N (resp. D) is the number of voxels per
patch (resp. patches).
mdata: (N, D) numpy array
The mask in patch format. N (resp. D) is the number of voxels per
patch (resp. patches).
init: (N, L) numpy array
The low-rank estimation initialization in patch format. N is the
number of voxels per patch.
init: (N, K-L) numpy array
The dictionary-learning initialization in patch format. N is the
number of voxels per patch.
PCA_operator: PcaHandler object
The PCA operator.
Note
----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the CLS :code:`init` optional argument.
"""
def __init__(self, Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""Dico_Learning_Executer __init__ function.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None, (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
The code sparsity level. Default is P-L.
This should be lower than K-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init_lr: optional, (N, L) numpy array
Initialization for low-rank component. N is the number of voxel in
a patch. Default is random initialization.
init: optional, (N, K-L) numpy array
Initialization for dictionary learning. N is the number of voxel
in a patch. Default is random initialization.
CLS_init: optional, dico
CLS initialization infrmation. See Note for details.
Default is None.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
verbose: bool
The verbose parameter. Default is True.
Note
----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the CLS :code:`init` optional argument.
"""
self.Y = Y
if mask is None:
mask = np.ones(Y.shape[:2])
self.mask = mask
self.P = P
self.K = K if K is not None else 2*P**2-1
self.L = L
self.S = S if S is not None else P-L
self.Nit = Nit
self.Nit_lr = Nit_lr
self.CLS_init = CLS_init
self.verbose = verbose
if CLS_init is not None and Y.ndim != 3:
_logger.warning(
'Dico learning will not be initialized with CLS as input data '
'is not 3D. Random init used.')
if (S > P**2 and Y.ndim == 2) or (
S > P**2*Y.shape[-1] and Y.ndim == 3):
            raise ValueError('S input is greater than the patch size.')
# Perform PCA if Y is 3D
if self.Y.ndim == 3:
PCA_operator = PCA.PcaHandler(
Y, mask, PCA_transform=PCA_transform, PCA_th=PCA_th,
verbose=verbose)
Y_PCA, PCA_th = PCA_operator.Y_PCA, PCA_operator.PCA_th
self.PCA_operator = PCA_operator
if CLS_init is not None and 'init' in CLS_init:
self.CLS_init['init'] = PCA_operator.direct(
self.CLS_init['init'])
else:
Y_PCA = Y.copy()
self.PCA_operator = None
# Normalize and center
Y_m, Y_std = Y_PCA.mean(), Y_PCA.std()
Y_PCA = (Y_PCA - Y_m)/Y_std
if CLS_init is not None and 'init' in CLS_init:
self.CLS_init['init'] = (self.CLS_init['init'] - Y_m)/Y_std
self.mean_std = (Y_m, Y_std)
self.Y_PCA = Y_PCA
# Prepare data
obs_mask = mask if Y.ndim == 2 else np.tile(
mask[:, :, np.newaxis], [1, 1, Y_PCA.shape[2]])
# Observation
self.data = forward_patch_transform(Y_PCA * obs_mask, self.P)
# Mask
self.mdata = forward_patch_transform(obs_mask, self.P)
self.data *= self.mdata
# Initialization
if init_lr is None:
self.init_lr = np.squeeze(rd.randn(self.data.shape[0], self.L))
else:
self.init_lr = init_lr
if init is None:
self.init = rd.randn(self.data.shape[0], self.K - self.L)
else:
self.init = init
def execute(self, method='ITKrMM'):
"""Executes dico learning restoration.
Arguments
---------
method: str
The method to use, which can be 'ITKrMM' or 'wKSVD'.
Default is 'ITKrMM'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Aditional informations. See Notes.
Note
----
The output information keys are:
- 'time': Execution time in seconds.
- 'lrc': low rank component.
- 'dico': Estimated dictionary.
- 'E': Evolution of the error.
"""
# Welcome message
if self.verbose:
print("-- {} reconstruction algorithm --".format(method))
start = time.time()
# If CLS init, get init dico and lrc
if self.CLS_init is not None and self.Y.ndim == 3:
if self.verbose:
print('Learning low rank component and init with CLS...')
lrc, dico_init = self.get_CLS_init()
self.init_lr = lrc
self.init = dico_init
else:
# Otherwise, we should estimate the low-rank component.
if self.verbose:
print('Learning low rank component...')
if self.L > 0:
local_init = self.init_lr if self.L > 1 else \
self.init_lr[:, None]
lrc = np.zeros((self.data.shape[0], self.L))
for cnt in range(self.L):
lrc_init = local_init[:, cnt]
if cnt > 0:
lrc_init -= lrc[:, :cnt] @ lrc[:, :cnt].T @ lrc_init
lrc_init /= np.linalg.norm(lrc_init)
lrc[:, cnt] = rec_lratom(
self.data,
self.mdata,
lrc[:, :cnt] if cnt > 0 else None,
self.Nit_lr,
lrc_init)
else:
lrc = None
#
# Learn Dictionary
#
if self.verbose:
print('Learning dictionary...'.format(method))
# Remove lrc and ensures othogonality of input dico initialization.
if self.L > 1:
self.init -= lrc @ lrc.T @ self.init
self.init = self.init @ np.diag(1 / lin.norm(self.init, axis=0))
# Call reconstruction algo
params = {
'data': self.data,
'masks': self.mdata,
'K': self.K,
'S': self.S,
'lrc': lrc,
'Nit': self.Nit,
'init': self.init,
'verbose': self.verbose}
if method == 'ITKrMM':
dico_hat, info = itkrmm_core(**params)
elif method == 'wKSVD':
dico_hat, info = wKSVD_core(**params, preserve_DC=True)
else:
raise ValueError(
'Unknown method parameter for Dico_Learning_Executer object')
#
# Reconstruct data
#
Xhat = self.dico_to_data(dico_hat)
# Reshape output dico
p = self.P
shape_dico = (self.K, p, p) if self.Y.ndim == 2 else (
self.K, p, p, self.Y_PCA.shape[-1])
dico = dico_hat.T.reshape(shape_dico)
# Manage output info
dt = time.time() - start
InfoOut = {'dico': dico, 'time': dt}
if self.CLS_init is not None:
dico_CLS = np.hstack((self.init_lr, self.init))
InfoOut['CLS_init'] = dico_CLS.T.reshape(shape_dico)
if self.PCA_operator is not None:
PCA_info = {
'H': self.PCA_operator.H,
'PCA_th': self.PCA_operator.PCA_th,
'Ym': np.squeeze(self.PCA_operator.Ym[0, 0, :])
}
InfoOut['PCA_info'] = PCA_info
if self.verbose:
print(
"Done in {}.\n---".format(sec2str.sec2str(dt)))
return Xhat, InfoOut
def dico_to_data(self, dico):
"""Estimate reconstructed data based on the provided dictionary.
Arguments
---------
dico: (P**2, K) or (P**2*l, K) numpy array
The estimated dictionary.
Returns
-------
(m, n) or (m, n, l) numpy array
The reconstructed data
"""
# Recontruct data from dico and coeffs.
coeffs = OMPm(dico.T, self.data.T, self.S, self.mdata.T)
outpatches = sps.csc_matrix.dot(dico, (coeffs.T).tocsc())
# Transform from patches to data.
Xhat = inverse_patch_transform(outpatches, self.Y_PCA.shape)
Xhat = Xhat * self.mean_std[1] + self.mean_std[0]
if self.Y.ndim == 3:
Xhat = self.PCA_operator.inverse(Xhat)
return Xhat
def get_CLS_init(self):
"""Computes the initialization with CLS.
Returns
-------
(N, L) numpy array
Low-rank component estimation. N is the number of voxels in a
patch.
(N, K-L) numpy array
Dictionary initialization. N is the number of voxels in a patch.
"""
# Get initialization dictionary
D, C, Xhat, InfoOut = CLS_init(
self.Y_PCA,
mask=self.mask,
P=self.P,
K=self.K - self.L,
S=self.S,
PCA_transform=False,
verbose=self.verbose,
**self.CLS_init)
# Get low rank component
CLS_data = forward_patch_transform(Xhat, self.P)
Uec, _, _ = np.linalg.svd(CLS_data)
init_lr = Uec[:, :self.L]
dico_init = D.T
return init_lr, dico_init
def ITKrMM(Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""ITKrMM restoration algorithm.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None or (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
        Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
        The code sparsity level. Default is P-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init: (P**2, K+L) or (P**2*l, K+L) numpy array
Initialization dictionary.
CLS_init: optional, dico
CLS initialization inofrmation. See Notes for details.
Default is None.
xref: optional, (m, n) or (m, n, l) numpy array
Reference image to compute error evolution.
Default is None for input Y data.
verbose: optional, bool
The verbose parameter. Default is True.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Aditional informations. See Notes.
Notes
-----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the :code:`init` optional argument.
The output information keys are:
* :code:`time`: Execution time in seconds.
* :code:`lrc`: low rank component.
* :code:`dico`: Estimated dictionary.
* :code:`E`: Evolution of the error.
"""
obj = Dico_Learning_Executer(
Y, mask, P, K, L, S, Nit_lr, Nit, init_lr, init, CLS_init,
PCA_transform, PCA_th, verbose)
return obj.execute(method='ITKrMM')
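# Minimal usage sketch (the data array and parameter values are assumptions):
#
#     Xhat, info = ITKrMM(Y, mask=mask, P=5, K=49, L=1, S=4, Nit=40)
#     dico = info['dico']          # learned dictionary, reshaped to patches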
def wKSVD(Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""wKSVD restoration algorithm.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None or (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
        Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
        The code sparsity level. Default is P-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init: (P**2, K+L) or (P**2*l, K+L) numpy array
Initialization dictionary.
CLS_init: optional, dico
CLS initialization inofrmation. See Notes for details.
Default is None.
xref: optional, (m, n) or (m, n, l) numpy array
Reference image to compute error evolution.
Default is None for input Y data.
verbose: optional, bool
The verbose parameter. Default is True.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Aditional informations. See Notes.
Notes
-----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the :code:`init` optional argument.
The output information keys are:
* :code:`time`: Execution time in seconds.
* :code:`lrc`: low rank component.
* :code:`dico`: Estimated dictionary.
* :code:`E`: Evolution of the error.
"""
obj = Dico_Learning_Executer(
Y, mask, P, K, L, S, Nit_lr, Nit, init_lr, init, CLS_init,
PCA_transform, PCA_th, verbose)
return obj.execute(method='wKSVD')
def rec_lratom(data, masks=None, lrc=None, Nit=10, inatom=None, verbose=True):
"""Recover new low rank atom equivalent to itkrmm with K = S = 1.
Arguments
---------
data: (d, N) numpy array
The (corrupted) training signals as its columns.
masks: (d, N) numpy array
Mask data as its columns.
masks(.,.) in {0,1}.
Default is masks = 1.
lrc: (d, n) numpy array
Orthobasis for already recovered low rank component.
Default is None.
Nit: int
Number of iterations.
Default is 10.
inatom: (d, ) numpy array
Initialisation that should be normalized.
Default is None for random.
verbose: bool
If verbose is True, information is sent to the output.
Default is True.
Returns
-------
atom: (d, ) numpy array
Estimated low rank component.
"""
d, N = data.shape
if masks is None:
masks = np.ones((d, N))
data = data*masks # Safeguard
# Create random initial point if needed or check input initialization is
# normalized.
if inatom is None:
inatom = np.random.randn(d)
inatom = inatom/np.linalg.norm(inatom)
#
if lrc is not None:
# If lrc has 1 dimension, one should add a dimension to have correct
# L.
if lrc.ndim == 1:
lrc = lrc[:, np.newaxis]
L = lrc.shape[1]
# Remove low rank component from initial atom and re-normalize.
inatom = inatom - lrc @ lrc.T @ inatom
inatom = inatom/np.linalg.norm(inatom)
# Project data into orthogonal of lrc
# start = time.time()
for n in range(N):
lrcMn = lrc * np.tile(masks[:, n][:, np.newaxis], [1, L])
data[:, n] -= lrcMn @ np.linalg.pinv(lrcMn) @ data[:, n]
# if verbose:
# print('Elapsed time: {}'.format(
# sec2str.sec2str(time.time()-start)))
#
# Start estimation
atom_k = inatom
for it in range(Nit):
ip = atom_k.T.dot(data)
maskw = np.sum(masks, 1)
if lrc is None:
atom_kp1 = data @ np.sign(ip).T
else:
atom_kp1 = np.zeros(atom_k.shape)
for n in range(N):
# The masked basis of the current low-rank space.
lrcplus = np.concatenate(
(lrc, atom_k[:, np.newaxis]),
axis=1) * np.tile(masks[:, n][:, np.newaxis], [1, L+1])
# The data is projected into the orthogonal space of lrcplus.
resn = data[:, n] - \
lrcplus @ np.linalg.pinv(lrcplus) @ data[:, n]
# The masked current estimated lrc.
atom_k_mm = atom_k * masks[:, n]
# Calculate incremented atom_kp1.
atom_kp1 += \
np.sign(ip[n]) * resn + \
np.abs(ip[n])*atom_k_mm/np.sum(atom_k_mm**2)
# Normalize with mask score.
if maskw.min() > 0:
atom_kp1 /= maskw
else:
atom_kp1 /= (maskw + 1e-2)
# Remove previous low rank components from current estimate.
if lrc is not None:
atom_kp1 -= lrc @ lrc.T @ atom_kp1
# Re-normalize current estimation
atom_kp1 /= np.linalg.norm(atom_kp1)
# Update
atom_k = atom_kp1
return atom_k
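# Usage sketch for rec_lratom (shapes follow the docstring; the data and masks
# arrays are assumptions):
#
#     atom = rec_lratom(data, masks, lrc=None, Nit=10)   # (d,) unit-norm atom
#
# The returned atom is orthogonal to any previously recovered `lrc` columns.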
def OMPm(D, X, S, Masks=None):
r"""Masked OMP.
This is a modified version of OMP to account for corruptions in the signal.
Consider some input data :math:`\mathbf{X}` (whose shape is (N, P) where N
is the number of signals) which are masked by :math:`\mathbf{M}`. Given an
input dictionary :math:`\mathbf{D}` of shape (K, P), this algorithm returns
the optimal sparse :math:`\hat{\mathbf{A}}` matrix such that:
.. math::
\gdef \A {\mathbf{A}}
\gdef \M {\mathbf{M}}
\gdef \X {\mathbf{X}}
\gdef \D {\mathbf{D}}
\begin{aligned}
\hat{\A} &= \arg\min_\A \frac{1}{2}||\M\X - \M(\A\D)||_F^2\\
&s.t. \max_k||\A_{k,:}||_{0} \leq S
\end{aligned}
A slightly different modification of Masked OMP is available in "Sparse
and Redundant Representations: From Theory to Applications in Signal and
Image Processing," the book written by <NAME> in 2010.
Arguments
---------
D: (K, P) numpy array
The dictionary.
Its rows MUST be normalized, i.e. their norm must be 1.
X: (N, P) numpy array
The masked signals to represent.
S: int
The max. number of coefficients for each signal.
Masks: optional, (N, P) numpy array or None
The sampling masks that should be 1 if sampled and 0 otherwise.
Default is None for full sampling.
Returns
-------
(N, K) sparse coo_matrix array
sparse coefficient matrix.
"""
# Get some dimensions
    N = X.shape[0]  # number of signals
    P = X.shape[1]  # number of pixels per signal (atom length)
K = D.shape[0] # # of atoms
if Masks is None:
Masks = np.ones((N, P))
# Prepare the tables that will be used to create output sparse matrix.
iTab = np.zeros(N*S)
jTab = np.zeros(N*S)
dataTab = np.zeros(N*S)
Ncomp = 0 # Count the number of nnz elements for output.
for k in range(N):
# Local mask and signal # k
x = X[k, :]
m = Masks[k, :]
xm = x*m # Masked data
# Masked atoms
Dm = D * np.tile(m[np.newaxis, :], [K, 1])
# Normalization of available masked atoms
scale = np.linalg.norm(Dm, axis=1)
nz = np.flatnonzero(scale > 1e-3 / np.sqrt(N))
scale[nz] = 1/scale[nz]
# Initialize residuals
residual = xm
# Initialize the sequence of atom indexes
indx = np.zeros(S, dtype=int)
for j in range(S):
# Projection of the residual into dico
proj = scale * (Dm @ residual)
# Search max scalar product
indx[j] = np.argmax(np.abs(proj))
# Update residual
a = np.linalg.pinv(Dm[indx[:j+1], :].T) @ xm
residual = xm - Dm[indx[:j+1], :].T @ a
# In case of small residual, break
if np.linalg.norm(residual)**2 < 1e-6:
break
iTab[Ncomp:Ncomp+j+1] = k * np.ones(j+1)
jTab[Ncomp:Ncomp+j+1] = indx[:j+1]
dataTab[Ncomp:Ncomp+j+1] = a
Ncomp += j+1
# Build sparse output as scipy.sparse.coo_matrix
return sps.coo_matrix((dataTab, (iTab, jTab)), shape=(N, K))
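# Toy usage sketch for OMPm (shapes follow the docstring; the arrays are made
# up for illustration):
#
#     D = np.random.randn(64, 25)
#     D /= np.linalg.norm(D, axis=1, keepdims=True)   # normalize the rows
#     A = OMPm(D, X, S=3, Masks=M)                    # (N, 64) sparse coefficients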
def _itkrmm_multi(n, lrc, data, masks, L):
"""
"""
lrcMn = lrc * np.tile(masks[:, n][:, np.newaxis], [1, L])
return lrcMn @ np.linalg.pinv(lrcMn) @ data[:, n]
def itkrmm_core(
data, masks=None, K=None, S=1, lrc=None, Nit=50, init=None,
verbose=True, parent=None):
"""Iterative Thresholding and K residual Means masked.
Arguments
---------
data: (d, N) numpy array
The (corrupted) training signals as its columns.
masks: optional, None, (d, N) numpy array
The masks as its columns.
masks(.,.) in {0,1}.
Default is None for full sampling.
K: optional, None or int
Dictionary size.
Default is None for d.
S: optional, int
Desired or estimated sparsity level of the signals.
Default is 1.
lrc: optional, None or (d, L) numpy array
Orthobasis for low rank component. Default is None.
Nit: optional, int
Number of iterations.
Default is 50.
init: optional, None or (d, K-L) numpy array
Initialisation, unit norm column matrix.
Here, L is the number of low rank components.
Default is None for random.
verbose: optional, optional, bool
The verbose parameter.
Default is True.
parent: optional, None or Dico_Learning_Executer object
The Dico_Learning_Executer object that called this function.
If this is not None, the SNR between initial true data (given
throught the `xref`argument of Dico_Learning_Executer) and the
currently reconstructed data will be computed for each
iteration. As this means one more OMPm per iteration, this is
quite longer.
Default is None for faster code and non-SNR output.
Returns
-------
(d, K) numpy array
Estimated dictionary
dictionary
Output information. See Note.
Note
----
The output dictionary contains the following keys.
* `time` (float): Execution time in seconds.
* 'SNR' (None, (Nit, ) array): Evolution of the SNR across the
iterations in case `parent`is not None.
"""
# d is patch size, N is # of patches.
d, N = data.shape
if masks is None:
masks = np.ones(data.shape)
data = data*masks # safeguard
if K is None:
K = data.shape[0]
if lrc is not None:
L = 1 if lrc.ndim == 1 else lrc.shape[1]
K = K - L
if N < K-1:
_logger.warning(
'Less training signals than atoms: trivial solution is data.')
return data, None
    if init is not None and not np.array_equal(init.shape, np.array([d, K])):
        # The provided initialization does not match the expected (d, K) shape;
        # fall back to a random draw (the exact original handling is assumed).
        _logger.warning(
            'Initialization size does not match (d, K). Random init used.')
        init = None
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module defines the **erodibility** and **thickness** of previously defined stratigraphic layers.
"""
import os
import glob
import time
import h5py
import numpy
import pandas
from scipy import interpolate
from scipy.spatial import cKDTree
class eroMesh:
"""
This class builds the erodibility and thickness of underlying initial stratigraphic layers.
Args:
layNb: total number of erosion stratigraphic layers
eroMap: erodibility map for each erosion stratigraphic layers
eroVal: erodibility value for each erosion stratigraphic layers
eroTop: erodibility value for reworked sediment
thickMap: thickness map for each erosion stratigraphic layers
thickVal: thickness value for each erosion stratigraphic layers
xyTIN: numpy float-type array containing the coordinates for each nodes in the TIN (in m)
regX: numpy array containing the X-coordinates of the regular input grid.
regY: numpy array containing the Y-coordinates of the regular input grid.
bPts: boundary points for the TIN.
ePts: boundary points for the regular grid.
folder: name of the output folder.
rfolder: restart folder.
rstep: restart step.
"""
def __init__(
self,
layNb,
eroMap,
eroVal,
eroTop,
thickMap,
thickVal,
xyTIN,
regX,
regY,
bPts,
ePts,
folder,
rfolder=None,
rstep=0,
):
self.regX = regX
self.regY = regY
self.layNb = layNb + 1
nbPts = len(xyTIN[:, 0])
self.folder = folder
# Build erosion layers
# If we restart a simulation
if rstep > 0:
if os.path.exists(rfolder):
folder = rfolder + "/h5/"
else:
raise ValueError(
"The restart folder is missing or the given path is incorrect."
)
df = h5py.File("%s/h5/erolay.time%s.hdf5" % (rfolder, rstep), "r")
self.thickness = numpy.array((df["/elayDepth"]))
self.Ke = numpy.array((df["/elayKe"]))
# Get erodibility from erosive layer thicknesses
self.erodibility = numpy.zeros(nbPts)
for k in range(self.layNb):
existIDs = numpy.where(
numpy.logical_and(
self.thickness[:, k] > 0.0, self.erodibility[:] == 0.0
)
)[0]
self.erodibility[existIDs] = self.Ke[existIDs, k]
if len(numpy.where(self.erodibility == 0)[0]) == 0:
break
# Build the underlying erodibility mesh and associated thicknesses
else:
# Initial top layer (id=0) is for reworked sediment (freshly deposited)
self.thickness = numpy.zeros((nbPts, self.layNb), dtype=float)
self.Ke = numpy.zeros((nbPts, self.layNb), dtype=float)
self.thickness[:, 0] = 0
self.Ke[:, 0] = eroTop
# Define inside area kdtree
inTree = cKDTree(xyTIN[bPts : ePts + bPts, :])
dist, inID = inTree.query(xyTIN[:bPts, :], k=1)
inID += bPts
# Loop through the underlying layers
for l in range(1, self.layNb):
# Uniform erodibility value
if eroMap[l - 1] == None:
self.Ke[:, l] = eroVal[l - 1]
# Erodibility map
else:
eMap = pandas.read_csv(
str(eroMap[l - 1]),
sep=r"\s+",
engine="c",
header=None,
na_filter=False,
                    dtype=float,
low_memory=False,
)
reMap = numpy.reshape(
eMap.values, (len(self.regX), len(self.regY)), order="F"
)
self.Ke[bPts:, l] = interpolate.interpn(
(self.regX, self.regY), reMap, xyTIN[bPts:, :], method="nearest"
)
# Assign boundary nodes
tmpK = self.Ke[bPts:, l]
self.Ke[:bPts, l] = tmpK[inID]
# Uniform thickness value
if thickMap[l - 1] == None:
self.thickness[:, l] = thickVal[l - 1]
# Thickness map
else:
tMap = pandas.read_csv(
str(thickMap[l - 1]),
sep=r"\s+",
engine="c",
header=None,
na_filter=False,
                    dtype=float,
low_memory=False,
)
rtMap = numpy.reshape(
tMap.values, (len(self.regX), len(self.regY)), order="F"
)
self.thickness[bPts:, l] = interpolate.interpn(
(self.regX, self.regY), rtMap, xyTIN[bPts:, :], method="linear"
)
# Assign boundary nodes
tmpH = self.thickness[bPts:, l]
self.thickness[:bPts, l] = tmpH[inID]
# Define active layer erodibility
self.erodibility = numpy.zeros(nbPts)
for l in range(1, self.layNb):
# Get erodibility coefficients from active underlying layers
tmpIDs = numpy.where(
numpy.logical_and(
self.thickness[:, l] > 0.0, self.erodibility[:] == 0.0
)
)[0]
self.erodibility[tmpIDs] = self.Ke[tmpIDs, l]
if len(numpy.where(self.erodibility == 0)[0]) == 0:
break
# Bottom layer is supposed to be infinitely thick
self.thickness[:, self.layNb - 1] += 1.0e6
return
def getErodibility(self, cumThick):
"""
Get the erodibility values for the surface based on underlying erosive stratigraphic layer.
Args:
cumThick: numpy float-type array containing the cumulative erosion/deposition of the nodes in the TIN
"""
# Update deposition
depIDs = numpy.where(cumThick >= 0.0)[0]
self.thickness[depIDs, 0] += cumThick[depIDs]
# Update erosion
eroIDs = numpy.where(cumThick < 0.0)[0]
if len(eroIDs) > 0:
for k in range(self.layNb):
# Update thickness for remaining layers
eIDs = numpy.where(
numpy.logical_and(
self.thickness[eroIDs, k] > 0.0,
self.thickness[eroIDs, k] >= -cumThick[eroIDs],
)
)[0]
if len(eIDs) > 0:
self.thickness[eroIDs[eIDs], k] += cumThick[eroIDs[eIDs]]
cumThick[eroIDs[eIDs]] = 0.0
# Nullify eroded layer thicknesses and update erosion values
eIDs = numpy.where(
numpy.logical_and(
self.thickness[eroIDs, k] > 0.0, cumThick[eroIDs] < 0.0
)
)[0]
if len(eIDs) > 0:
cumThick[eroIDs[eIDs]] += self.thickness[eroIDs[eIDs], k]
self.thickness[eroIDs[eIDs], k] = 0.0
# Ensure non-negative values
tmpIDs = numpy.where(self.thickness[:, k] < 0.0)[0]
if len(tmpIDs) > 0:
self.thickness[tmpIDs, k] = 0.0
if len( | numpy.where(cumThick < 0) | numpy.where |
import sys
import os
import numpy as np
import nibabel as nib
import vnmrjpy as vj
import warnings
import matplotlib.pyplot as plt
class KspaceMaker():
"""Class to build the k-space from the raw fid data based on procpar.
Raw fid_data is numpy.ndarray(blocks, traces * np) format. Should be
untangled based on 'seqcon' or 'seqfil' parameters.
seqcon chars refer to (echo, slice, Pe1, Pe2, Pe3)
Should support compressed sensing
In case of Compressed sensing the reduced kspace is filled with zeros
to reach the intended final shape
Leave rest of reconstruction to other classes/functions
    INPUT: fid data = np.ndarray([blocks, np*traces])
fid header
procpar
METHODS:
make():
            return kspace = numpy.ndarray\
([rcvrs, phase, read, slice, echo*time])
"""
def __init__(self, fid_data, fidheader, procpar, verbose=False):
"""Reads procpar"""
def _get_arrayed_AP(p):
"""check for arrayed pars in procpar
Return: dictionary {par : array_length}
"""
AP_dict = {}
for par in ['tr', 'te', 'fa']:
pass
return AP_dict
self.p = vj.io.ProcparReader(procpar).read()
self.fid_header = fidheader
self.rcvrs = str(self.p['rcvrs']).count('y')
self.arrayed_AP = _get_arrayed_AP(self.p)
apptype = self.p['apptype']
self.config = vj.config
self.verbose = verbose
self.procpar = procpar
# decoding skipint parameter
# TODO
# final kspace shape from config file
self.dest_shape = (vj.config['rcvr_dim'],\
vj.config['pe_dim'],\
vj.config['ro_dim'],\
vj.config['slc_dim'],\
vj.config['et_dim'])
self.pre_kspace = np.vectorize(complex)(fid_data[:,0::2],\
fid_data[:,1::2])
self.pre_kspace = np.array(self.pre_kspace,dtype='complex64')
# check for arrayed parameters, save the length for later
self.array_length = vj.util.calc_array_length(fid_data.shape,procpar)
self.blocks = fid_data.shape[0] // self.array_length
if verbose:
print('Making k-space for '+ str(apptype)+' '+str(self.p['seqfil'])+\
' seqcon: '+str(self.p['seqcon']))
def print_fid_header(self):
        for item in self.fid_header.keys():
            print(str('{} : {}').format(item, self.fid_header[item]))
def make(self):
"""Build k-space from fid data
Return:
kspace=numpy.ndarray([rcvrs,phase,readout,slice,echo/time])
"""
def _is_interleaved(ppdict):
res = (int(ppdict['sliceorder']) == 1)
return res
def _is_evenslices(ppdict):
try:
res = (int(ppdict['ns']) % 2 == 0)
except:
res = (int(ppdict['pss']) % 2 == 0)
return res
def make_im2D():
"""Child method of 'make', provides the same as vnmrj im2Drecon"""
p = self.p
rcvrs = int(p['rcvrs'].count('y'))
(read, phase, slices) = (int(p['np'])//2, \
int(p['nv']), \
int(p['ns']))
shiftaxis = (self.config['pe_dim'],self.config['ro_dim'])
if 'ne' in p.keys():
echo = int(p['ne'])
else:
echo = 1
time = 1
finalshape = (rcvrs, phase, read, slices,echo*time*self.array_length)
final_kspace = np.zeros(finalshape,dtype='complex64')
for i in range(self.array_length):
kspace = self.pre_kspace[i*self.blocks:(i+1)*self.blocks,...]
if p['seqcon'] == 'nccnn':
shape = (self.rcvrs, phase, slices, echo*time, read)
kspace = np.reshape(kspace, shape, order='C')
kspace = np.moveaxis(kspace, [0,1,4,2,3], self.dest_shape)
elif p['seqcon'] == 'nscnn':
raise(Exception('not implemented'))
elif p['seqcon'] == 'ncsnn':
preshape = (self.rcvrs, phase, slices*echo*time*read)
shape = (self.rcvrs, phase, slices, echo*time, read)
kspace = np.reshape(kspace, preshape, order='F')
kspace = np.reshape(kspace, shape, order='C')
kspace = np.moveaxis(kspace, [0,1,4,2,3], self.dest_shape)
elif p['seqcon'] == 'ccsnn':
preshape = (self.rcvrs, phase, slices*echo*time*read)
shape = (self.rcvrs, phase, slices, echo*time, read)
kspace = np.reshape(kspace, preshape, order='F')
kspace = np.reshape(kspace, shape, order='C')
kspace = np.moveaxis(kspace, [0,1,4,2,3], self.dest_shape)
else:
raise(Exception('Not implemented yet'))
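                # De-interleave slices if needed: interleaved acquisitions store
                # the two slice passes in the first and second half of the slice
                # axis, so the halves are re-woven into spatial order below.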
if _is_interleaved(p): # 1 if interleaved slices
if _is_evenslices(p):
c = np.zeros(kspace.shape, dtype='complex64')
c[...,0::2,:] = kspace[...,:slices//2,:]
c[...,1::2,:] = kspace[...,slices//2:,:]
kspace = c
else:
c = np.zeros(kspace.shape, dtype='complex64')
c[...,0::2,:] = kspace[...,:(slices+1)//2,:]
c[...,1::2,:] = kspace[...,(slices-1)//2+1:,:]
kspace = c
final_kspace[...,i*echo*time:(i+1)*echo*time] = kspace
self.kspace = final_kspace
return final_kspace
def make_im2Dcs():
"""
These (*cs) are compressed sensing variants
"""
def decode_skipint_2D(skipint):
pass
raise(Exception('not implemented'))
def make_im2Depi():
p = self.p
kspace = self.pre_kspace
print(kspace.shape)
nseg = p['nseg']
kzero = int(p['kzero'])
images = int(p['images']) # repetitions
time = images
if p['navigator'] == 'y':
pluspe = 1 + int(p['nnav']) # navigator echo + unused
else:
pluspe = 1 # unused only
print('images {}'.format(images))
print('nseg {}'.format(nseg))
print('ns {}'.format(p['ns']))
if p['pro'] != 0:
(read, phase, slices) = (int(p['nread']), \
int(p['nphase']), \
int(p['ns']))
else:
(read, phase, slices) = (int(p['nread'])//2, \
int(p['nphase']), \
int(p['ns']))
if p['seqcon'] == 'ncnnn':
preshape = (self.rcvrs, phase+pluspe, slices, time, read)
print(kspace.size)
tmp = np.zeros(preshape)
print(tmp.size)
kspace = np.reshape(kspace, preshape, order='c')
def make_im2Depics():
raise(Exception('not implemented'))
def make_im2Dfse():
warnings.warn('May not work correctly')
kspace = self.pre_kspace
p = self.p
petab = vj.util.getpetab(self.procpar,is_procpar=True)
nseg = int(p['nseg']) # seqgments
etl = int(p['etl']) # echo train length
kzero = int(p['kzero'])
images = int(p['images']) # repetitions
(read, phase, slices) = (int(p['np'])//2, \
int(p['nv']), \
int(p['ns']))
shiftaxis = (self.config['pe_dim'],self.config['ro_dim'])
echo = 1
time = images
phase_sort_order = np.reshape(np.array(petab),petab.size,order='C')
# shift to positive
phase_sort_order = phase_sort_order + phase_sort_order.size//2-1
if p['seqcon'] == 'nccnn':
#TODO check for images > 1
preshape = (self.rcvrs, phase//etl, slices, echo*time, etl, read)
shape = (self.rcvrs, echo*time, slices, phase, read)
kspace = np.reshape(kspace, preshape, order='C')
kspace = np.swapaxes(kspace,1,3)
kspace = np.reshape(kspace, shape, order='C')
# shape is [rcvrs, phase, slices, echo*time, read]
kspace = np.swapaxes(kspace,1,3)
kspace_fin = np.zeros_like(kspace)
kspace_fin[:,phase_sort_order,:,:,:] = kspace
kspace_fin = np.moveaxis(kspace_fin, [0,1,4,2,3], self.dest_shape)
kspace = kspace_fin
else:
raise(Exception('not implemented'))
if _is_interleaved(p): # 1 if interleaved slices
if _is_evenslices(p):
c = np.zeros(kspace.shape, dtype='complex64')
c[...,0::2,:] = kspace[...,:slices//2,:]
c[...,1::2,:] = kspace[...,slices//2:,:]
kspace = c
else:
c = np.zeros(kspace.shape, dtype='complex64')
c[...,0::2,:] = kspace[...,:(slices+1)//2,:]
c[...,1::2,:] = kspace[...,(slices-1)//2+1:,:]
kspace = c
self.kspace = kspace
return kspace
def make_im2Dfsecs():
raise(Exception('not implemented'))
def make_im3D():
kspace = self.pre_kspace
p = self.p
(read, phase, phase2) = (int(p['np'])//2, \
int(p['nv']), \
int(p['nv2']))
shiftaxis = (self.config['pe_dim'],\
self.config['ro_dim'],\
self.config['pe2_dim'])
if 'ne' in p.keys():
echo = int(p['ne'])
else:
echo = 1
time = 1
if p['seqcon'] == 'nccsn':
preshape = (self.rcvrs,phase2,phase*echo*time*read)
shape = (self.rcvrs,phase2,phase,echo*time,read)
kspace = np.reshape(kspace,preshape,order='F')
kspace = np.reshape(kspace,shape,order='C')
kspace = np.moveaxis(kspace, [0,2,4,1,3], self.dest_shape)
kspace = np.flip(kspace,axis=3)
if p['seqcon'] == 'ncccn':
preshape = (self.rcvrs,phase2,phase*echo*time*read)
shape = (self.rcvrs,phase,phase2,echo*time,read)
kspace = np.reshape(kspace,preshape,order='F')
kspace = np.reshape(kspace,shape,order='C')
kspace = np.moveaxis(kspace, [0,2,4,1,3], self.dest_shape)
if p['seqcon'] == 'cccsn':
preshape = (self.rcvrs,phase2,phase*echo*time*read)
shape = (self.rcvrs,phase,phase2,echo*time,read)
kspace = | np.reshape(kspace,preshape,order='F') | numpy.reshape |
import numpy as np
from scipy.io import wavfile
import random
import glob
import time
import museval
'''
# FAST BSS TESTBED Version 0.1.0:
timer_start(self):
timer_value(self):
timer_suspend(self):
timer_resume(self):
wavs_to_matrix_S(self, folder_address, duration, source_number):
generate_matrix_A(self, S, mixing_type="random", max_min=(1, 0.01), mu_sigma=(0, 1)):
generate_matrix_S_A_X(self, folder_address, wav_range, source_number,
mixing_type="random", max_min=(1, 0.01), mu_sigma=(0, 1)):
fast_psnr(self, S, hat_S):
bss_evaluation(self, S, hat_S, type='fast_psnr'):
# Basic definition:
S: Source signals. shape = (source_number, time_slots_number)
X: Mixed source signals. shape = (source_number, time_slots_number)
A: Mixing matrix. shape = (source_number, source_number)
B: Separation matrix. shape = (source_number, source_number)
    hat_S: Estimated source signals obtained by ICA algorithms.
shape = (source_number, time_slots_number)
# Notes:
X = A @ S
S = B @ X
B = A ^ -1
'''
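# A minimal usage sketch (illustrative only; the wav folder, duration and
# source count are assumptions, and hat_S comes from some ICA algorithm):
#   testbed = PyFastbssTestbed()
#   S, A, X = testbed.generate_matrix_S_A_X('wav_folder', 2, 4)
#   psnr = testbed.fast_psnr(S, hat_S)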
class PyFastbssTestbed:
def __init__(self):
self.timer_start_time = 0
self.timer_suspend_time = 0
def timer_start(self):
'''
# timer_start(self):
# Usage:
Start the timer
'''
self.timer_start_time = time.time()
def timer_value(self):
'''
# timer_value(self):
# Usage:
            Get the elapsed time since the timer was started
        # Output:
            The elapsed time of the timer in milliseconds
'''
return 1000*(time.time()-self.timer_start_time)
def timer_suspend(self):
'''
# timer_suspend(self):
# Usage:
Suspend the timer
'''
self.timer_suspend_time = time.time()
def timer_resume(self):
'''
# timer_resume(self):
# Usage:
Resume the timer
'''
print("suspend: ", time.time() - self.timer_suspend_time)
self.timer_start_time = self.timer_start_time + \
time.time() - self.timer_suspend_time
def wavs_to_matrix_S(self, folder_address, duration, source_number):
'''
# wavs_to_matrix_S(self, folder_address, duration, source_number):
# Usage:
Input the wav files to generate the source signal matrix S
# Parameters:
folder_address: Define folder adress, in which the *.wav files exist.
The wav files must have only 1 channel.
duration: The duration of the output original signals,
i.e. the whole time domain of the output matrix S
source number: The number of the source signals in matrix S
# Output:
Matrix S.
The shape of the S is (source number, time slots number),
the wav files are randomly selected to generate the matrix S.
'''
wav_path = folder_address + '/*.wav'
wav_filenames = glob.glob(wav_path)
random_indexs = random.sample(range(len(wav_filenames)), source_number)
S = []
for i in range(source_number):
sample_rate, _s = wavfile.read(wav_filenames[random_indexs[i]])
wav_length = np.shape(_s)[-1]
wav_range = int(duration*sample_rate)
if wav_range > wav_length:
raise ValueError('Error - wav_to_S : The wav_range too big !')
wav_start = int(0.5*wav_length) - int(0.5*wav_range)
wav_stop = int(0.5*wav_length) + int(0.5*wav_range)
_single_source = _s[wav_start:wav_stop]
_single_source = _single_source / np.mean(abs(_single_source))
S.append(_single_source)
return np.asarray(S)
def generate_matrix_A(self, S, mixing_type="random", max_min=(1, 0.01), mu_sigma=(0, 1)):
'''
# generate_matrix_A(self, S, mixing_type="random", max_min=(1,0.01), mu_sigma=(0,1)):
# Usage:
Generate the mixing matrix A according to the size of the source signal matrix S
# Parameters:
mixing_type:
            'random': The values of a_i_j are randomly distributed in the
                interval (minimum_value, maximum_value).
            'normal': The values of a_i_j (i==j) are equal to 1. The values of
                a_i_j (i!=j) are normally distributed according to N(mu, sigma),
                where mu is the mean of the a_i_j (i!=j) and sigma is their
                standard deviation.
        max_min: max_min = (maximum_value, minimum_value), used when the
            mixing_type is 'random'.
        mu_sigma: mu_sigma = (mu, sigma), used when the mixing_type is 'normal'.
# Output:
Mixing matrix A.
'''
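        # Example (illustrative): with 3 sources and mixing_type='random', A is a
        # 3x3 matrix with ones on the diagonal and off-diagonal entries drawn
        # uniformly from (max_min[1], max_min[0]).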
source_number = np.shape(S)[0]
A = np.zeros([source_number, source_number])
if source_number < 2:
raise ValueError(
'Error - mixing matrix : The number of the sources must more than 1!')
if mixing_type == "random":
A = np.ones((source_number, source_number), np.float)
for i in range(source_number):
for j in range(source_number):
if i != j:
A[i, j] = max_min[1] + \
(max_min[0]-max_min[1])*random.random()
elif mixing_type == "normal":
for i in range(source_number):
for j in range(source_number):
_random_number = abs(np.random.normal(
mu_sigma[0], mu_sigma[1], 1))
while(_random_number >= 0.99):
_random_number = abs(np.random.normal(
mu_sigma[0], mu_sigma[1], 1))
A[i, j] = _random_number
for i in range(source_number):
A[i, i] = 1
return A
def generate_matrix_S_A_X(self, folder_address, wav_range, source_number, mixing_type="random", max_min=(1, 0.01), mu_sigma=(0, 1)):
'''
# generate_matrix_S_A_X(self, folder_address, wav_range, source_number,
# mixing_type="random", max_min=(1, 0.01), mu_sigma=(0, 1)):
# Usage:
Generate the mixing matrix S,A,X according to the size of the source
signal matrix S
# Parameters:
folder_address: Define folder adress, in which the *.wav files exist.
The wav files must have only 1 channel.
duration: The duration of the output original signals,
i.e. the whole time domain of the output matrix S
source number: The number of the source signals in matrix S
mixing_type:
            'random': The values of a_i_j are randomly distributed in the
                interval (minimum_value, maximum_value).
            'normal': The values of a_i_j (i==j) are equal to 1. The values of
                a_i_j (i!=j) are normally distributed according to N(mu, sigma),
                where mu is the mean of the a_i_j (i!=j) and sigma is their
                standard deviation.
        max_min: max_min = (maximum_value, minimum_value), used when the
            mixing_type is 'random'.
        mu_sigma: mu_sigma = (mu, sigma), used when the mixing_type is 'normal'.
# Output:
Matrix S, A, X.
The shape of the S and X are (source number, time slots number),
        the shape of A is (source number, source number), the wav files are
randomly selected to generate the matrix S, A, X.
'''
S = self.wavs_to_matrix_S(folder_address, wav_range, source_number)
A = self.generate_matrix_A(S, mixing_type, max_min, mu_sigma)
X = np.dot(A, S)
return S, A, X
def fast_psnr(self, S, hat_S):
'''
# fast_psnr(self, S, hat_S):
# Usage:
Calculate the psnr of the estimated source signals (matrix hat_S)
# Parameters:
S: Reference source signal (matrix S)
hat_S: Estimated source signal (matrix hat_S)
# Output:
            The PSNR computed over all estimated sources
'''
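        # As computed below: both S and hat_S are peak-normalised per source,
        # sources are matched by minimal absolute difference, and
        #   PSNR = 20 * log10( sum|S| / sum| |S| - |hat_S_matched| | )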
original_hat_S = hat_S
S = np.dot(np.diag(1/(np.max(abs(S), axis=1))), S)
hat_S = np.dot(np.diag(1/(np.max(abs(hat_S), axis=1))), hat_S)
amplitude_signal = 0
amplitude_noise = 0
sorted_hat_S = []
for _source in S:
_differences = []
for _hat_source in hat_S:
_differences.append(
np.sum(np.abs(np.abs(_source)-np.abs(_hat_source))))
_row_index = int(np.argmin(_differences))
sorted_hat_S.append(original_hat_S[_row_index])
amplitude_noise += np.min(_differences)
amplitude_signal += np.sum(np.abs(_source))
if amplitude_noise == 0:
raise ValueError('Error - SNR : No noise exists!')
SNR = 20 * | np.log10(amplitude_signal / amplitude_noise) | numpy.log10 |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Compute minibatch blobs for training a RetinaNet network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import logging
import utils.boxes as box_utils
import roi_data.data_utils as data_utils
from core.config import cfg
logger = logging.getLogger(__name__)
def get_retinanet_blob_names(is_training=True):
"""
Returns blob names in the order in which they are read by the data
loader.
"""
# im_info: (height, width, image scale)
blob_names = ['im_info']
assert cfg.FPN.FPN_ON, "RetinaNet uses FPN for dense detection"
# Same format as RPN blobs, but one per FPN level
if is_training:
blob_names += ['roidb', 'retnet_fg_num', 'retnet_bg_num']
for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
suffix = 'fpn{}'.format(lvl)
blob_names += [
'retnet_cls_labels_' + suffix,
'retnet_roi_bbox_targets_' + suffix,
'retnet_bbox_inside_weights_wide_' + suffix,
]
return blob_names
def add_retinanet_blobs(blobs, im_scales, roidb, image_width, image_height):
"""Add RetinaNet blobs."""
# RetinaNet is applied to many feature levels, as in the FPN paper
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
num_aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)
aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
anchor_scale = cfg.RETINANET.ANCHOR_SCALE
# get anchors from all levels for all scales/aspect ratios
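    # e.g. with RPN levels 3..7, 3 scales per octave and 3 aspect ratios
    # (typical RetinaNet settings) this loop builds 5 * 3 * 3 = 45 fields of
    # anchors; the actual counts depend on the cfg values read above.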
foas = []
for lvl in range(k_min, k_max + 1):
stride = 2. ** lvl
for octave in range(scales_per_octave):
octave_scale = 2 ** (octave / float(scales_per_octave))
for idx in range(num_aspect_ratios):
anchor_sizes = (stride * octave_scale * anchor_scale,)
anchor_aspect_ratios = (aspect_ratios[idx],)
foa = data_utils.get_field_of_anchors(
stride, anchor_sizes, anchor_aspect_ratios, octave, idx)
foas.append(foa)
all_anchors = np.concatenate([f.field_of_anchors for f in foas])
blobs['retnet_fg_num'], blobs['retnet_bg_num'] = 0.0, 0.0
for im_i, entry in enumerate(roidb):
scale = im_scales[im_i]
im_height = np.round(entry['height'] * scale)
im_width = np.round(entry['width'] * scale)
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
assert len(gt_inds) > 0, \
            'Empty ground truth for image is not allowed. Please check.'
gt_rois = entry['boxes'][gt_inds, :] * scale
gt_classes = entry['gt_classes'][gt_inds]
im_info = np.array([[im_height, im_width, scale]], dtype=np.float32)
blobs['im_info'].append(im_info)
retinanet_blobs, fg_num, bg_num = _get_retinanet_blobs(
foas, all_anchors, gt_rois, gt_classes, image_width, image_height)
for i, foa in enumerate(foas):
for k, v in retinanet_blobs[i].items():
level = int(np.log2(foa.stride))
key = '{}_fpn{}'.format(k, level)
blobs[key].append(v)
blobs['retnet_fg_num'] += fg_num
blobs['retnet_bg_num'] += bg_num
blobs['retnet_fg_num'] = blobs['retnet_fg_num'].astype(np.float32)
blobs['retnet_bg_num'] = blobs['retnet_bg_num'].astype(np.float32)
N = len(roidb)
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
# compute number of anchors
A = int(len(v) / N)
# for the cls branch labels [per fpn level],
# we have blobs['retnet_cls_labels_fpn{}'] as a list until this step
# and length of this list is N x A where
# N = num_images, A = num_anchors for example, N = 2, A = 9
# Each element of the list has the shape 1 x 1 x H x W where H, W are
            # spatial dimension of current fpn lvl. Let a{i} denote the element
            # corresponding to anchor i [9 anchors total] in the list.
            # The elements in the list are in order [[a0, ..., a8], [a0, ..., a8]]
# however the network will make predictions like 2 x (9 * 80) x H x W
# so we first concatenate the elements of each image to a numpy array
# and then concatenate the two images to get the 2 x 9 x H x W
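            # e.g. N = 2, A = 9: the per-anchor (1, 1, H, W) blobs are first
            # concatenated along axis 1 into one (1, 9, H, W) array per image,
            # then stacked along axis 0 into the final (2, 9, H, W) blob.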
if k.find('retnet_cls_labels') >= 0 \
or k.find('retnet_roi_bbox_targets') >= 0:
tmp = []
# concat anchors within an image
for i in range(0, len(v), A):
tmp.append(np.concatenate(v[i: i + A], axis=1))
# concat images
blobs[k] = | np.concatenate(tmp, axis=0) | numpy.concatenate |
import logging
import numpy as np
import xobjects as xo
import xtrack.linear_normal_form as lnf
import xpart as xp # To get the right Particles class depending on pyheadtail interface state
logger = logging.getLogger(__name__)
def _check_lengths(**kwargs):
length = None
for nn, xx in kwargs.items():
if hasattr(xx, "__iter__"):
if hasattr(xx, 'shape') and len(xx.shape) == 0:
continue
if length is None:
length = len(xx)
else:
if length != len(xx):
raise ValueError(f"invalid length len({nn})={len(xx)}")
if 'num_particles' in kwargs.keys():
num_particles = kwargs['num_particles']
if num_particles is not None and length is None:
length = num_particles
if num_particles is not None and length != num_particles:
raise ValueError(
f"num_particles={num_particles} is inconsistent with array length")
if length is None:
length = 1
return length
def build_particles(_context=None, _buffer=None, _offset=None, _capacity=None,
mode=None,
particle_ref=None,
num_particles=None,
x=None, px=None, y=None, py=None, zeta=None, delta=None,
x_norm=None, px_norm=None, y_norm=None, py_norm=None,
tracker=None,
at_element=None,
match_at_s=None,
particle_on_co=None,
R_matrix=None,
scale_with_transverse_norm_emitt=None,
weight=None,
particles_class=None,
co_search_settings=None,
steps_r_matrix=None,
matrix_responsiveness_tol=None,
matrix_stability_tol=None,
symplectify=False,
):
"""
Function to create particle objects from arrays containing physical or
normalized coordinates.
Arguments:
- mode: choose between:
- `set`: reference quantities including mass0, q0, p0c, gamma0,
etc. are taken from the provided reference particle. Particles
coordinates are set according to the provided input x, px, y, py,
zeta, delta (zero is assumed as default for these variables).
- `shift`: reference quantities including mass0, q0, p0c, gamma0,
etc. are taken from the provided reference particle. Particles
coordinates are set from the reference particles and shifted
according to the provided input x, px, y, py, zeta, delta (zero
is assumed as default for these variables).
- `normalized_transverse`: reference quantities including mass0,
q0, p0c, gamma0, etc. are taken from the provided reference
particle. The longitudinal coordinates are set according to the
provided input `zeta`, `delta` (zero is assumed as default value
              for these variables). The transverse coordinates are computed from
normalized values `x_norm`, `px_norm`, `y_norm`, `py_norm` using
the closed-orbit information and the linear transfer map obtained
from the `tracker` or provided by the user.
The default mode is `set`. `normalized_transverse` is used if any
          of x_norm, px_norm, y_norm, py_norm is provided.
- particle_ref: particle object defining the reference quantities
(mass0, 0, p0c, gamma0, etc.). Its coordinates (x, py, y, py, zeta,
delta) are ignored unless `mode`='shift' is selected.
- num_particles: Number of particles to be generated (used if provided
coordinates are all scalar)
- x: x coordinate of the particles (default is 0).
- px: px coordinate of the particles (default is 0).
- y: y coordinate of the particles (default is 0).
- py: py coordinate of the particles (default is 0).
- zeta: zeta coordinate of the particles (default is 0).
- delta: delta coordinate of the particles (default is 0).
        - x_norm, px_norm, y_norm, py_norm: transverse normalized coordinates (in sigmas) used in
combination with the one turn matrix R_matrix and with the
transverse emittances provided in the argument
`scale_with_transverse_norm_emitt` to generate x, px, y, py (x, px,
y, py cannot be provided if x_norm, px_norm, y_norm, py_norm are
provided).
- x_norm: transverse normalized coordinate x (in sigmas).
- px_norm: transverse normalized coordinate px (in sigmas).
- y_norm: transverse normalized coordinate y (in sigmas).
- py_norm: transverse normalized coordinate py (in sigmas).
- tracker: tracker object used to find the closed orbit and the
one-turn matrix.
- particle_on_co: Particle on closed orbit
- R_matrix: 6x6 matrix defining the linearized one-turn map to be used
for the transformation of the normalized coordinates into physical
space.
- scale_with_transverse_norm_emitt: Tuple of two elements defining the
transverse normalized emittances used to rescale the provided
transverse normalized coordinates (x, px, y, py).
- weight: weights to be assigned to the particles.
- at_element: location within the line at which particles are generated.
It can be an index or an element name. It can be given only if
          `tracker` is provided and `mode` is "normalized_transverse".
- match_at_s: s coordinate of a location in the drifts downstream the
specified `at_element` at which the particles are generated before
being backdrifted to the location specified by `at_element`.
No active element can be present in between.
- _context: xobjects context in which the particle object is allocated.
"""
assert mode in [None, 'set', 'shift', 'normalized_transverse']
    Particles = xp.Particles # To get the right Particles class depending on pyheadtail interface state
if particles_class is not None:
raise NotImplementedError
if (particle_ref is not None and particle_on_co is not None):
raise ValueError("`particle_ref` and `particle_on_co`"
" cannot be provided at the same time")
if particle_on_co is None and particle_ref is None:
if tracker is not None:
particle_ref = tracker.particle_ref
if particle_ref is None:
assert particle_on_co is not None, (
"`particle_ref` or `particle_on_co` must be provided!")
particle_ref = particle_on_co
if not isinstance(particle_ref._buffer.context, xo.ContextCpu):
particle_ref = particle_ref.copy(_context=xo.ContextCpu())
# Move other input parameters to cpu if needed
# Generated by:
# for nn in 'x px y py zeta delta x_norm px_norm y_norm py_norm'.split():
# print(f'{nn} = ({nn}.get() if hasattr({nn}, "get") else {nn})')
x = (x.get() if hasattr(x, "get") else x)
px = (px.get() if hasattr(px, "get") else px)
y = (y.get() if hasattr(y, "get") else y)
py = (py.get() if hasattr(py, "get") else py)
zeta = (zeta.get() if hasattr(zeta, "get") else zeta)
delta = (delta.get() if hasattr(delta, "get") else delta)
x_norm = (x_norm.get() if hasattr(x_norm, "get") else x_norm)
px_norm = (px_norm.get() if hasattr(px_norm, "get") else px_norm)
y_norm = (y_norm.get() if hasattr(y_norm, "get") else y_norm)
py_norm = (py_norm.get() if hasattr(py_norm, "get") else py_norm)
if tracker is not None and tracker.iscollective:
logger.warning('Ignoring collective elements in particles generation.')
tracker = tracker._supertracker
if tracker is not None:
if matrix_responsiveness_tol is None:
matrix_responsiveness_tol = tracker.matrix_responsiveness_tol
if matrix_stability_tol is None:
matrix_stability_tol = tracker.matrix_stability_tol
if matrix_responsiveness_tol is None:
matrix_responsiveness_tol=lnf.DEFAULT_MATRIX_RESPONSIVENESS_TOL
if matrix_stability_tol is None:
matrix_stability_tol=lnf.DEFAULT_MATRIX_STABILITY_TOL
if zeta is None:
zeta = 0
if delta is None:
delta = 0
if (x_norm is not None or px_norm is not None
or y_norm is not None or py_norm is not None):
assert (x is None and px is None
and y is None and py is None)
if mode is None:
mode = 'normalized_transverse'
else:
assert mode == 'normalized_transverse'
if mode is None:
mode = 'set'
if mode == 'normalized_transverse':
if x_norm is None: x_norm = 0
if px_norm is None: px_norm = 0
if y_norm is None: y_norm = 0
if py_norm is None: py_norm = 0
else:
if x is None: x = 0
if px is None: px = 0
if y is None: y = 0
if py is None: py = 0
assert particle_ref._capacity == 1
ref_dict = {
'q0': particle_ref.q0,
'mass0': particle_ref.mass0,
'p0c': particle_ref.p0c[0],
'gamma0': particle_ref.gamma0[0],
'beta0': particle_ref.beta0[0],
}
part_dict = ref_dict.copy()
if at_element is not None or match_at_s is not None:
# Only this case is covered if not starting at element 0
assert tracker is not None
assert mode == 'normalized_transverse'
if isinstance(at_element, str):
at_element = tracker.line.element_names.index(at_element)
if match_at_s is not None:
import xtrack as xt
assert at_element is not None, (
'If `match_at_s` is provided, `at_element` needs to be provided and'
'needs to correspond to the corresponding element in the sequence'
)
# Match at a position where there is no marker and backtrack to the previous marker
expected_at_element = np.where(np.array(
tracker.line.get_s_elements())<=match_at_s)[0][-1]
assert at_element == expected_at_element or (
at_element < expected_at_element and
all([isinstance(tracker.line.element_dict[nn], xt.Drift)
for nn in tracker.line.element_names[at_element:expected_at_element]])), (
"`match_at_s` can only be placed in the drifts upstream of the "
"specified `at_element`. No active element can be present in between."
)
(tracker_rmat, _
) = xt.twiss_from_tracker._build_auxiliary_tracker_with_extra_markers(
tracker=tracker, at_s=[match_at_s], marker_prefix='xpart_rmat_')
at_element_tracker_rmat = tracker_rmat.line.element_names.index(
'xpart_rmat_0')
else:
tracker_rmat = tracker
at_element_tracker_rmat = at_element
if mode == 'normalized_transverse':
if particle_on_co is None:
assert tracker is not None
particle_on_co = tracker.find_closed_orbit(
particle_co_guess=Particles(
x=0, px=0, y=0, py=0, zeta=0, delta=0.,
**ref_dict),
co_search_settings=co_search_settings)
else:
assert particle_on_co._capacity == 1
if not isinstance(particle_on_co._buffer.context, xo.ContextCpu):
particle_on_co = particle_on_co.copy(_context=xo.ContextCpu())
assert particle_on_co.at_element[0] == 0
assert particle_on_co.s[0] == 0
assert particle_on_co.state[0] == 1
if at_element_tracker_rmat is not None:
# Match in a different position of the line
assert at_element_tracker_rmat > 0
part_co_ctx = particle_on_co.copy(_context=tracker_rmat._buffer.context)
tracker_rmat.track(part_co_ctx, num_elements=at_element_tracker_rmat)
particle_on_co = part_co_ctx.copy(_context=xo.ContextCpu())
if R_matrix is None:
# R matrix at location defined by particle_on_co.at_element
R_matrix = tracker_rmat.compute_one_turn_matrix_finite_differences(
particle_on_co=particle_on_co, steps_r_matrix=steps_r_matrix)
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x_norm=x_norm, px_norm=px_norm,
y_norm=y_norm, py_norm=py_norm)
if scale_with_transverse_norm_emitt is not None:
assert len(scale_with_transverse_norm_emitt) == 2
nemitt_x = scale_with_transverse_norm_emitt[0]
nemitt_y = scale_with_transverse_norm_emitt[1]
gemitt_x = nemitt_x/particle_ref.beta0/particle_ref.gamma0
gemitt_y = nemitt_y/particle_ref.beta0/particle_ref.gamma0
x_norm_scaled = np.sqrt(gemitt_x) * x_norm
px_norm_scaled = np.sqrt(gemitt_x) * px_norm
y_norm_scaled = np.sqrt(gemitt_y) * y_norm
py_norm_scaled = np.sqrt(gemitt_y) * py_norm
else:
x_norm_scaled = x_norm
px_norm_scaled = px_norm
y_norm_scaled = y_norm
py_norm_scaled = py_norm
WW, WWinv, Rot = lnf.compute_linear_normal_form(R_matrix,
symplectify=symplectify,
responsiveness_tol=matrix_responsiveness_tol,
stability_tol=matrix_stability_tol)
# Transform long. coordinates to normalized space
XX_long = np.zeros(shape=(6, num_particles), dtype=np.float64)
XX_long[4, :] = zeta - particle_on_co.zeta
XX_long[5, :] = delta - particle_on_co.delta
XX_norm_scaled = np.dot(WWinv, XX_long)
XX_norm_scaled[0, :] = x_norm_scaled
XX_norm_scaled[1, :] = px_norm_scaled
XX_norm_scaled[2, :] = y_norm_scaled
XX_norm_scaled[3, :] = py_norm_scaled
# Transform to physical coordinates
XX = np.dot(WW, XX_norm_scaled)
XX[0, :] += particle_on_co.x
XX[1, :] += particle_on_co.px
XX[2, :] += particle_on_co.y
XX[3, :] += particle_on_co.py
XX[4, :] += particle_on_co.zeta
XX[5, :] += particle_on_co.delta
elif mode == 'set':
if R_matrix is not None:
logger.warning('R_matrix provided but not used in this mode!')
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x=x, px=px,
y=y, py=py)
XX = np.zeros(shape=(6, num_particles), dtype=np.float64)
XX[0, :] = x
XX[1, :] = px
XX[2, :] = y
XX[3, :] = py
XX[4, :] = zeta
XX[5, :] = delta
elif mode == "shift":
if R_matrix is not None:
logger.warning('R_matrix provided but not used in this mode!')
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x=x, px=px,
y=y, py=py)
XX = | np.zeros(shape=(6, num_particles), dtype=np.float64) | numpy.zeros |
import tensorflow as tf
def prune(before,after,p):
global c
f=after-before
for i in [0,2,4,6]:
# ct=np.count_nonzero(f[i])
# print(ct)
updates=f[i]
all_=abs(updates).flatten()
all_=all_[all_!=0]
l=int(len(all_)*p)
k=max(np.partition(all_,l)[:l])
updates[abs(updates)<=k]=int(0)
f[i]=updates
# print(ct-np.count_nonzero(f[i]))
# print("---")
# print("###")
return f+before
def disp(t):
for i in t:
print(i.shape)
print("-")
def kmeans(f):
x=f.flatten()
y=x.reshape(-1,1)
clusters_n = 128
iteration_n = 100
points = tf.constant(y)
centroids = tf.Variable(tf.slice(tf.random_shuffle(points), [0, 0], [clusters_n, -1]))
points_expanded = tf.expand_dims(points, 0)
centroids_expanded = tf.expand_dims(centroids, 1)
distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
assignments = tf.argmin(distances, 0)
means = []
for c in range(clusters_n):
means.append(tf.reduce_mean(
tf.gather(points,
tf.reshape(
tf.where(
tf.equal(assignments, c)
),[1,-1])
),reduction_indices=[1]))
new_centroids = tf.concat(means, 0)
update_centroids = tf.assign(centroids, new_centroids)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(iteration_n):
[_, centroid_values, points_values, assignment_values] = sess.run([update_centroids, centroids, points, assignments])
return centroid_values
def call_kmeans(before,after):
f=after-before
# Layer 6
c=kmeans(f[6])
# print(np.count_nonzero(f[6]))
c = c[~ | np.isnan(c) | numpy.isnan |
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from argparse import ArgumentParser
from os import path
from time import time
from utils import trj2blocks
# MDAnalysis
import MDAnalysis as mda
from MDAnalysis.analysis.distances import distance_array
def parse():
'''Parse command line arguments.
Returns:
Namespace object containing input arguments.
'''
parser = ArgumentParser(description='MDTools: Local structure index')
parser.add_argument('-i', '--input', required=True, type=str,
help='Input .xyz file')
parser.add_argument('-n', '--n_cpu', required=True, type=int,
help='Number of CPUs for parallel processing')
parser.add_argument('-c', '--cell_vectors', required=True, type=float,
help='Lattice vectors in angstroms (a, b, c)', nargs=3)
parser.add_argument('-nb', '--n_bins', required=True, type=int,
help='Number of bins')
return parser.parse_args()
def lsi(u, block):
'''Computes local structure index (LSI).
Args:
u: MDAnalysis Universe object containing trajectory.
block: Range of frames composing block.
Returns:
Local structure index and heights of each oxygen.
'''
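    # For each oxygen, with its sorted O-O distances r_1 < r_2 < ... below the
    # 3.7 A cutoff and gaps Delta_j = r_(j+1) - r_j, the index computed below is
    #   LSI = (1/n) * sum_j (Delta_j - <Delta>)^2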
# Select oxygen atoms
oxygen = u.select_atoms('name O')
# Initialize OO distance array
rOO = np.zeros((len(oxygen), len(oxygen)))
lsindex = []
height = []
for i, ts in enumerate(u.trajectory[block.start:block.stop]):
print('Processing blocks %.1f%%' % (100*i/len(block)), end='\r')
# Compute OO distance array
distance_array(oxygen.positions, oxygen.positions,
box=u.dimensions, result=rOO)
# Loop over oxygen atoms
for j, pos in enumerate(oxygen.positions):
# Sort OO distance
r = np.sort(rOO[j])
# Consider all OO distances less than 3.7 angstrom
delta = r[np.roll((r > 0)*(r < 3.7), 1)]-r[(r > 0)*(r < 3.7)]
# Get mean and evaluate LSI as mean of squared differences to mean
ave = np.mean(delta)
lsindex.append(np.sum((delta-ave)**2)/len(delta))
# Store height of oxygen
height.append(pos[2])
return np.vstack((lsindex, height)).T
def main():
args = parse()
input = args.input
n_jobs = args.n_cpu
n_bins = args.n_bins
a, b, c = args.cell_vectors
CURRENT_PATH = path.dirname(path.realpath(__file__))
DATA_PATH = path.normpath(path.join(CURRENT_PATH, path.dirname(input)))
base = path.splitext(path.basename(input))[0]
# Initialize universe (time step 0.5 fs)
u = mda.Universe(input, dt=5e-4)
u.add_TopologyAttr('charges')
u.dimensions = | np.array([a, b, c, 90, 90, 90]) | numpy.array |
import time
import os
import arcade
import argparse
import gym
from gym import spaces
import swarm_env
import numpy as np
import random
import sys
sys.path.insert(0, '..')
from objects import SwarmSimulator
# Running experiment 22 in standalone file.
def experiment_runner(SWARM_SIZE = 15, ARENA_WIDTH = 600, ARENA_HEIGHT = 600, name_of_experiment = time.time(), INPUT_TIME = 300, GRID_X = 40, GRID_Y = 40,
disaster_size = 1, disaster_location = 'random', operator_size = 1, operator_location = 'random', reliability = (100, 101), unreliability_percentage = 0,
moving_disaster = False, communication_noise = 0, alpha = 10, normal_command = None, command_period = 0, constant_repulsion = False,
operator_vision_radius = 150, communication_range = 8, vision_range = 2, velocity_weight_coef = 0.01, boundary_repulsion = 1, aging_factor = 0.9999,
gp = False, gp_step = 50, maze = None, through_walls = True, rl_sim = None):
########### q-learning parameter setup #############
max_steps_per_episode = 10 # Steps allowed in a single episode.
learning_rate = 0.1 # alpha in bellman.
discount_rate = 0.99 # gamma in bellman for discount.
# Epsilon greedy policy vars.
exploration_rate = 1 # To set exploration (1 means 100% exploration)
max_exploration_rate = 1 # How large can exploration be.
min_exploration_rate = 0.01 # How small can exploration be.
exploration_decay_rate = 0.001 # decay rate for exploration.
rewards_all_episodes = [] # Saving all scores in rewards.
gym_swarm_env = gym.make('humanswarm-v0', maze_size=GRID_X) # Creating the environment for swarm learning.
gym_swarm_env.action_space = np.zeros((GRID_X, GRID_Y))
q_table = np.zeros((gym_swarm_env.observation_space.n , gym_swarm_env.action_space.size)) # Creating q-table for measuring score.
action = np.zeros((gym_swarm_env.action_space.size))
print('\n')
print("===== Reinforcement Parameters =====")
print("# Discount rate: " + str(discount_rate))
print("# Learning rate: " + str(learning_rate))
print("# Max steps per iteration: " + str(max_steps_per_episode))
print("# Max exploration rate: " + str(max_exploration_rate))
print("# Min exploration rate: " + str(min_exploration_rate))
print("# Exploration decay rate: " + str(exploration_decay_rate))
print("# Algorithm: " + str(rl_sim))
print("# State space size: " + str(gym_swarm_env.observation_space.n))
print("# Action space size: " + str(gym_swarm_env.action_space.size))
print("# Q-table size: " + str(q_table.shape))
print("====================================")
print('\n')
    # Implementing Q-learning algorithm.
done = False
state = gym_swarm_env.reset()
s_list = []
for step in range(max_steps_per_episode):
print('\n' + "============ start of step " + str(step) + " =============")
"""
        In this loop we set up the exploration-exploitation trade-off,
        take a new action,
        update the Q-table,
        set the new state,
        and add the new reward.
"""
# Simulation functions
sim = SwarmSimulator(ARENA_WIDTH, ARENA_HEIGHT, name_of_experiment, SWARM_SIZE, INPUT_TIME, GRID_X, GRID_Y, rl_sim)
sim.setup(disaster_size, disaster_location, operator_size, operator_location, reliability[0], reliability[1], unreliability_percentage, moving_disaster, communication_noise,
alpha, normal_command, command_period, constant_repulsion, operator_vision_radius,
communication_range, vision_range, velocity_weight_coef, boundary_repulsion, aging_factor, gp, gp_step, maze, through_walls)
if (not os.path.isdir('../outputs/' + name_of_experiment)):
os.mkdir('../outputs/' + name_of_experiment)
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step))):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step))
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data')):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data')
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data' + '/results')):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data' + '/results')
sim.directory = str('../outputs/' + name_of_experiment + '/data/results/'+ str(time.time()))
while os.path.isdir(sim.directory):
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/' + str(time.time()))
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/'+ str(time.time()))
while os.path.isdir(sim.directory):
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/' + str(time.time()))
directory = sim.directory
os.mkdir(directory)
sim.log_setup(directory)
# Adding new RL parameters to log #
with open(directory + "/log_setup.txt", "a") as file:
file.write('\n')
file.write('REINFORCEMENT LEARNING INFO:' + '\n')
file.write(' -- DISCOUNT RATE: ' + str(discount_rate) + '\n')
file.write(' -- LEARNING RATE: ' + str(learning_rate) + '\n')
file.write(' -- MAX STEPS PER ITERATION: ' + str(max_steps_per_episode) + '\n')
file.write(' -- MAX EXPLORATION RATE: ' + str(max_exploration_rate) + '\n')
file.write(' -- MIN EXPLORATION RATE: ' + str(min_exploration_rate) + '\n')
file.write(' -- EXPLORATION DECAY RATE: ' + str(exploration_decay_rate) + '\n')
file.write(' -- ALGORITHM: ' + str(rl_sim) + '\n')
file.write(' -- STATE SPACE SIZE: ' + str(gym_swarm_env.observation_space.n) + '\n')
file.write(' -- ACTION SPACE SIZE: ' + str(gym_swarm_env.action_space.size) + '\n')
file.write(' -- Q-TABLE SIZE: ' + str(q_table.shape) + '\n')
arcade.run()
########################
##### Exploration and explotation block. ####
exploration_rate_threshold = random.uniform(0, 1) # Setting a random number that will be compared to exploration_rate.
if exploration_rate_threshold > exploration_rate:
i, j = np.unravel_index(np.argmax(q_table[state, :]), q_table.shape)
print ("i ", i, " , j ", j)
#action = (i, j) # Choosing the action that had the highest q-value in q-table.
action = i*GRID_X + j # Choosing the action that had the highest q-value in q-table.
#print (action)
#exit(0)
else:
i = random.randint(0, GRID_X - 1)
j = random.randint(0, GRID_Y - 1)
action = i*GRID_X + j # Sample an action randomly to explore.
##### Exploration and explotation block. ####
##### Taking appropriate action after choosing the action. ####
new_state, reward, done, info, operator_cm = gym_swarm_env.step(action, sim.operator_list[0], GRID_X, GRID_Y) # Returns a tuple contaning the new state, the reward for this action, the end status of action, some additional info.
sim.operator_list[0].confidence_map = operator_cm
# Updating q-table values
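        # Bellman update: Q(s,a) <- (1 - lr) * Q(s,a) + lr * (reward + gamma * max_a' Q(s',a'))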
q_table[state, action]=q_table[state, action] * (1 - learning_rate) + \
learning_rate * (reward + discount_rate * | np.max(q_table[new_state, :]) | numpy.max |
import sys
import typing
import numpy as np
def main() -> typing.NoReturn:
n = int(input())
a = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(n, 5).sum(axis=1)
print( | np.sum(a < 20) | numpy.sum |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                print('\nrequest completed in %f minutes.' % ((i * 3) / 60))
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    Collected files are returned as a list of relative URLs for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF data files found in the THREDDS catalog
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
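# Fluorometer (FLORT) streams: seawater scattering coefficient, fluorometric chlorophyll-a, CDOM,
# volume scattering, and optical backscatter; the profiler variant adds interpolated CTD pressure.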
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
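# Direct covariance flux package (FDCHP): only the time base is mapped for this stream.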
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
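# Dissolved oxygen (DOSTA, plus the profiler DOFST): oxygen concentration variables, with optode
# temperature or interpolated CTD pressure where the stream provides them.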
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
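# Acoustic Doppler current profiler (ADCP): earth-coordinate velocity components per bin, with bin
# depths and instrument attitude (heading, pitch, roll).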
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
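# Bio-acoustic sonar (ZPLSC): only the time base is mapped; both telemetered and recovered-host
# streams are listed below.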
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
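# Surface wave statistics (WAVSS): zero-crossing counts, wave heights and periods, and mean wave
# direction/spread from the buoy-mounted wave sensor.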
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
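# Single-point velocity meter (VELPT): east/north/up velocities plus heading, pitch, roll,
# temperature, and pressure in the instrument's native scaled units.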
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
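# Seawater pCO2 (PCO2W): thermistor temperature and partial pressure of CO2 in seawater.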
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
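# Seawater pH (PHSEN): thermistor temperature and spectrophotometric seawater pH.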
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
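# Downwelling spectral irradiance on the NSIF: each branch below requests the
# spkir_abj_cspp_downwelling_vector (uW cm-2 nm-1) from the DCL-logged telemetered stream.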
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
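# Seafloor pressure (tide) recorder on the MFN packages: absolute seafloor pressure (dbar)
# and seawater temperature (degC) from the DCL tide-measurement stream.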
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
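# Pumped CTD. Every branch fills the same six variables (time, temp, practical_salinity,
# density, pressure, conductivity); only the reference designator changes with platform and
# node (RID16/RID27 on the NSIF, MFD37 on the MFN, SBD17 on the buoy).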
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
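# Point velocity meter on the MFN packages (vel3d_cd): east/north/up turbulent velocity
# components in m/s plus seawater pressure reported in thousandths of a dbar.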
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
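# Profiler-mounted point velocity meter on the CE09OSPM wire-following profiler (WFP01):
# velocity components, heading/pitch/roll, and the co-located CTD pressure (int_ctd_pressure).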
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
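#CTDPF - wire-following profiler CTD for CE09OSPM, grouped here with the other WFP01 streams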
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
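# Air-sea pCO2 system on the surface buoys: partial pressure of CO2 in surface seawater and
# in the overlying atmosphere (uatm) plus the derived air-sea CO2 flux.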
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
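# Photosynthetically available radiation on the CE09OSPM wire-following profiler, paired with
# the co-located CTD pressure (int_ctd_pressure) as a depth coordinate.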
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
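# Optical absorption/attenuation meter. Only the time coordinate is requested in these
# branches; the spectral channels are not copied into var_list.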
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
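# Nitrate sensor (SUNA) on the NSIF: nitrate_concentration and salinity_corrected_nitrate,
# both in umol/L. Note that the telemetered branches reference the 'suna_dcl_recovered'
# stream name as registered in uFrame.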
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## RecoveredHost (recovered_host) streams
#MOPAK
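# 3-axis motion pack (buoy accelerations); only the time coordinate is kept for these
# recovered-host branches.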
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
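# Bulk meteorology package on the surface buoys: sea-surface temperature/conductivity/salinity,
# corrected wind components, barometric pressure, air temperature, humidity, long- and
# shortwave irradiance, precipitation, the minute-averaged flux products, surface currents
# and specific humidity.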
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
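# Three-channel fluorometer/backscatter sensor: chlorophyll-a, CDOM and optical backscatter
# along with the underlying volume-scattering measurement.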
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
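# Direct covariance flux package on the CE02SHSM buoy; only the time coordinate is requested
# from the recovered-host stream.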
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
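# Dissolved oxygen optodes. NSIF branches return dissolved_oxygen (umol/kg) plus the optode's
# estimated concentration, temperature and temperature-corrected oxygen; MFN branches return
# only dissolved_oxygen and the optode oxygen value.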
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
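# Acoustic Doppler current profilers (earth-coordinate velocities): bin depths,
# heading/pitch/roll in deci-degrees and the east/north/up seawater velocity components in m/s.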
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
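# Surface Wave Spectra: bulk wave statistics (heights, periods, mean direction and spread) from the buoy wave sensor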
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
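# Single-point velocity meter: east/north/up velocities plus heading, pitch, roll, temperature and pressure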
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
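# the dcl-formatted stream above is commented out; this entry uses the velpt_ab_instrument_recovered stream instead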
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
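# Water-side pCO2: thermistor temperature and pCO2 of seawater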
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
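# Seawater pH: thermistor temperature and pH (unitless)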
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
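# Downwelling spectral irradiance (multi-wavelength vector, uW cm-2 nm-1)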
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
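# Seafloor pressure (tide) recorder: absolute seafloor pressure and seawater temperature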
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
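# Pumped CTD: temperature, practical salinity, density, pressure and conductivity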
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
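# 3-D single-point velocity meter: turbulent velocity components plus pressure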
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
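# Air-sea pCO2: surface seawater and atmospheric pCO2 plus the derived CO2 flux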
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
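# Optical absorption and attenuation meter; only the time coordinate is mapped for these streams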
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
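# Nitrate (SUNA): raw and salinity-corrected nitrate concentrations in umol/L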
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
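#CTD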
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
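#ADCP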
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
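#ZPLSC
# The zplsc_echogram_data streams only expose the time coordinate in this mapping.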
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
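#VELPT
# VELPT recovered_inst streams report attitude in deci-degrees, temperature as 0.01degC
# and pressure as 0.001dbar, as reflected in the units assigned below.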
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
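#VEL3D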
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
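#PRESF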
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
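#PHSEN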
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
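#PCO2W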
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
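#PARAD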
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
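#NUTNR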
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
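#FDCHP
# Only the time coordinate is mapped for the FDCHP recovered_inst stream here.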
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
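#FLORT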
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
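#DOSTA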
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
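#Cabled benthic package (BEP) streamed data below (CE02SHBP/CE04OSBP)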
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface-Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
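#Shelf surface-piercing profiler CSPP data below (CE02SHSP/CE07SHSP)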
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
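#Glider Data below (CE05MOAS mobile assets, node == 'GLIDER')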
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
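# CE05MOAS glider DOSTA (dissolved oxygen) datasets follow: oxygen concentration
# reported in umol/L and umol/kg, with interpolated CTD pressure and position,
# for both the telemetered and recovered_host delivery methods.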
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
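# CE05MOAS glider FLORT (fluorometer triplet) datasets follow: chlorophyll,
# CDOM, and optical backscatter from the flbbcd sensor, plus interpolated CTD
# pressure and position, for telemetered and recovered_host delivery methods.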
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
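    # CE05MOAS glider PARAD branches (photosynthetically available radiation) follow:
    # each stream carries time, parad_m_par, interpolated CTD pressure, and glider lat/lon.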
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
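    # CE05MOAS glider ADCP recovered-host velocity branches follow: bin depths, attitude
    # (heading/pitch/roll), east/north/up seawater velocities, CTD pressure, and lat/lon.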
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
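    # Surface-mooring METBK hourly bulk-flux branches (METBK1-hr) follow: rain rate,
    # freshwater/heat/momentum and radiative flux terms, specific humidity, stability,
    # air and skin temperature, 10 m wind, and hourly net shortwave irradiance.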
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
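    # Surface-mooring WAVSS mean-directional wave-spectra branches (WAVSS_MeanDir) follow:
    # mean direction, spectral band layout, directional PSD, spread, and corrected
    # wave directions.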
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
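# WAVSS non-directional spectra: each block maps the 6-variable layout
# (time, number_bands, initial_frequency, frequency_spacing, psd_non_directional,
# and the derived frequency array) for one mooring/method combination.
# Hypothetical post-processing sketch (not part of this mapping): once the data
# arrays are filled, the frequency axis for a single record could be rebuilt as
#   freq = initial_frequency + frequency_spacing * np.arange(int(number_bands))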
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
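# WAVSS buoy-motion streams: time series of heave/north/east displacement
# offsets plus the magnetically corrected buoy motion
# (wavss_a_magcor_buoymotion_x/y), repeated for each Coastal Endurance surface
# mooring and delivery method.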
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
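# WAVSS Fourier-coefficient streams: directional wave spectra reported as a
# 2-D Fourier coefficient array together with the band and directional-band
# spectral metadata, again one block per mooring and delivery method.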
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
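# CE04OSPS shallow profiler (SF01B, streamed) and CE04OSPD deep profiler
# (DP01B, recovered_inst / recovered_wfp) instrument streams. The deep-profiler
# blocks use the instrument-native parameter names (e.g. 'temp', 'pressure',
# 'dpc_ctd_seawater_conductivity') rather than the streamed L2 names.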
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
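# CE04OSPD deep profiler VEL3D (acoustic current meter): eastward/northward
# velocity plus separate upward-velocity estimates for the ascending and
# descending portions of the profile, referenced to the interpolated CTD pressure.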
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
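# CE04OSPS 200 m platform (PC01B) streamed instruments: CTD, optode oxygen
# (served from the co-located ctdpf_optode_sample stream), pH, and pCO2.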
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
# Coastal Pioneer CSM Data Streams
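# All Pioneer METBK blocks below share the same 19-variable surface-meteorology
# layout (bulk SST/conductivity/salinity, magnetically corrected winds, pressure,
# air temperature, humidity, radiation, precipitation, minute flux products, and
# the eastward/northward velocity fields); only the reference designator and
# delivery method change between blocks.
# Hypothetical usage sketch (assumes this chain lives inside a selection function,
# e.g. something like get_vars(platform_name, node, instrument_class, method)):
#   vars = get_vars('CP01CNSM', 'BUOY', 'METBK1', 'Telemetered')
#   print(vars[1].name, vars[1].units)   # sea_surface_temperature, degC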
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from tabulate import tabulate
from zepid.calc.utils import (risk_ci, incidence_rate_ci, risk_ratio, risk_difference, number_needed_to_treat,
odds_ratio, incidence_rate_difference, incidence_rate_ratio, sensitivity, specificity)
#########################################################################################################
# Measures of effect / association
#########################################################################################################
class RiskRatio:
r"""Estimate of Risk Ratio with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk ratio is calculated from
.. math::
RR = \frac{\Pr(Y|A=1)}{\Pr(Y|A=0)}
Risk ratio standard error is
.. math::
SE = \left(\frac{1}{a} - \frac{1}{a + b} + \frac{1}{c} - \frac{1}{c + d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only supports binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the risk ratio in a data set
>>> from zepid import RiskRatio, load_sample_data
>>> df = load_sample_data(False)
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Calculate the risk ratio with exposure of '1' as the reference category
>>> rr = RiskRatio(reference=1)
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Generate a plot of the calculated risk ratio(s)
>>> import matplotlib.pyplot as plt
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_ratio = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Ratio given a data set
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rr_lcl = []
rr_ucl = []
rr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
self.risk_ratio.append(1)
rr_lcl.append(None)
rr_ucl.append(None)
rr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
ri, lr, ur, sd, *_ = risk_ci(events=a, total=(a+b), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
em, lcl, ucl, sd, *_ = risk_ratio(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.risk_ratio.append(em)
rr_lcl.append(lcl)
rr_ucl.append(ucl)
rr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['Risk'] = self.risks
rf['SD(Risk)'] = risk_sd
rf['Risk_LCL'] = risk_lcl
rf['Risk_UCL'] = risk_ucl
rf['RiskRatio'] = self.risk_ratio
rf['SD(RR)'] = rr_sd
rf['RR_LCL'] = rr_lcl
rf['RR_UCL'] = rr_ucl
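# CLR: confidence limit ratio (upper limit divided by lower limit), a relative measure of CI width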
rf['CLR'] = rf['RR_UCL'] / rf['RR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Risk Ratio ')
print('======================================================================')
print(self.results[['Risk', 'SD(Risk)', 'Risk_LCL', 'Risk_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskRatio', 'SD(RR)', 'RR_LCL', 'RR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, measure='risk_ratio', scale='linear', center=1, **errorbar_kwargs):
"""Plot the risk ratios or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display risk ratios or risks. Default is to display the risk ratio. Options are;
* 'risk_ratio' : display risk ratios
* 'risk' : display risks
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : float, optional
Sets a reference line. For the risk ratio, the reference line defaults to 1. For risks, no reference line is
displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'risk_ratio':
ax = _plotter(estimate=self.results['RiskRatio'], lcl=self.results['RR_LCL'], ucl=self.results['RR_UCL'],
labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Risk Ratio')
elif measure == 'risk':
ax = _plotter(estimate=self.results['Risk'], lcl=self.results['Risk_LCL'], ucl=self.results['Risk_UCL'],
labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Risk')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "risk_ratio" or "risk" for plots')
return ax
class RiskDifference:
r"""Estimate of Risk Difference with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk difference is calculated as
.. math::
RD = \Pr(Y|A=1) - \Pr(Y|A=0)
Risk difference standard error is calculated as
.. math::
SE = \left(\frac{R_1 \times (1 - R_1)}{a+b} + \frac{R_0 \times (1-R_0)}{c+d}\right)^{\frac{1}{2}}
In addition to confidence intervals, the Frechet bounds are calculated as well. These probability bounds provide a
useful point of comparison: the true causal risk difference in the sample must lie within them. The only
assumptions these bounds require are no measurement error, causal consistency, no selection bias, and that any
missing data are MCAR. The bounds are always unit width (width of one), but they require no assumptions regarding
confounding / conditional exchangeability. They are calculated via the following formulas
.. math::
Lower = \Pr(Y|A=a)\Pr(A=a) - \Pr(Y|A \ne a)\Pr(A \ne a) - \Pr(A=a)\\
Upper = \Pr(Y|A=a)\Pr(A=a) + \Pr(A \ne a) - \Pr(Y|A \ne a)\Pr(A \ne a)
For further details on these bounds, see the references
Note
----
Outcome must be coded as (1: yes, 0:no). Only supports binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
References
----------
Cole SR et al. (2019) Nonparametric Bounds for the Risk Function. American Journal of Epidemiology. 188(4), 632-636
Examples
--------
Calculate the risk difference in a data set
>>> from zepid import RiskDifference, load_sample_data
>>> df = load_sample_data(False)
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Calculate the risk difference with exposure of '1' as the reference category
>>> rd = RiskDifference(reference=1)
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Generate a plot of the calculated risk difference(s)
>>> import matplotlib.pyplot as plt
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_difference = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self.n = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Difference
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
n = df.dropna(subset=[exposure, outcome]).shape[0]
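# n counts observations with both exposure and outcome observed; it is the denominator used for the Frechet bounds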
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rd_lcl = []
rd_ucl = []
rd_sd = []
fr_lower = []
fr_upper = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:' + str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
self.risk_difference.append(0)
rd_lcl.append(None)
rd_ucl.append(None)
rd_sd.append(None)
fr_lower.append(None)
fr_upper.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
ri, lr, ur, sd, *_ = risk_ci(events=a, total=(a + b), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
em, lcl, ucl, sd, *_ = risk_difference(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.risk_difference.append(em)
rd_lcl.append(lcl)
rd_ucl.append(ucl)
rd_sd.append(sd)
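# Frechet (nonparametric) bounds on the causal risk difference, following Cole et al. (2019); see the class docstring for the formulas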
fr_lower.append(ri*((a+b)/n) - (1-ri)*(1 - (a+b)/n) - ((a+b)/n))
fr_upper.append(ri*((a+b)/n) + (1 - (a+b)/n) - (1-ri)*(1 - (a+b)/n))
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self.n = n
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['Risk'] = self.risks
rf['SD(Risk)'] = risk_sd
rf['Risk_LCL'] = risk_lcl
rf['Risk_UCL'] = risk_ucl
rf['RiskDifference'] = self.risk_difference
rf['SD(RD)'] = rd_sd
rf['RD_LCL'] = rd_lcl
rf['RD_UCL'] = rd_ucl
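# CLD: confidence limit difference (upper limit minus lower limit), i.e. the width of the CI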
rf['CLD'] = rf['RD_UCL'] - rf['RD_LCL']
rf['LowerBound'] = fr_lower
rf['UpperBound'] = fr_upper
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Risk Difference ')
print('======================================================================')
print(self.results[['Risk', 'SD(Risk)', 'Risk_LCL', 'Risk_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskDifference', 'SD(RD)', 'RD_LCL', 'RD_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskDifference', 'CLD', 'LowerBound', 'UpperBound']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, measure='risk_difference', center=0, **errorbar_kwargs):
"""Plot the risk differences or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display risk differences or risks. Default is to display the risk difference. Options are;
* 'risk_difference' : display risk differences
* 'risk' : display risks
center : float, optional
Sets a reference line. For the risk difference, the reference line defaults to 0. For risks, no reference
line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'risk_difference':
ax = _plotter(estimate=self.results['RiskDifference'], lcl=self.results['RD_LCL'],
ucl=self.results['RD_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
ax.set_title('Risk Difference')
elif measure == 'risk':
ax = _plotter(estimate=self.results['Risk'], lcl=self.results['Risk_LCL'], ucl=self.results['Risk_UCL'],
labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Risk')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "risk_difference" or "risk" for plots')
return ax
class NNT:
r"""Estimates of Number Needed to Treat. NNT (1-alpha)*100% confidence interval presentation is based on
Altman, DG (BMJ 1998). Missing data is ignored
Number needed to treat is calculated as
.. math::
NNT = \frac{1}{RD}
Risk difference and the corresponding confidence intervals come from
.. math::
RD = \Pr(Y|A=1) - \Pr(Y|A=0)
Risk difference standard error is calculated as
.. math::
SE = \left(\frac{R_1 \times (1 - R_1)}{a+b} + \frac{R_0 \times (1-R_0)}{c+d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the number needed to treat in a data set
>>> from zepid import NNT, load_sample_data
>>> df = load_sample_data(False)
>>> nnt = NNT()
>>> nnt.fit(df, exposure='art', outcome='dead')
>>> nnt.summary()
Calculate the number needed to treat with '1' as the reference category
>>> nnt = NNT(reference=1)
>>> nnt.fit(df, exposure='art', outcome='dead')
>>> nnt.summary()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.number_needed_to_treat = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the NNT
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
nnt_lcl = []
nnt_ucl = []
nnt_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:' + str(self.reference))
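# The reference category is compared with itself, so the risk difference is 0 and the NNT is infinite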
self.number_needed_to_treat.append(np.inf)
nnt_lcl.append(None)
nnt_ucl.append(None)
nnt_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
em, lcl, ucl, sd, *_ = number_needed_to_treat(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.number_needed_to_treat.append(em)
nnt_lcl.append(lcl)
nnt_ucl.append(ucl)
nnt_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['NNT'] = self.number_needed_to_treat
rf['SD(RD)'] = nnt_sd
rf['NNT_LCL'] = nnt_lcl
rf['NNT_UCL'] = nnt_ucl
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for i, r in self.results.iterrows():
if i == self._labels[0]:
pass
else:
print('======================================================================')
print(' Number Needed to Treat/Harm ')
print('======================================================================')
if r['NNT'] == np.inf:
print('Number Needed to Treat = infinite')
else:
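# Sign convention per Altman (BMJ 1998): a positive estimate is reported as number needed to harm, a negative one as number needed to treat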
if r['NNT'] > 0:
print('Number Needed to Harm: ', round(abs(r['NNT']), decimal))
if r['NNT'] < 0:
print('Number Needed to Treat: ', round(abs(r['NNT']), decimal))
print('----------------------------------------------------------------------')
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: ')
if r['NNT_LCL'] < 0 < r['NNT_UCL']:
print('NNT ', round(abs(r['NNT_LCL']), decimal), 'to infinity to NNH ',
round(abs(r['NNT_UCL']), decimal))
elif 0 < r['NNT_LCL']:
print('NNT ', round(abs(r['NNT_LCL']), decimal), ' to ', round(abs(r['NNT_UCL']), decimal))
else:
print('NNH ', round(abs(r['NNT_LCL']), decimal), ' to ', round(abs(r['NNT_UCL']), decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
class OddsRatio:
r"""Estimates of Odds Ratio with a (1-alpha)*100% Confidence interval. Missing data is ignored
Odds ratio is calculated from
.. math::
OR = \frac{\Pr(Y|A=1)}{1 - \Pr(Y|A=1)} / \frac{\Pr(Y|A=0)}{1 - \Pr(Y|A=0)}
Odds ratio standard error is
.. math::
SE = \left(\frac{1}{a} + \frac{1}{b} + \frac{1}{c} + \frac{1}{d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
---------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the odds ratio in a data set
>>> from zepid import OddsRatio, load_sample_data
>>> df = load_sample_data(False)
>>> ort = OddsRatio()
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.summary()
Calculate the odds ratio with exposure of '1' as the reference category
>>> ort = OddsRatio(reference=1)
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.summary()
Generate a plot of the calculated odds ratio(s)
>>> import matplotlib.pyplot as plt
>>> ort = OddsRatio()
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.odds_ratio = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the Odds Ratio
Parameters
---------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
odr_lcl = []
odr_ucl = []
odr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:'+str(self.reference))
self.odds_ratio.append(1)
odr_lcl.append(None)
odr_ucl.append(None)
odr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
em, lcl, ucl, sd, *_ = odds_ratio(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.odds_ratio.append(em)
odr_lcl.append(lcl)
odr_ucl.append(ucl)
odr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['OddsRatio'] = self.odds_ratio
rf['SD(OR)'] = odr_sd
rf['OR_LCL'] = odr_lcl
rf['OR_UCL'] = odr_ucl
rf['CLR'] = rf['OR_UCL'] / rf['OR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
---------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Odds Ratio ')
print('======================================================================')
print(self.results[['OddsRatio', 'SD(OR)', 'OR_LCL', 'OR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, scale='linear', center=1, **errorbar_kwargs):
"""Plot the odds ratios along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : float, optional
Sets a reference line. The reference line defaults to 1.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
ax = _plotter(estimate=self.results['OddsRatio'], lcl=self.results['OR_LCL'], ucl=self.results['OR_UCL'],
labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Odds Ratio')
return ax
class IncidenceRateRatio:
r"""Estimates of Incidence Rate Ratio with a (1-alpha)*100% Confidence interval. Missing data is ignored
Incidence rate ratio is calculated from
.. math::
IRR = \frac{a}{t_1} / \frac{c}{t_0}
Incidence rate ratio standard error is
.. math::
SE = \left(\frac{1}{a} + \frac{1}{c}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
------------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the incidence rate ratio in a data set
>>> from zepid import IncidenceRateRatio, load_sample_data
>>> df = load_sample_data(False)
>>> irr = IncidenceRateRatio()
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.summary()
Calculate the incidence rate ratio with exposure of '1' as the reference category
>>> irr = IncidenceRateRatio(reference=1)
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.summary()
Generate a plot of the calculated incidence rate ratio(s)
>>> import matplotlib.pyplot as plt
>>> irr = IncidenceRateRatio()
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.incidence_rate = []
self.incidence_rate_ratio = []
self.results = None
self._a_list = []
self._a_time_list = []
self._c = None
self._c_time = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self._missing_t = None
def fit(self, df, exposure, outcome, time):
"""Calculate the Incidence Rate Ratio
Parameters
------------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
time : string
Column name of time contributed
"""
# Setting up holders for results
ir_lcl = []
ir_ucl = []
ir_sd = []
irr_lcl = []
irr_ucl = []
irr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._c_time = df.loc[df[exposure] == self.reference][time].sum()
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = incidence_rate_ci(events=self._c, time=self._c_time, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
self.incidence_rate_ratio.append(1)
irr_lcl.append(None)
irr_ucl.append(None)
irr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
a_t = df.loc[df[exposure] == i][time].sum()
self._a_time_list.append(a_t)
ri, lr, ur, sd, *_ = incidence_rate_ci(events=a, time=a_t, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
em, lcl, ucl, sd, *_ = incidence_rate_ratio(a=a, t1=a_t, c=self._c, t2=self._c_time, alpha=self.alpha)
self.incidence_rate_ratio.append(em)
irr_lcl.append(lcl)
irr_ucl.append(ucl)
irr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self._missing_t = df.loc[df[time].isnull()].shape[0]
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['IncRate'] = self.incidence_rate
rf['SD(IncRate)'] = ir_sd
rf['IncRate_LCL'] = ir_lcl
rf['IncRate_UCL'] = ir_ucl
rf['IncRateRatio'] = self.incidence_rate_ratio
rf['SD(IRR)'] = irr_sd
rf['IRR_LCL'] = irr_lcl
rf['IRR_UCL'] = irr_ucl
rf['CLR'] = rf['IRR_UCL'] / rf['IRR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, a_t, l in zip(self._a_list, self._a_time_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, a_t], ['E=0', self._c, self._c_time]], headers=['', 'D=1', 'Person-time'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Incidence Rate Ratio ')
print('======================================================================')
print(self.results[['IncRate', 'SD(IncRate)', 'IncRate_LCL', 'IncRate_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['IncRateRatio', 'SD(IRR)', 'IRR_LCL', 'IRR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('Missing T: ', self._missing_t)
print('======================================================================')
def plot(self, measure='incidence_rate_ratio', scale='linear', center=1, **errorbar_kwargs):
"""Plot the incidence rate ratios or the incidence rates along with their corresponding confidence intervals. This
option is an alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display incidence rate ratios or incidence rates. Default is to display the incidence rate ratio.
Options are;
* 'incidence_rate_ratio' : display incidence rate ratios
* 'incidence_rate' : display incidence rates
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : float, optional
Sets a reference line. For the incidence rate ratio, the reference line defaults to 1. For incidence rates,
no reference line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'incidence_rate_ratio':
ax = _plotter(estimate=self.results['IncRateRatio'], lcl=self.results['IRR_LCL'],
ucl=self.results['IRR_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Incidence Rate Ratio')
elif measure == 'incidence_rate':
ax = _plotter(estimate=self.results['IncRate'], lcl=self.results['IncRate_LCL'],
ucl=self.results['IncRate_UCL'], labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Incidence Rate')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "incidence_rate_ratio" or "incidence_rate" for plots')
return ax
class IncidenceRateDifference:
r"""Estimates of Incidence Rate Difference with a (1-alpha)*100% Confidence interval. Missing data is ignored.
Incidence rate difference is calculated from
.. math::
IRD = \frac{a}{t_1} - \frac{c}{t_0}
Incidence rate difference standard error is
.. math::
SE = \left(\frac{a}{t_1^2} + \frac{c}{t_0^2}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
----------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the incidence rate difference in a data set
>>> from zepid import IncidenceRateDifference, load_sample_data
>>> df = load_sample_data(False)
>>> ird = IncidenceRateDifference()
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.summary()
Calculate the incidence rate difference with exposure of '1' as the reference category
>>> ird = IncidenceRateDifference(reference=1)
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.summary()
Generate a plot of the calculated incidence rate difference(s)
>>> import matplotlib.pyplot as plt
>>> ird = IncidenceRateDifference()
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.incidence_rate = []
self.incidence_rate_difference = []
self.results = None
self._a_list = []
self._a_time_list = []
self._c = None
self._c_time = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self._missing_t = None
def fit(self, df, exposure, outcome, time):
"""Calculates the Incidence Rate Difference
Parameters
----------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : str
Column name of exposure variable
outcome : str
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
time : str
Column name of time variable
"""
# Setting up holders for results
ir_lcl = []
ir_ucl = []
ir_sd = []
ird_lcl = []
ird_ucl = []
ird_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._c_time = df.loc[df[exposure] == self.reference][time].sum()
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = incidence_rate_ci(events=self._c, time=self._c_time, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
self.incidence_rate_difference.append(0)
ird_lcl.append(None)
ird_ucl.append(None)
ird_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
a_t = df.loc[df[exposure] == i][time].sum()
self._a_time_list.append(a_t)
ri, lr, ur, sd, *_ = incidence_rate_ci(events=a, time=a_t, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
em, lcl, ucl, sd, *_ = incidence_rate_difference(a=a, t1=a_t, c=self._c, t2=self._c_time, alpha=self.alpha)
self.incidence_rate_difference.append(em)
ird_lcl.append(lcl)
ird_ucl.append(ucl)
ird_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self._missing_t = df.loc[df[time].isnull()].shape[0]
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['IncRate'] = self.incidence_rate
rf['SD(IncRate)'] = ir_sd
rf['IncRate_LCL'] = ir_lcl
rf['IncRate_UCL'] = ir_ucl
rf['IncRateDiff'] = self.incidence_rate_difference
rf['SD(IRD)'] = ird_sd
rf['IRD_LCL'] = ird_lcl
rf['IRD_UCL'] = ird_ucl
rf['CLD'] = rf['IRD_UCL'] - rf['IRD_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
----------------
decimal : integer, optional
Decimal places to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, a_t, l in zip(self._a_list, self._a_time_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, a_t], ['E=0', self._c, self._c_time]], headers=['', 'D=1', 'Person-time'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Incidence Rate Difference ')
print('======================================================================')
print(self.results[['IncRate', 'SD(IncRate)', 'IncRate_LCL', 'IncRate_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['IncRateDiff', 'SD(IRD)', 'IRD_LCL', 'IRD_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('Missing T: ', self._missing_t)
print('======================================================================')
def plot(self, measure='incidence_rate_difference', center=0, **errorbar_kwargs):
"""Plot the incidence rate differences or the incidence rates along with their corresponding confidence
intervals. This option is an alternative to summary(), which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display incidence rate differences or incidence rates. Default is to display the incidence rate
difference. Options are;
* 'incidence_rate_difference' : display incidence rate differences
* 'incidence_rate' : display incidence rates
center : float, optional
Sets a reference line. For the incidence rate difference, the reference line defaults to 0. For incidence
rates, no reference line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'incidence_rate_difference':
ax = _plotter(estimate=self.results['IncRateDiff'], lcl=self.results['IRD_LCL'],
ucl=self.results['IRD_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
ax.set_title('Incidence Rate Difference')
elif measure == 'incidence_rate':
ax = _plotter(estimate=self.results['IncRate'], lcl=self.results['IncRate_LCL'],
ucl=self.results['IncRate_UCL'], labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Incidence Rate')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "incidence_rate_difference" or "incidence_rate" for plots')
return ax
def _plotter(estimate, lcl, ucl, labels, center=0, **errorbar_kwargs):
"""
Plot functionality used by all the measure classes. Internal helper for their plotting methods.
The main function is matplotlib.errorbar, see defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
"""
ypoints = np.arange(len(labels))
ax = plt.gca()
errorbar_kwargs.setdefault('fmt', 'o')
errorbar_kwargs.setdefault('color', 'k')
absolute_errors_from_estimate = np.abs(estimate.values - np.vstack((lcl, ucl)))
"""
N phase calculation engine
(c) <NAME>, 2018
"""
import numpy as np
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix, hstack, vstack
from scipy.sparse.linalg import factorized
from scipy.sparse.linalg import splu
def gauss_seidel_power_flow(Vbus, Sbus, Ibus, Ybus,
P0, Q0, exp_p, exp_q, V0,
A, B, C,
pq, pv, tol, max_iter, verbose=False):
"""
Gauss-Seidel power flow
:param Vbus: Bus voltage complex vector
:param Sbus: Bus complex power injections vector
:param Ibus: Bus complex current injections vector
:param Ybus: Nodal admittance matrix (complex and sparse)
:param P0: Exponential load parameter P0
:param Q0: Exponential load parameter Q0
:param exp_p: Exponential load parameter exp_p
:param exp_q: Exponential load parameter exp_q
:param V0: Exponential load parameter V0
:param A: Polynomial load parameter A
:param B: Polynomial load parameter B
:param C: Polynomial load parameter C
:param pq: list of pq marked nodes
:param pv: list of pv marked nodes
:param tol: tolerance of the solution
:param max_iter: Maximum number of iterations
:param verbose: Print the iteration count and error if True
:return: Voltage vector (solution), converged?, power error
"""
factor = 0.9
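# factor is a relaxation coefficient applied to each Gauss-Seidel voltage correction; values below 1 damp the updates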
V = Vbus.copy()
Vm = np.abs(V)
# compute error
mis = V * np.conj(Ybus * V - Ibus) - Sbus
F = np.r_[mis[pv].real, mis[pq].real, mis[pq].imag]
error = np.linalg.norm(F, np.Inf)
# check convergence
converged = error < tol
# Gauss-Seidel
iter_ = 0
while not converged and iter_ < max_iter:
# compute the exponential load model injection
Vm = np.abs(V)
Pexp = P0 / (np.power(V0, exp_p)) * np.power(Vm, exp_p)
Qexp = Q0 / (np.power(V0, exp_q)) * np.power(Vm, exp_q)
Sexp = Pexp + 1j * Qexp
# compute the polynomial load model
Spoly = A + B * Vm + C * np.power(Vm, 2.0)
for k in pq:
V[k] += factor * (np.conj((Sbus[k] - Sexp[k] - Spoly[k]) / V[k] + Ibus[k]) - Ybus[k, :] * V) / Ybus[k, k] # compute the voltage
for k in pv:
# get the reactive power
Q = (V[k] * np.conj(Ybus[k, :] * V - Ibus[k])).imag
# compose the new complex power
Sbus[k] = Sbus[k].real + 1j * Q
# compute the voltage
V[k] += factor * (np.conj((Sbus[k] - Sexp[k] - Spoly[k]) / V[k]) - Ybus[k, :] * V) / Ybus[k, k]
# correct the voltage with the specified module of the voltage
V[k] *= Vm[k] / np.abs(V[k])
# compute error
Scalc = V * np.conj(Ybus * V - Ibus) # computed nodal power
mis = Scalc - (Sbus - Sexp - Spoly) # power mismatch
F = np.r_[mis[pv].real, mis[pq].real, mis[pq].imag] # array of particular mismatch values
error = np.linalg.norm(F, np.Inf) # infinite norm of the mismatch vector
# check convergence
converged = error < tol
iter_ += 1
if verbose:
print('V: iter:', iter_, 'err:', error)
print(np.abs(V))
return V, converged, error
def jacobian(Ybus, Vbus, Ibus, pq, pvpq):
"""
Computes the system Jacobian matrix
Args:
Ybus: Admittance matrix
Vbus: Array of nodal voltages
Ibus: Array of nodal current injections
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses in that precise order
Returns:
The system Jacobian matrix
"""
ib = range(len(Vbus))
Ibus = Ybus * Vbus - Ibus
diagV = csc_matrix((Vbus, (ib, ib)))
diagIbus = csc_matrix((Ibus, (ib, ib)))
diagVnorm = csc_matrix((Vbus / np.abs(Vbus), (ib, ib)))
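# Partial derivatives of the complex power injections with respect to voltage magnitude (dS_dVm) and angle (dS_dVa)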
dS_dVm = diagV * np.conj(Ybus * diagVnorm) + np.conj(diagIbus) * diagVnorm
dS_dVa = 1j * diagV * np.conj(diagIbus - Ybus * diagV)
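# Standard 2x2 block structure: J11 = dP/dVa, J12 = dP/dVm, J21 = dQ/dVa, J22 = dQ/dVm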
J11 = dS_dVa[np.array([pvpq]).T, pvpq].real
J12 = dS_dVm[np.array([pvpq]).T, pq].real
J21 = dS_dVa[np.array([pq]).T, pvpq].imag
J22 = dS_dVm[np.array([pq]).T, pq].imag
J = vstack([hstack([J11, J12]),
hstack([J21, J22])], format="csr")
return J
def newton_raphson_power_flow(Vbus, Sbus, Ibus, Ybus,
P0, Q0, exp_p, exp_q, V0,
A, B, C,
pq, pv, tol, max_iter, verbose=False):
"""
Solves the power flow using a full Newton's method with the Iwamoto optimal step factor.
Args:
Vbus: Array of nodal voltages (initial solution)
Sbus: Array of nodal power injections
Ibus: Array of nodal current injections
Ybus: Admittance matrix
P0: Exponential load parameter P0
Q0: Exponential load parameter Q0
exp_p: Exponential load parameter exp_p
exp_q: Exponential load parameter exp_q
V0: Exponential load parameter V0
A: Polynomial load parameter A
B: Polynomial load parameter B
C: Polynomial load parameter C
pv: Array with the indices of the PV buses
pq: Array with the indices of the PQ buses
tol: Tolerance
max_iter: Maximum number of iterations
verbose: Print the iteration count and error if True
Returns:
Voltage solution, converged?, error, calculated power injections
@author: <NAME> (PSERC Cornell)
@Author: <NAME>
"""
# initialize
converged = 0
iter_ = 0
V = Vbus
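# Polar formulation: the Newton-Raphson state is the voltage angle at PV/PQ buses and the voltage magnitude at PQ buses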
Va = np.angle(V)
Vm = np.abs(V)
dVa = np.zeros_like(Va)
from __future__ import absolute_import, print_function, division
import os
import numpy as np
import unittest
import theano
from theano import config, function, tensor
from theano.compat import PY3
from theano.misc.pkl_utils import CompatUnpickler
from theano.sandbox import multinomial
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import theano.tests.unittest_tools as utt
from .config import mode_with_gpu
from ..multinomial import (GPUAMultinomialFromUniform,
GPUAChoiceFromUniform)
def test_multinomial_output_dtype():
# This tests the MultinomialFromUniform Op directly, not going through the
# multinomial() call in GPU random generation.
p = tensor.fmatrix()
u = tensor.fvector()
for dtype in ['int64', 'float32', 'float16', 'float64', 'int32', 'auto']:
m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
# the m*2 allows the multinomial to reuse output
f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
# test that both first and second samples can be drawn
utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
[[2, 0], [0, 2]])
# test that both second labels can be drawn
r = f([[.2, .8], [.3, .7]], [.31, .31])
utt.assert_allclose(r, [[0, 2], [0, 2]])
# test that both first labels can be drawn
r = f([[.2, .8], [.3, .7]], [.21, .21])
utt.assert_allclose(r, [[0, 2], [2, 0]])
# change the size to make sure output gets reallocated ok
# and also make sure that the GPU version doesn't screw up the
# transposed-ness
r = f([[.2, .8]], [.25])
utt.assert_allclose(r, [[0, 2]])
def test_multinomial_input_dtype():
# This tests the MultinomialFromUniform Op directly, not going through the
# multinomial() call in GPU random generation.
for idtype in ['float32', 'float16', 'float64']:
for odtype in ['float32', 'float16', 'float64', 'int32']:
p = tensor.matrix('p', idtype)
u = tensor.vector('u', idtype)
# p = tensor.dmatrix('p')
# u = tensor.dvector('u')
m = theano.sandbox.multinomial.MultinomialFromUniform(odtype)(p, u)
# the m*2 allows the multinomial to reuse output
f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
# test that both first and second samples can be drawn
utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
[[2, 0], [0, 2]])
# test that both second labels can be drawn
r = f([[.2, .8], [.3, .7]], [.31, .31])
utt.assert_allclose(r, [[0, 2], [0, 2]])
# test that both first labels can be drawn
r = f([[.2, .8], [.3, .7]], [.21, .21])
utt.assert_allclose(r, [[0, 2], [2, 0]])
# change the size to make sure output gets reallocated ok
# and also make sure that the GPU version doesn't screw up the
# transposed-ness
r = f([[.2, .8]], [.25])
utt.assert_allclose(r, [[0, 2]])
# TODO: check a bigger example (make sure blocking on GPU is handled correctly)
def test_multinomial_large():
# DEBUG_MODE will test this on GPU
p = tensor.fmatrix()
u = tensor.fvector()
m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
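# Rows are increasing ramps normalized to sum to one; a uniform draw of 0.5 makes every row sample its third column (index 2)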
pval = np.arange(10000 * 4,
dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = np.ones_like(pval[:, 0]) * 0.5
mval = f(pval, uval)
assert mval.shape == pval.shape
if config.cast_policy == 'custom':
assert mval.dtype == pval.dtype
elif config.cast_policy == 'numpy+floatX':
assert mval.dtype == config.floatX
elif config.cast_policy == 'numpy':
assert mval.dtype == 'float64'
else:
raise NotImplementedError(config.cast_policy)
utt.assert_allclose(mval.sum(axis=1), 2)
asdf = np.asarray([0, 0, 2, 0]) + 0 * pval
utt.assert_allclose(mval, asdf) # broadcast over all rows
def test_gpu_opt_dtypes():
# Test if the returned samples are of the datatype specified
for dtype in ['uint32', 'float32', 'int64', 'float64']:
p = tensor.fmatrix()
u = tensor.fvector()
m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
f = function([p, u], m, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = np.ones_like(pval[:, 0]) * 0.5
samples = f(pval, uval)
assert samples.dtype == dtype, "%s != %s" % (samples.dtype, dtype)
def test_gpu_opt():
# Does have some overlap with test_multinomial_0
# We test the case where we put the op on the gpu when the output
# is moved to the gpu.
p = tensor.fmatrix()
u = tensor.fvector()
m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
assert m.dtype == 'float32', m.dtype
f = function([p, u], m, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = np.ones_like(pval[:, 0]) * 0.5
f(pval, uval)
# Test with a row, it was failing in the past.
r = tensor.frow()
m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(r, u)
assert m.dtype == 'float32', m.dtype
f = function([r, u], m, allow_input_downcast=True, mode=mode_with_gpu)
assert any([type(node.op) is GPUAMultinomialFromUniform
for node in f.maker.fgraph.toposort()])
pval = np.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = np.ones_like(pval[:, 0]) * 0.5
f(pval, uval)
class test_OP_wor(unittest.TestCase):
def test_select_distinct(self):
# Tests that ChoiceFromUniform always selects distinct elements
p = tensor.fmatrix()
u = tensor.fvector()
n = tensor.iscalar()
m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)
f = function([p, u, n], m, allow_input_downcast=True)
n_elements = 1000
all_indices = range(n_elements)
np.random.seed(12345)
for i in [5, 10, 50, 100, 500, n_elements]:
uni = np.random.rand(i).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, uni, i)
res = np.squeeze(res)
assert len(res) == i, res
assert np.all(np.in1d(np.unique(res), all_indices)), res
def test_fail_select_alot(self):
# Tests that ChoiceFromUniform fails when asked to sample more
# elements than the actual number of elements
p = tensor.fmatrix()
u = tensor.fvector()
n = tensor.iscalar()
m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)
f = function([p, u, n], m, allow_input_downcast=True)
n_elements = 100
n_selected = 200
np.random.seed(12345)
uni = np.random.rand(n_selected).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, uni, n_selected)
def test_select_proportional_to_weight(self):
# Tests that ChoiceFromUniform selects elements, on average,
# proportional to the their probabilities
p = tensor.fmatrix()
u = tensor.fvector()
n = tensor.iscalar()
m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)
f = function([p, u, n], m, allow_input_downcast=True)
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
uni = np.random.rand(n_selected).astype(config.floatX)
res = f(pvals, uni, n_selected)
res = np.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
avg_diff = np.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol, avg_diff
class test_function_wor(unittest.TestCase):
def test_select_distinct(self):
# Tests that multinomial_wo_replacement always selects distinct elements
th_rng = RandomStreams(12345)
p = tensor.fmatrix()
n = tensor.iscalar()
m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
f = function([p, n], m, allow_input_downcast=True)
n_elements = 1000
all_indices = range(n_elements)
np.random.seed(12345)
for i in [5, 10, 50, 100, 500, n_elements]:
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
res = f(pvals, i)
res = np.squeeze(res)
assert len(res) == i
assert np.all(np.in1d(np.unique(res), all_indices)), res
def test_fail_select_alot(self):
# Tests that multinomial_wo_replacement fails when asked to sample more
# elements than the actual number of elements
th_rng = RandomStreams(12345)
p = tensor.fmatrix()
n = tensor.iscalar()
m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
f = function([p, n], m, allow_input_downcast=True)
n_elements = 100
n_selected = 200
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, n_selected)
def test_select_proportional_to_weight(self):
# Tests that multinomial_wo_replacement selects elements, on average,
# proportional to the their probabilities
th_rng = RandomStreams(12345)
p = tensor.fmatrix()
n = tensor.iscalar()
m = th_rng.multinomial_wo_replacement(pvals=p, n=n)
f = function([p, n], m, allow_input_downcast=True)
n_elements = 100
n_selected = 10
mean_rtol = 0.0005
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = np.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
res = f(pvals, n_selected)
res = np.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
avg_diff = np.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol
def test_gpu_opt_wor():
# We test the case where we put the op on the gpu when the output
# is moved to the gpu.
p = tensor.fmatrix()
u = tensor.fvector()
n = tensor.iscalar()
for replace in [False, True]:
m = multinomial.ChoiceFromUniform(odtype='auto',
replace=replace)(p, u, n)
assert m.dtype == 'int64', m.dtype
f = function([p, u, n], m, allow_input_downcast=True,
mode=mode_with_gpu)
assert any([type(node.op) is GPUAChoiceFromUniform
for node in f.maker.fgraph.toposort()])
n_samples = 3
pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
uval = np.ones(pval.shape[0] * n_samples) * 0.5
f(pval, uval, n_samples)
# Test with a row, it was failing in the past.
r = tensor.frow()
m = multinomial.ChoiceFromUniform('auto', replace=replace)(r, u, n)
assert m.dtype == 'int64', m.dtype
f = function([r, u, n], m, allow_input_downcast=True,
mode=mode_with_gpu)
assert any([type(node.op) is GPUAChoiceFromUniform
for node in f.maker.fgraph.toposort()])
pval = np.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
        uval = np.ones_like(pval[:, 0])
#
from positive import *
# # Return the min and max limits of an 1D array
# def lim(x):
# # Import useful bit
# from numpy import array,ndarray
# if not isinstance(x,ndarray):
# x = array(x)
# # Columate input.
# z = x.reshape((x.size,))
# # Return min and max as list
# return array([min(z),max(z)]) + (0 if len(z)==1 else array([-1e-20,1e-20]))
# Function to produce array of color vectors
def rgb( N, #
offset = None, #
speed = None, #
plot = False, #
shift = None, #
jet = False, #
reverse = False, #
weights = None, #
grayscale = None, #
verbose = None ): #
'''
Function to produce array of color vectors.
'''
#
from numpy import array,pi,sin,arange,linspace,amax,mean,sqrt
# If bad first intput, let the people know.
if not isinstance( N, int ):
msg = 'First input must be '+cyan('int')+'.'
raise ValueError(msg)
#
if offset is None:
offset = pi/4.0
#
if speed is None:
speed = 2.0
#
if shift is None:
shift = 0
#
if jet:
offset = -pi/2.1
shift = pi/2.0
#
if weights is None:
t_range = linspace(1,0,N)
else:
if len(weights)==N:
t_range = array(weights)
t_range /= 1 if 0==amax(t_range) else amax(t_range)
else:
error('weights must be of length N','rgb')
#
    # Only apply the default ordering when no custom weights were given;
    # otherwise the weights-based t_range computed just above would be discarded.
    if weights is None:
        if reverse:
            t_range = linspace(1,0,N)
        else:
            t_range = linspace(0,1,N)
#
r = array([ 1, 0, 0 ])
g = array([ 0, 1, 0 ])
b = array([ 0, 0, 1 ])
#
clr = []
w = pi/2.0
for t in t_range:
#
if not grayscale:
R = r*sin( w*t + shift )
G = g*sin( w*t*speed + offset + shift )
B = b*sin( w*t + pi/2 + shift )
else:
R = r*t
G = g*t
B = b*t
# Ensure that all color vectors have a mean that is the golden ratio (less than one)
V = abs(R+G+B)
if not grayscale:
V /= mean(V)*0.5*(1+sqrt(5))
# But make sure that all values are bounded by one
V = array([ min(v,1) for v in V ])
# Add color vector to output
clr.append( V )
#
if plot:
#
from matplotlib import pyplot as p
#
fig = p.figure()
fig.set_facecolor("white")
#
for k in range(N):
p.plot( array([0,1]), (k+1.0)*array([1,1])/N, linewidth=20, color = clr[k] )
#
p.axis('equal')
p.axis('off')
#
p.ylim([-1.0/N,1.0+1.0/N])
p.show()
#
return array(clr)
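# Usage sketch (names here are illustrative, not from the original module):
#   colors = rgb(5)              # five RGB triplets spanning the colormap
#   colors = rgb(5, jet=True)    # jet-like ordering of the same colors
# Each returned row is a length-3 array with entries clipped to [0, 1], so it
# can be passed directly as a matplotlib `color` argument.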
# Plot 2d surface and related scatter points
def splot( domain,
scalar_range,
domain2=None,
scalar_range2=None,
kind=None,
ms=60,
cbfs=16,
color_scatter=True,
verbose=True):
'''Plot 2d surface and related scatter points '''
# Import usefult things
from matplotlib.pyplot import figure,plot,scatter,xlabel,ylabel,savefig,imshow,colorbar,gca
from numpy import linspace,meshgrid,array,angle,unwrap
from positive.maths import sunwrap
from matplotlib import cm
#
plot_scatter = (domain2 is not None) and (scalar_range2 is not None)
#
fig = figure( figsize=2*array([4,2.8]) )
clrmap = cm.coolwarm
#
# Z = abs(SR) if kind=='amp' else angle(SR)
# Z = abs(scalar_range) if kind=='amp' else scalar_range
# Z = sunwrap(angle(scalar_range)) if kind=='phase' else scalar_range
if kind=='amp':
Z = abs(scalar_range)
elif kind=='phase':
Z = sunwrap(angle(scalar_range))
else:
Z = scalar_range
#
norm = cm.colors.Normalize(vmax=1.1*Z.max(), vmin=Z.min())
# Plot scatter of second dataset
if plot_scatter:
#
if color_scatter:
mkr = 'o'
else:
mkr = 's'
# Set marker size
mkr_size = ms
# Scatter the outline of domain points
        scatter( domain2[:,0], domain2[:,1], mkr_size + 5, color='k', alpha=0.6 if color_scatter else 0.333, marker=mkr, facecolors='none' )
# Scatter the location of domain points and color by value
if color_scatter:
Z_ = abs(scalar_range2) if kind=='amp' else sunwrap(angle(scalar_range2))
scatter( domain2[:,0],domain2[:,1], mkr_size, c=Z_,
marker='o',
cmap=clrmap, norm=norm, edgecolors='none' )
#
extent = (domain[:,0].min(),domain[:,0].max(),domain[:,1].min(),domain[:,1].max())
im = imshow(Z, extent=extent, aspect='auto',
cmap=clrmap, origin='lower', norm=norm )
#
cb = colorbar()
cb_range = linspace(Z.min(),Z.max(),5)
cb.set_ticks( cb_range )
cb.set_ticklabels( [ '%1.3f'%k for k in cb_range ] )
cb.ax.tick_params(labelsize=cbfs)
#
return gca()
#
def sYlm_mollweide_plot(l,m,ax=None,title=None,N=100,form=None,s=-2,colorbar_shrink=0.68):
'''
Plot spin weighted spherical harmonic.
'''
#
from matplotlib.pyplot import subplots,gca,gcf,figure,colorbar,draw
from numpy import array,pi,linspace,meshgrid
# Coordinate arrays for the graphical representation
x = linspace(-pi, pi, N)
    y = linspace(-pi/2, pi/2, N//2)
X, Y = meshgrid(x, y)
# Spherical coordinate arrays derived from x, y
theta = pi/2 - y
phi = x.copy()
#
if form in (None,'r','re','real'):
SYLM_fun = lambda S,L,M,TH,PH: sYlm(S,L,M,TH,PH).real.T
title = r'$\Re(_{%i}Y_{%i%i})$'%(s,l,m)
elif form in ('i','im','imag'):
SYLM_fun = lambda S,L,M,TH,PH: sYlm(S,L,M,TH,PH).imag.T
title = r'$\Im(_{%i}Y_{%i%i})$'%(s,l,m)
elif form in ('a','ab','abs'):
SYLM_fun = lambda S,L,M,TH,PH: abs(sYlm(S,L,M,TH,PH)).T
title = r'$|_{%i}Y_{%i%i}|$'%(s,l,m)
elif form in ('+','plus'):
SYLM_fun = lambda S,L,M,TH,PH: ( sYlm(S,L,M,TH,PH) + sYlm(S,L,-M,TH,PH) ).real.T
title = r'$ _{%i}Y^{+}_{%i%i} = \Re \; \left[ \sum_{m\in \{%i,%i\}} \, _{%i} Y_{%i m} \; \right] $'%(s,l,m,m,-m,s,l,)
elif form in ('x','cross'):
SYLM_fun = lambda S,L,M,TH,PH: ( sYlm(S,L,M,TH,PH) + sYlm(S,L,-M,TH,PH) ).imag.T
title = r'$ _{%i}Y^{\times}_{%i%i} = \Im \; \left[ \sum_{m\in \{%i,%i\}} \, _{%i} Y_{%i m} \; \right] $'%(s,l,m,m,-m,s,l,)
#
    Z = SYLM_fun( s,l,m,theta,phi )
xlabels = ['$210^\circ$', '$240^\circ$','$270^\circ$','$300^\circ$','$330^\circ$',
'$0^\circ$', '$30^\circ$', '$60^\circ$', '$90^\circ$','$120^\circ$', '$150^\circ$']
ylabels = ['$165^\circ$', '$150^\circ$', '$135^\circ$', '$120^\circ$',
'$105^\circ$', '$90^\circ$', '$75^\circ$', '$60^\circ$',
'$45^\circ$','$30^\circ$','$15^\circ$']
#
if ax is None:
fig, ax = subplots(subplot_kw=dict(projection='mollweide'), figsize= 1*array([10,8]) )
#
im = ax.pcolormesh(X,Y,Z)
ax.set_xticklabels(xlabels, fontsize=14)
ax.set_yticklabels(ylabels, fontsize=14)
# ax.set_title( title, fontsize=20)
ax.set_xlabel(r'$\phi$', fontsize=20)
ax.set_ylabel(r'$\theta$', fontsize=20)
ax.grid()
colorbar(im, ax=ax, orientation='horizontal',shrink=colorbar_shrink,label=title)
gcf().canvas.draw_idle()
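# Usage sketch (assumed call; relies on sYlm() provided by the positive package):
#   sYlm_mollweide_plot(2, 2, form='abs')   # plots |_{-2}Y_{22}| on a Mollweide map
#   sYlm_mollweide_plot(2, 1, form='+')     # plus-polarization combination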
#
def plot3Dpoint(ax,vec,label,note,marker='o',s=40,color='g',mfc='none',va='bottom',ha='right',la=0.8,ts=16,normalize=True):
'''Plot 3D point'''
# Import usefuls
from numpy import linalg
# Plot 3D point
foo = vec/ ( ( linalg.norm(vec) if linalg.norm(vec) else 1.0 ) if normalize else 1 )
ax.scatter( foo[0], foo[1], foo[2], label=label, color=color, marker=marker, s=s, facecolor=mfc,zorder=-80 )
ax.text(foo[0], foo[1], foo[2],note,alpha=la,verticalalignment=va,ha=ha,size=ts)
# Plot a 3d meshed sphere
def plot_3d_mesh_sphere(ax=None,nth=30,nph=30,r=1,color='k',lw=1,alpha=0.1,axes_on=True,axes_alpha=0.35,view=None):
#
from numpy import sin,cos,linspace,ones_like,array,pi
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import figure,plot,figaspect,text,axis
#
if view is None:
view = (30,-60)
#
if ax is None:
fig = figure( figsize=4*figaspect(1) )
ax = fig.add_subplot(111,projection='3d')
# See: https://github.com/matplotlib/matplotlib/issues/17172#issuecomment-634964954
ax.set_box_aspect((1, 1, 1))
axis('square')
ax.set_xlim([-r,r])
ax.set_ylim([-r,r])
ax.set_zlim([-r,r])
axis('off')
#
th_ = linspace(0,pi,nth)
ph_ = linspace(0,2*pi,nph)
#
for th in th_:
x = r*sin(th)*cos(ph_)
y = r*sin(th)*sin(ph_)
z = r*cos(th)*ones_like(ph_)
plot(x,y,z,color=color,alpha=alpha,lw=lw)
#
for ph in ph_[:-1]:
x = r*sin(th_)*cos(ph)
y = r*sin(th_)*sin(ph)
z = r*cos(th_)
plot(x,y,z,color=color,alpha=alpha,lw=lw)
#
if axes_on:
#
for ph in [ 0, pi, pi/2, 3*pi/2 ]:
x = r*sin(th_)*cos(ph)
y = r*sin(th_)*sin(ph)
z = r*cos(th_)
plot(x,y,z,color='k',alpha=axes_alpha,lw=lw,ls='--')
#
for th in [ pi/2 ]:
x = r*sin(th)*cos(ph_)
y = r*sin(th)*sin(ph_)
        z = r*cos(th)*ones_like(ph_)
import os
import sys
import time
import pdb
import gc
import numpy as np
import faiss
import argparse
import resource
import benchmark.datasets
from benchmark.datasets import DATASETS
from benchmark.plotting import eval_range_search
####################################################################
# Index building functions
####################################################################
def two_level_clustering(xt, nc1, nc2, clustering_niter=25, spherical=False):
d = xt.shape[1]
print(f"2-level clustering of {xt.shape} nb clusters = {nc1}*{nc2} = {nc1*nc2}")
print("perform coarse training")
km = faiss.Kmeans(
d, nc1, verbose=True, niter=clustering_niter,
max_points_per_centroid=2000,
spherical=spherical
)
km.train(xt)
print()
# coarse centroids
centroids1 = km.centroids
print("assigning the training set")
t0 = time.time()
_, assign1 = km.assign(xt)
bc = np.bincount(assign1, minlength=nc1)
print(f"done in {time.time() - t0:.2f} s. Sizes of clusters {min(bc)}-{max(bc)}")
o = assign1.argsort()
del km
# train sub-clusters
i0 = 0
c2 = []
t0 = time.time()
for c1 in range(nc1):
print(f"[{time.time() - t0:.2f} s] training sub-cluster {c1}/{nc1}\r", end="", flush=True)
i1 = i0 + bc[c1]
subset = o[i0:i1]
assert np.all(assign1[subset] == c1)
km = faiss.Kmeans(d, nc2, spherical=spherical)
xtsub = xt[subset]
km.train(xtsub)
c2.append(km.centroids)
i0 = i1
print(f"done in {time.time() - t0:.2f} s")
return np.vstack(c2)
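# Usage sketch (assumed sizes, mirroring how build_index() uses it below):
# for an IVF index with nlist = 1024**2 centroids, train 1024 coarse clusters
# and 1024 sub-clusters inside each, which is much cheaper than flat k-means:
#   centroids = two_level_clustering(xt, 1024, 1024)
#   index_ivf.quantizer.train(centroids)
#   index_ivf.quantizer.add(centroids)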
def unwind_index_ivf(index):
if isinstance(index, faiss.IndexPreTransform):
assert index.chain.size() == 1
vt = faiss.downcast_VectorTransform(index.chain.at(0))
index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index))
assert vt2 is None
return index_ivf, vt
if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine):
return unwind_index_ivf(faiss.downcast_index(index.base_index))
if isinstance(index, faiss.IndexIVF):
return index, None
else:
return None, None
def build_index(args, ds):
nq, d = ds.nq, ds.d
    nb, d = ds.nb, ds.d
if args.buildthreads == -1:
print("Build-time number of threads:", faiss.omp_get_max_threads())
else:
print("Set build-time number of threads:", args.buildthreads)
faiss.omp_set_num_threads(args.buildthreads)
metric_type = (
faiss.METRIC_L2 if ds.distance() == "euclidean" else
faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else
1/0
)
print("metric type", metric_type)
index = faiss.index_factory(d, args.indexkey, metric_type)
index_ivf, vec_transform = unwind_index_ivf(index)
if vec_transform is None:
vec_transform = lambda x: x
else:
vec_transform = faiss.downcast_VectorTransform(vec_transform)
if args.by_residual != -1:
by_residual = args.by_residual == 1
print("setting by_residual = ", by_residual)
index_ivf.by_residual # check if field exists
index_ivf.by_residual = by_residual
if index_ivf:
print("Update add-time parameters")
# adjust default parameters used at add time for quantizers
# because otherwise the assignment is inaccurate
quantizer = faiss.downcast_index(index_ivf.quantizer)
if isinstance(quantizer, faiss.IndexRefine):
print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ")
quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64
print(quantizer.k_factor)
base_index = faiss.downcast_index(quantizer.base_index)
if isinstance(base_index, faiss.IndexIVF):
print(" update quantizer nprobe=", base_index.nprobe, end=" -> ")
base_index.nprobe = (
16 if base_index.nlist < 1e5 else
32 if base_index.nlist < 4e6 else
64)
print(base_index.nprobe)
elif isinstance(quantizer, faiss.IndexHNSW):
print(" update quantizer efSearch=", quantizer.hnsw.efSearch, end=" -> ")
if args.quantizer_add_efSearch > 0:
quantizer.hnsw.efSearch = args.quantizer_add_efSearch
else:
quantizer.hnsw.efSearch = 40 if index_ivf.nlist < 4e6 else 64
print(quantizer.hnsw.efSearch)
if args.quantizer_efConstruction != -1:
print(" update quantizer efConstruction=", quantizer.hnsw.efConstruction, end=" -> ")
quantizer.hnsw.efConstruction = args.quantizer_efConstruction
print(quantizer.hnsw.efConstruction)
index.verbose = True
if index_ivf:
index_ivf.verbose = True
index_ivf.quantizer.verbose = True
index_ivf.cp.verbose = True
maxtrain = args.maxtrain
if maxtrain == 0:
if 'IMI' in args.indexkey:
maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2))
elif index_ivf:
maxtrain = 50 * index_ivf.nlist
else:
# just guess...
maxtrain = 256 * 100
maxtrain = max(maxtrain, 256 * 100)
print("setting maxtrain to %d" % maxtrain)
# train on dataset
print(f"getting first {maxtrain} dataset vectors for training")
xt2 = next(ds.get_dataset_iterator(bs=maxtrain))
print("train, size", xt2.shape)
assert np.all(np.isfinite(xt2))
t0 = time.time()
if (isinstance(vec_transform, faiss.OPQMatrix) and
isinstance(index_ivf, faiss.IndexIVFPQFastScan)):
print(" Forcing OPQ training PQ to PQ4")
ref_pq = index_ivf.pq
training_pq = faiss.ProductQuantizer(
ref_pq.d, ref_pq.M, ref_pq.nbits
)
vec_transform.pq
vec_transform.pq = training_pq
if args.clustering_niter >= 0:
print(("setting nb of clustering iterations to %d" %
args.clustering_niter))
index_ivf.cp.niter = args.clustering_niter
train_index = None
if args.train_on_gpu:
print("add a training index on GPU")
train_index = faiss.index_cpu_to_all_gpus(
faiss.IndexFlatL2(index_ivf.d))
index_ivf.clustering_index = train_index
if args.two_level_clustering:
sqrt_nlist = int(np.sqrt(index_ivf.nlist))
assert sqrt_nlist ** 2 == index_ivf.nlist
centroids_trainset = xt2
if isinstance(vec_transform, faiss.VectorTransform):
print(" training vector transform")
vec_transform.train(xt2)
print(" transform trainset")
centroids_trainset = vec_transform.apply_py(centroids_trainset)
centroids = two_level_clustering(
centroids_trainset, sqrt_nlist, sqrt_nlist,
spherical=(metric_type == faiss.METRIC_INNER_PRODUCT)
)
if not index_ivf.quantizer.is_trained:
print(" training quantizer")
index_ivf.quantizer.train(centroids)
print(" add centroids to quantizer")
index_ivf.quantizer.add(centroids)
index.train(xt2)
print(" Total train time %.3f s" % (time.time() - t0))
if train_index is not None:
del train_index
index_ivf.clustering_index = None
gc.collect()
print("adding")
t0 = time.time()
if args.add_bs == -1:
index.add(sanitize(ds.get_database()))
else:
i0 = 0
nsplit = args.add_splits
for sno in range(nsplit):
print(f"============== SPLIT {sno}/{nsplit}")
for xblock in ds.get_dataset_iterator(bs=args.add_bs, split=(nsplit, sno)):
i1 = i0 + len(xblock)
print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % (
i0, i1, ds.nb, time.time() - t0,
faiss.get_mem_usage_kb()))
index.add(xblock)
i0 = i1
gc.collect()
if sno == args.stop_at_split:
print("stopping at split", sno)
break
print(" add in %.3f s" % (time.time() - t0))
if args.indexfile:
print("storing", args.indexfile)
faiss.write_index(index, args.indexfile)
return index
####################################################################
# Evaluation functions
####################################################################
def compute_inter(a, b):
nq, rank = a.shape
ninter = sum(
np.intersect1d(a[i, :rank], b[i, :rank]).size
for i in range(nq)
)
return ninter / a.size
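# compute_inter returns the average fraction of ground-truth ids recovered per
# query (order-insensitive intersection measure). Small sketch:
#   a = np.array([[1, 2, 3]]); b = np.array([[3, 4, 1]])
#   compute_inter(a, b)   # -> 2/3, since ids 1 and 3 appear in both rows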
def knn_search_batched(index, xq, k, bs):
D, I = [], []
for i0 in range(0, len(xq), bs):
Di, Ii = index.search(xq[i0:i0 + bs], k)
D.append(Di)
I.append(Ii)
return np.vstack(D), np.vstack(I)
def eval_setting_knn(index, xq, gt, k=0, inter=False, min_time=3.0, query_bs=-1):
nq = xq.shape[0]
gt_I, gt_D = gt
ivf_stats = faiss.cvar.indexIVF_stats
ivf_stats.reset()
nrun = 0
t0 = time.time()
while True:
if query_bs == -1:
D, I = index.search(xq, k)
else:
D, I = knn_search_batched(index, xq, k, query_bs)
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun)
if inter:
rank = k
inter_measure = compute_inter(gt_I[:, :rank], I[:, :rank])
print("%.4f" % inter_measure, end=' ')
else:
for rank in 1, 10, 100:
n_ok = (I[:, :rank] == gt_I[:, :1]).sum()
print("%.4f" % (n_ok / float(nq)), end=' ')
print(" %9.5f " % ms_per_query, end=' ')
if ivf_stats.search_time == 0:
# happens for IVFPQFastScan where the stats are not logged by default
print("%12d %5.2f " % (ivf_stats.ndis / nrun, 0.0), end=' ')
else:
pc_quantizer = ivf_stats.quantization_time / ivf_stats.search_time * 100
print("%12d %5.2f " % (ivf_stats.ndis / nrun, pc_quantizer), end=' ')
print(nrun)
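# The values printed above line up with the header built by result_header():
# recall at 1/10/100 (or the intersection measure), milliseconds per query,
# number of distance computations, the share of search time spent in the
# coarse quantizer, and finally the number of timing runs.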
def eval_setting_range(index, xq, gt, radius=0, inter=False, min_time=3.0, query_bs=-1):
nq = xq.shape[0]
gt_nres, gt_I, gt_D = gt
gt_lims = np.zeros(nq + 1, dtype=int)
gt_lims[1:] = np.cumsum(gt_nres)
ivf_stats = faiss.cvar.indexIVF_stats
ivf_stats.reset()
nrun = 0
t0 = time.time()
while True:
if query_bs == -1:
lims, D, I = index.range_search(xq, radius)
else:
            raise NotImplementedError
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun)
ap = eval_range_search.compute_AP((gt_lims, gt_I, gt_D), (lims, I, D))
print("%.4f" % ap, end=' ')
print(" %9.5f " % ms_per_query, end=' ')
print("%12d %5d " % (ivf_stats.ndis / nrun, D.size), end=' ')
print(nrun)
def result_header(ds, args):
# setup the Criterion object
if ds.search_type() == "range":
header = (
'%-40s AP time(ms/q) nb distances nb_res #runs' %
"parameters"
)
crit = None
elif args.inter:
print("Optimize for intersection @ ", args.k)
crit = faiss.IntersectionCriterion(ds.nq, args.k)
header = (
'%-40s inter@%3d time(ms/q) nb distances %%quantization #runs' %
("parameters", args.k)
)
else:
print("Optimize for 1-recall @ 1")
crit = faiss.OneRecallAtRCriterion(ds.nq, 1)
header = (
'%-40s R@1 R@10 R@100 time(ms/q) nb distances %%quantization #runs' %
"parameters"
)
return header, crit
def op_compute_bounds(ps, ops, cno):
# lower_bound_t = 0.0
# upper_bound_perf = 1.0
bounds = np.array([0, 1], dtype="float64")
sp = faiss.swig_ptr
for i in range(ops.all_pts.size()):
ps.update_bounds(cno, ops.all_pts.at(i), sp(bounds[1:2]), sp(bounds[0:1]))
# lower_bound_t, upper_bound_perf
return bounds[0], bounds[1]
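# These bounds let the autotuner prune a parameter combination: the operating
# points measured so far constrain combination `cno` to be at least as slow as
# lower_bound_t and at most as accurate as upper_bound_perf, so if some already
# measured setting reaches that accuracy in less time, the experiment can be
# skipped (see the `best_t <= lower_bound_t` test below).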
def explore_parameter_space_range(index, xq, gt, ps, radius):
""" exploration of the parameter space for range search, using the
Average Precision as criterion
"""
n_experiments = ps.n_experiments
n_comb = ps.n_combinations()
min_time = ps.min_test_duration
verbose = ps.verbose
gt_nres, gt_I, gt_D = gt
gt_lims = np.zeros(len(gt_nres) + 1, dtype=int)
gt_lims[1:] = np.cumsum(gt_nres)
gt = (gt_lims, gt_I, gt_D)
ops = faiss.OperatingPoints()
def run_1_experiment(cno):
ps.set_index_parameters(index, cno)
nrun = 0
t0 = time.time()
while True:
lims, D, I = index.range_search(xq, radius)
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
t_search = (t1 - t0) / nrun
perf = eval_range_search.compute_AP(gt, (lims, I, D))
keep = ops.add(perf, t_search, ps.combination_name(cno), cno)
return len(D), perf, t_search, nrun, keep
if n_experiments == 0:
# means exhaustive run
for cno in range(n_comb):
nres, perf, t_search, nrun, keep = run_1_experiment(cno)
if verbose:
print(" %d/%d: %s nres=%d perf=%.3f t=%.3f s %s" % (
cno, n_comb,
ps.combination_name(cno),
nres, perf, t_search, "*" if keep else ""))
return ops
n_experiments = min(n_experiments, n_comb)
perm = np.zeros(n_experiments, int)
# make sure the slowest and fastest experiment are run
perm[0] = 0
perm[1] = n_comb - 1
rs = np.random.RandomState(1234)
perm[2:] = 1 + rs.choice(n_comb - 2, n_experiments - 2, replace=False)
for xp, cno in enumerate(perm):
cno = int(cno)
if verbose:
print(" %d/%d: cno=%d %s " % (
xp, n_experiments, cno, ps.combination_name(cno)),
end="", flush=True)
# check if we can skip this experiment
lower_bound_t, upper_bound_perf = op_compute_bounds(ps, ops, cno)
best_t = ops.t_for_perf(upper_bound_perf)
if verbose:
print("bounds [perf<=%.3f t>=%.3f] " % (
upper_bound_perf, lower_bound_t),
end="skip\n" if best_t <= lower_bound_t else " "
)
if best_t <= lower_bound_t:
continue
nres, perf, t_search, nrun, keep = run_1_experiment(cno)
if verbose:
print(" nres %d perf %.3f t %.3f (%d %s) %s" % (
nres, perf, t_search, nrun,
"runs" if nrun >= 2 else "run",
"*" if keep else ""))
return ops
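# Usage sketch (arguments as passed by run_experiments_autotune below):
#   ps = faiss.ParameterSpace(); ps.initialize(index)
#   ops = explore_parameter_space_range(index, xq, ds.get_groundtruth(), ps, radius)
#   ops.display()    # Pareto-optimal (average precision, time) operating points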
####################################################################
# Driver functions
####################################################################
def run_experiments_searchparams(ds, index, args):
"""
Evaluate a predefined set of runtime parameters
"""
k = args.k
xq = ds.get_queries()
nq = len(xq)
ps = faiss.ParameterSpace()
ps.initialize(index)
header, _ = result_header(ds, args)
searchparams = args.searchparams
print(f"Running evaluation on {len(searchparams)} searchparams")
print(header)
maxw = max(max(len(p) for p in searchparams), 40)
for params in searchparams:
ps.set_index_parameters(index, params)
print(params.ljust(maxw), end=' ')
sys.stdout.flush()
if ds.search_type() == "knn":
eval_setting_knn(
index, xq, ds.get_groundtruth(k=args.k),
k=args.k,
inter=args.inter, min_time=args.min_test_duration,
query_bs=args.query_bs
)
else:
eval_setting_range(
index, xq, ds.get_groundtruth(k=args.k),
radius=args.radius,
inter=args.inter, min_time=args.min_test_duration,
query_bs=args.query_bs
)
def run_experiments_autotune(ds, index, args):
""" Explore the space of parameters and keep Pareto-optimal ones. """
k = args.k
xq = ds.get_queries()
nq = len(xq)
ps = faiss.ParameterSpace()
ps.initialize(index)
ps.n_experiments = args.n_autotune
ps.min_test_duration = args.min_test_duration
for kv in args.autotune_max:
k, vmax = kv.split(':')
vmax = float(vmax)
print("limiting %s to %g" % (k, vmax))
pr = ps.add_range(k)
values = faiss.vector_to_array(pr.values)
values = np.array([v for v in values if v < vmax])
faiss.copy_array_to_vector(values, pr.values)
for kv in args.autotune_range:
k, vals = kv.split(':')
vals = np.fromstring(vals, sep=',')
print("setting %s to %s" % (k, vals))
pr = ps.add_range(k)
faiss.copy_array_to_vector(vals, pr.values)
header, crit = result_header(ds, args)
# then we let Faiss find the optimal parameters by itself
print("exploring operating points, %d threads" % faiss.omp_get_max_threads());
ps.display()
t0 = time.time()
if ds.search_type() == "knn":
# by default, the criterion will request only 1 NN
crit.nnn = args.k
gt_I, gt_D = ds.get_groundtruth(k=args.k)
crit.set_groundtruth(None, gt_I.astype('int64'))
op = ps.explore(index, xq, crit)
elif ds.search_type() == "range":
op = explore_parameter_space_range(
index, xq, ds.get_groundtruth(), ps, args.radius
)
else:
assert False
print("Done in %.3f s, available OPs:" % (time.time() - t0))
op.display()
print("Re-running evaluation on selected OPs")
print(header)
opv = op.optimal_pts
maxw = max(max(len(opv.at(i).key) for i in range(opv.size())), 40)
for i in range(opv.size()):
opt = opv.at(i)
ps.set_index_parameters(index, opt.key)
print(opt.key.ljust(maxw), end=' ')
sys.stdout.flush()
if ds.search_type() == "knn":
eval_setting_knn(
index, xq, ds.get_groundtruth(k=args.k),
k=args.k,
inter=args.inter, min_time=args.min_test_duration
)
else:
eval_setting_range(
index, xq, ds.get_groundtruth(k=args.k),
radius=args.radius,
inter=args.inter, min_time=args.min_test_duration
)
class DatasetWrapInPairwiseQuantization:
def __init__(self, ds, C):
self.ds = ds
self.C = C
self.Cq = np.linalg.inv(C.T)
# xb_pw = np.ascontiguousarray((C @ xb.T).T)
# xq_pw = np.ascontiguousarray((Cq @ xq.T).T)
# copy fields
for name in "nb d nq dtype distance search_type get_groundtruth".split():
setattr(self, name, getattr(ds, name))
def get_dataset(self):
return self.ds.get_dataset() @ self.C.T
def get_queries(self):
return self.ds.get_queries() @ self.Cq.T
def get_dataset_iterator(self, bs=512, split=(1,0)):
for xb in self.ds.get_dataset_iterator(bs=bs, split=split):
yield xb @ self.C.T
####################################################################
# Main
####################################################################
def main():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('What to do')
aa('--build', default=False, action="store_true")
aa('--search', default=False, action="store_true")
aa('--prepare', default=False, action="store_true",
help="call prepare() to download the dataset before computing")
group = parser.add_argument_group('dataset options')
aa('--dataset', choices=DATASETS.keys(), required=True)
aa('--basedir', help="override basedir for dataset")
aa('--pairwise_quantization', default="",
help="load/store pairwise quantization matrix")
aa('--query_bs', default=-1, type=int,
help='perform queries in batches of this size')
group = parser.add_argument_group('index construction')
aa('--indexkey', default='HNSW32', help='index_factory type')
aa('--by_residual', default=-1, type=int,
help="set if index should use residuals (default=unchanged)")
aa('--M0', default=-1, type=int, help='size of base level')
aa('--maxtrain', default=0, type=int,
help='maximum number of training points (0 to set automatically)')
aa('--indexfile', default='', help='file to read or write index from')
aa('--add_bs', default=100000, type=int,
help='add elements index by batches of this size')
aa('--add_splits', default=1, type=int,
help="Do adds in this many splits (otherwise risk of OOM for large datasets)")
aa('--stop_at_split', default=-1, type=int,
help="stop at this split (for debugging)")
aa('--no_precomputed_tables', action='store_true', default=False,
help='disable precomputed tables (uses less memory)')
aa('--clustering_niter', default=-1, type=int,
help='number of clustering iterations (-1 = leave default)')
aa('--two_level_clustering', action="store_true", default=False,
help='perform a 2-level tree clustering')
aa('--train_on_gpu', default=False, action='store_true',
help='do training on GPU')
aa('--quantizer_efConstruction', default=-1, type=int,
help="override the efClustering of the quantizer")
aa('--quantizer_add_efSearch', default=-1, type=int,
help="override the efSearch of the quantizer at add time")
aa('--buildthreads', default=-1, type=int,
help='nb of threads to use at build time')
group = parser.add_argument_group('searching')
aa('--k', default=10, type=int, help='nb of nearest neighbors')
aa('--radius', default=96237, type=float, help='radius for range search')
aa('--inter', default=True, action='store_true',
help='use intersection measure instead of 1-recall as metric')
aa('--searchthreads', default=-1, type=int,
help='nb of threads to use at search time')
aa('--searchparams', nargs='+', default=['autotune'],
help="search parameters to use (can be autotune or a list of params)")
aa('--n_autotune', default=500, type=int,
help="max nb of autotune experiments")
aa('--autotune_max', default=[], nargs='*',
help='set max value for autotune variables format "var:val" (exclusive)')
aa('--autotune_range', default=[], nargs='*',
help='set complete autotune range, format "var:val1,val2,..."')
aa('--min_test_duration', default=3.0, type=float,
help='run test at least for so long to avoid jitter')
aa('--parallel_mode', default=-1, type=int,
help="set search-time parallel mode for IVF indexes")
group = parser.add_argument_group('computation options')
aa("--maxRAM", default=-1, type=int, help="set max RSS in GB (avoid OOM crash)")
args = parser.parse_args()
print("args=", args)
if args.basedir:
print("setting datasets basedir to", args.basedir)
benchmark.datasets.BASEDIR
benchmark.datasets.BASEDIR = args.basedir
if args.maxRAM > 0:
print("setting max RSS to", args.maxRAM, "GiB")
resource.setrlimit(
resource.RLIMIT_DATA, (args.maxRAM * 1024 ** 3, resource.RLIM_INFINITY)
)
os.system('echo -n "nb processors "; '
'cat /proc/cpuinfo | grep ^processor | wc -l; '
'cat /proc/cpuinfo | grep ^"model name" | tail -1')
ds = DATASETS[args.dataset]()
print(ds)
nq, d = ds.nq, ds.d
    nb, d = ds.nb, ds.d
if args.prepare:
print("downloading dataset...")
ds.prepare()
print("dataset ready")
if not (args.build or args.search):
return
if args.pairwise_quantization:
if os.path.exists(args.pairwise_quantization):
print("loading pairwise quantization matrix", args.pairwise_quantization)
            C = np.load(args.pairwise_quantization)
import timeit
from random import randrange
from collections import OrderedDict
import numpy as np
def listCompre(sizeList):
result = [x for x in range(sizeList)]
return result
def numpyList(sizeList):
    myList = np.arange(sizeList)
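# Example timing comparison (a sketch, not part of the original snippet):
#   timeit.timeit(lambda: listCompre(10**6), number=10)
#   timeit.timeit(lambda: numpyList(10**6), number=10)
# numpy.arange allocates the whole array in C, so it is typically much faster
# than the pure-Python list comprehension for large sizes.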
import matplotlib, zipfile
matplotlib.use('agg')
import sys, numpy as np, matplotlib.pyplot as plt, os, tools21cm as t2c, matplotlib.gridspec as gridspec
from sklearn.metrics import matthews_corrcoef
from glob import glob
from tensorflow.keras.models import load_model
from tqdm import tqdm
from config.net_config import PredictionConfig
from utils.other_utils import RotateCube
from utils_network.metrics import iou, iou_loss, dice_coef, dice_coef_loss, balanced_cross_entropy, phi_coef
from utils_network.prediction import SegUnet21cmPredict
from myutils.utils import OrderNdimArray
title_a = '\t\t _ _ _ _ _ \n\t\t| | | | \ | | | | \n\t\t| | | | \| | ___| |_ \n\t\t| | | | . ` |/ _ \ __|\n\t\t| |__| | |\ | __/ |_ \n\t\t \____/|_| \_|\___|\__|\n'
title_b = ' _____ _ _ _ ___ __ \n| __ \ | (_) | | |__ \/_ | \n| |__) | __ ___ __| |_ ___| |_ ___ ) || | ___ _ __ ___ \n| ___/ `__/ _ \/ _` | |/ __| __/ __| / / | |/ __| `_ ` _ \ \n| | | | | __/ (_| | | (__| |_\__ \ / /_ | | (__| | | | | |\n|_| |_| \___|\__,_|_|\___|\__|___/ |____||_|\___|_| |_| |_|\n'
print(title_a+'\n'+title_b)
config_file = sys.argv[1]
conf = PredictionConfig(config_file)
PATH_OUT = conf.path_out
PATH_INPUT = conf.path+conf.pred_data
print(' PATH_INPUT = %s' %PATH_INPUT)
if(PATH_INPUT[-3:] == 'zip'):
    ZIPFILE = True
    PATH_IN_ZIP = PATH_INPUT[PATH_INPUT.rfind('/')+1:-4]+'/'
    PATH_UNZIP = PATH_INPUT[:PATH_INPUT.rfind('/')+1]
else:
    ZIPFILE = False
MAKE_PLOTS = True
# load model
avail_metrics = {'binary_accuracy':'binary_accuracy', 'iou':iou, 'dice_coef':dice_coef, 'iou_loss':iou_loss, 'dice_coef_loss':dice_coef_loss, 'phi_coef':phi_coef, 'mse':'mse', 'mae':'mae', 'binary_crossentropy':'binary_crossentropy', 'balanced_cross_entropy':balanced_cross_entropy}
MODEL_EPOCH = conf.best_epoch
METRICS = [avail_metrics[m] for m in np.append(conf.loss, conf.metrics)]
cb = {func.__name__:func for func in METRICS if not isinstance(func, str)}
model = load_model('%smodel-sem21cm_ep%d.h5' %(PATH_OUT+'checkpoints/', MODEL_EPOCH), custom_objects=cb)
try:
os.makedirs(PATH_OUT+'predictions')
except:
pass
PATH_OUT += 'predictions/pred_tobs1200/'
print(' PATH_OUTPUT = %s' %PATH_OUT)
try:
os.makedirs(PATH_OUT+'data')
os.makedirs(PATH_OUT+'plots')
except:
pass
if(os.path.exists('%sastro_data.txt' %PATH_OUT)):
astr_data = np.loadtxt('%sastro_data.txt' %PATH_OUT, unpack=True)
restarts = astr_data[6:].argmin(axis=1)
if(all(int(np.mean(restarts)) == restarts)):
restart = int(np.mean(restarts))
print(' Restart from idx=%d' %restart)
else:
        raise ValueError(' Restart points do not match.')
phicoef_seg, phicoef_err, phicoef_sp, xn_mask, xn_seg, xn_err, xn_sp, b0_true, b1_true, b2_true, b0_seg, b1_seg, b2_seg, b0_sp, b1_sp, b2_sp = astr_data[6:]
astr_data = astr_data[:6]
else:
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
astr_data = np.loadtxt(myzip.open('%sastro_params.txt' %PATH_IN_ZIP), unpack=True)
else:
astr_data = np.loadtxt('%sastro_params.txt' %PATH_INPUT, unpack=True)
restart = 0
phicoef_seg = np.zeros(astr_data.shape[1])
phicoef_err = np.zeros_like(phicoef_seg)
phicoef_sp = np.zeros_like(phicoef_seg)
xn_mask = np.zeros_like(phicoef_seg)
xn_seg = np.zeros_like(phicoef_seg)
xn_err = np.zeros_like(phicoef_seg)
xn_sp = np.zeros_like(phicoef_sp)
b0_true = np.zeros_like(phicoef_sp)
b1_true = np.zeros_like(phicoef_sp)
b2_true = np.zeros_like(phicoef_sp)
b0_sp = np.zeros_like(phicoef_sp)
b1_sp = np.zeros_like(phicoef_sp)
b2_sp = np.zeros_like(phicoef_sp)
b0_seg = np.zeros_like(phicoef_sp)
b1_seg = np.zeros_like(phicoef_sp)
b2_seg = np.zeros_like(phicoef_sp)
params = {'HII_DIM':128, 'DIM':384, 'BOX_LEN':256}
my_ext = [0, params['BOX_LEN'], 0, params['BOX_LEN']]
zc = (astr_data[1,:] < 7.5) + (astr_data[1,:] > 8.3)
c1 = (astr_data[5,:]<=0.25)*(astr_data[5,:]>=0.15)*zc
c2 = (astr_data[5,:]<=0.55)*(astr_data[5,:]>=0.45)*zc
c3 = (astr_data[5,:]<=0.85)*(astr_data[5,:]>=0.75)*zc
indexes = astr_data[0,:]
new_idx = indexes[c1+c2+c3].astype(int)
#for i in tqdm(range(restart, astr_data.shape[1])):
print(new_idx)
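# The loop below scores every predicted binary map against the ground-truth
# neutral-fraction mask with the Matthews correlation coefficient (phi
# coefficient), r_phi = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# computed by sklearn's matthews_corrcoef on the flattened cubes.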
for new_i in tqdm(range(3, new_idx.size)):
i = new_idx[new_i]
z = astr_data[1,i]
zeta = astr_data[2,i]
Rmfp = astr_data[3,i]
Tvir = astr_data[4,i]
xn = astr_data[5,i]
#print('z = %.3f x_n =%.3f zeta = %.3f R_mfp = %.3f T_vir = %.3f' %(z, xn, zeta, Rmfp, Tvir))
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
f = myzip.extract(member='%simage_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
dT3 = t2c.read_cbin(f)
f = myzip.extract(member='%smask_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
mask_xn = t2c.read_cbin(f)
os.system('rm -r %s/' %(PATH_UNZIP+PATH_IN_ZIP))
else:
dT3 = t2c.read_cbin('%simage_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
mask_xn = t2c.read_cbin('%smask_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
# Calculate Betti number
b0_true[i] = t2c.betti0(data=mask_xn)
b1_true[i] = t2c.betti1(data=mask_xn)
b2_true[i] = t2c.betti2(data=mask_xn)
xn_mask[i] = np.mean(mask_xn)
plt.rcParams['font.size'] = 20
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = False
plt.rcParams['ytick.right'] = False
plt.rcParams['axes.linewidth'] = 1.2
ls = 22
# -------- predict with SegUnet 3D --------
    print(' calculating prediction for data i = %d...' %i)
X_tta = SegUnet21cmPredict(unet=model, x=dT3, TTA=True)
X_seg = np.round(np.mean(X_tta, axis=0))
X_seg_err = np.std(X_tta, axis=0)
# calculate Matthew coef and mean neutral fraction
phicoef_seg[i] = matthews_corrcoef(mask_xn.flatten(), X_seg.flatten())
xn_seg[i] = np.mean(X_seg)
# calculate errors
phicoef_tta = np.zeros(X_tta.shape[0])
xn_tta = np.zeros(X_tta.shape[0])
for k in tqdm(range(len(X_tta))):
xn_tta[k] = np.mean(np.round(X_tta[k]))
phicoef_tta[k] = matthews_corrcoef(mask_xn.flatten(), np.round(X_tta[k]).flatten())
xn_err[i] = np.std(xn_tta)
phicoef_err[i] = np.std(phicoef_tta)
# Calculate Betti number
b0_seg[i] = t2c.betti0(data=X_seg)
b1_seg[i] = t2c.betti1(data=X_seg)
b2_seg[i] = t2c.betti2(data=X_seg)
# --------------------------------------------
# -------- predict with Super-Pixel --------
labels = t2c.slic_cube(dT3.astype(dtype='float64'), n_segments=5000, compactness=0.1, max_iter=20, sigma=0, min_size_factor=0.5, max_size_factor=3, cmap=None)
superpixel_map = t2c.superpixel_map(dT3, labels)
X_sp = 1-t2c.stitch_superpixels(dT3, labels, bins='knuth', binary=True, on_superpixel_map=True)
# calculate Matthew coef and mean neutral fraction
phicoef_sp[i] = matthews_corrcoef(mask_xn.flatten(), X_sp.flatten())
xn_sp[i] = np.mean(X_sp)
# Calculate Betti number
b0_sp[i] = t2c.betti0(data=X_sp)
b1_sp[i] = t2c.betti1(data=X_sp)
b2_sp[i] = t2c.betti2(data=X_sp)
# --------------------------------------------
if(i in new_idx and MAKE_PLOTS):
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
plt.rcParams['figure.figsize'] = [20, 10]
idx = params['HII_DIM']//2
# Plot visual comparison
fig, axs = plt.subplots(figsize=(20,10), ncols=3, sharey=True, sharex=True)
(ax0, ax1, ax2) = axs
ax0.set_title('Super-Pixel ($r_{\phi}=%.3f$)' %phicoef_sp[i], size=ls)
ax0.imshow(X_sp[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
ax0.contour(mask_xn[:,:,idx], colors='lime', levels=[0.5], extent=my_ext)
ax0.set_xlabel('x [Mpc]'), ax0.set_ylabel('y [Mpc]')
ax1.set_title('SegU-Net ($r_{\phi}=%.3f$)' %phicoef_seg[i], size=ls)
ax1.imshow(X_seg[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
ax1.contour(mask_xn[:,:,idx], colors='lime', levels=[0.5], extent=my_ext)
ax1.set_xlabel('x [Mpc]')
ax2.set_title('SegUNet Pixel-Error', size=ls)
im = plt.imshow(X_seg_err[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
fig.colorbar(im, label=r'$\sigma_{std}$', ax=ax2, pad=0.02, cax=fig.add_axes([0.905, 0.25, 0.02, 0.51]))
ax2.set_xlabel('x [Mpc]')
plt.subplots_adjust(hspace=0.1, wspace=0.01)
for ax in axs.flat: ax.label_outer()
plt.savefig('%svisual_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
# Plot BSD-MFP of the prediction
mfp_pred_ml = t2c.bubble_stats.mfp(X_seg, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_pred_sp = t2c.bubble_stats.mfp(X_sp, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_true = t2c.bubble_stats.mfp(mask_xn, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_tta = np.zeros((X_tta.shape[0], 2, 128))
for j in tqdm(range(0, X_tta.shape[0])):
mfp_pred_ml1, mfp_pred_ml2 = t2c.bubble_stats.mfp(np.round(X_tta[j]), xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_tta[j,0] = mfp_pred_ml1
mfp_tta[j,1] = mfp_pred_ml2
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
compare_ml = (mfp_pred_ml[1]/mfp_true[1])
compare_ml_tta = (mfp_tta[:,1,:]/mfp_true[1])
compare_sp = (mfp_pred_sp[1]/mfp_true[1])
fig, ax0 = plt.subplots(figsize=(12, 9))
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1.8]) # set height ratios for sublots
ax0 = plt.subplot(gs[0])
ax0.set_title('$z=%.3f$\t$x_n=%.3f$\t$r_{\phi}=%.3f$' %(z, xn_mask[i], phicoef_seg[i]), fontsize=ls)
ax0.fill_between(mfp_pred_ml[0], np.min(mfp_tta[:,1,:], axis=0), np.max(mfp_tta[:,1,:], axis=0), color='tab:blue', alpha=0.2)
ax0.loglog(mfp_pred_ml[0], mfp_pred_ml[1], '-', color='tab:blue', label='SegUNet', lw=2)
ax0.loglog(mfp_pred_sp[0], mfp_pred_sp[1], '-', color='tab:orange', label='Super-Pixel', lw=2)
ax0.loglog(mfp_true[0], mfp_true[1], 'k--', label='Ground true', lw=2)
ax0.legend(loc=0, borderpad=0.5)
ax0.tick_params(axis='both', length=7, width=1.2)
ax0.tick_params(axis='both', which='minor', length=5, width=1.2)
ax0.set_ylabel('RdP/dR', size=18), ax0.set_xlabel('R (Mpc)')
ax1 = plt.subplot(gs[1], sharex=ax0)
ax1.loglog(mfp_true[0], compare_ml, '-', lw=2)
ax1.loglog(mfp_true[0], compare_sp, '-', lw=2)
ax1.loglog(mfp_true[0], np.ones_like(mfp_true[0]), 'k--', lw=2)
ax1.fill_between(mfp_true[0], np.min(compare_ml_tta, axis=0), np.max(compare_ml_tta, axis=0), color='tab:blue', alpha=0.2)
ax1.tick_params(axis='both', length=7, width=1.2, labelsize=15)
ax1.set_ylabel('difference (%)', size=15)
ax1.set_xlabel('R (Mpc)', size=18)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
ax1.tick_params(which='minor', axis='both', length=5, width=1.2)
plt.savefig('%sbs_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
# Plot dimensioneless power spectra of the x field
ps_true, ks_true = t2c.power_spectrum_1d(mask_xn, kbins=20, box_dims=256, binning='log')
ps_pred_sp, ks_pred_sp = t2c.power_spectrum_1d(X_sp, kbins=20, box_dims=256, binning='log')
ps_pred_ml, ks_pred_ml = t2c.power_spectrum_1d(X_seg, kbins=20, box_dims=256, binning='log')
ps_tta = np.zeros((X_tta.shape[0],20))
for k in range(0,X_tta.shape[0]):
ps_tta[k], ks_pred_ml = t2c.power_spectrum_1d(np.round(X_tta[k]), kbins=20, box_dims=256, binning='log')
compare_ml = 100*(ps_pred_ml/ps_true - 1.)
compare_ml_tta = 100*(ps_tta/ps_true - 1.)
compare_sp = 100*(ps_pred_sp/ps_true - 1.)
fig, ax = plt.subplots(figsize=(16, 12))
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1.8])
ax0 = plt.subplot(gs[0])
ax0.set_title('$z=%.3f$\t$x_n=%.3f$\t$r_{\phi}=%.3f$' %(z, xn_mask[i], phicoef_seg[i]), fontsize=ls)
ax0.fill_between(ks_pred_ml, np.min(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0), np.max(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0), color='tab:blue', alpha=0.2)
ax0.loglog(ks_pred_ml, ps_pred_ml*ks_pred_ml**3/2/np.pi**2, '-', color='tab:blue', label='SegUNet', lw=2)
ax0.loglog(ks_pred_sp, ps_pred_sp*ks_pred_sp**3/2/np.pi**2, '-', color='tab:orange', label='Super-Pixel', lw=2)
ax0.loglog(ks_true, ps_true*ks_true**3/2/np.pi**2, 'k--', label='Ground true', lw=2)
ax0.set_yscale('log')
ax1 = plt.subplot(gs[1], sharex=ax0)
ax1.semilogx(ks_true, compare_ml, '-', lw=2)
ax1.semilogx(ks_true, compare_sp, '-', lw=2)
ax1.semilogx(ks_true, np.zeros_like(ks_true), 'k--', lw=2)
ax1.fill_between(ks_true, np.min(compare_ml_tta, axis=0), np.max(compare_ml_tta, axis=0), color='tab:blue', alpha=0.2)
ax1.tick_params(axis='both', length=7, width=1.2, labelsize=15)
ax1.set_xlabel('k (Mpc$^{-1}$)'), ax0.set_ylabel('$\Delta^2_{xx}$')
ax1.set_ylabel('difference (%)', size=15)
ax0.tick_params(axis='both', length=10, width=1.2)
ax0.tick_params(which='minor', axis='both', length=5, width=1.2)
ax1.tick_params(which='minor', axis='both', length=5, width=1.2)
ax0.legend(loc=0, borderpad=0.5)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
plt.savefig('%sPk_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
ds_data = np.vstack((ks_true, np.vstack((ps_true*ks_true**3/2/np.pi**2, np.vstack((np.vstack((ps_pred_ml*ks_pred_ml**3/2/np.pi**2, np.vstack((np.min(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0), np.max(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0))))), ps_pred_sp*ks_pred_sp**3/2/np.pi**2))))))
bsd_data = np.vstack((mfp_true[0], np.vstack((mfp_true[1], np.vstack((np.vstack((mfp_pred_ml[1], np.vstack((np.min(mfp_tta[:,1,:], axis=0), np.max(mfp_tta[:,1,:], axis=0))))), mfp_pred_sp[1]))))))
np.savetxt('%sds_data_i%d.txt' %(PATH_OUT+'data/', i), ds_data.T, fmt='%.6e', delimiter='\t', header='k [Mpc^-1]\tds_true\tds_seg_mean\tds_err_min\tds_err_max\tds_sp')
np.savetxt('%sbsd_data_i%d.txt' %(PATH_OUT+'data/', i), bsd_data.T, fmt='%.6e', delimiter='\t', header='R [Mpc]\tbs_true\tbs_seg_mean\tb_err_min\tbs_err_max\tbs_sp')
new_astr_data = np.vstack((astr_data, phicoef_seg))
new_astr_data = np.vstack((new_astr_data, phicoef_err))
new_astr_data = np.vstack((new_astr_data, phicoef_sp))
new_astr_data = np.vstack((new_astr_data, xn_mask))
new_astr_data = np.vstack((new_astr_data, xn_seg))
new_astr_data = np.vstack((new_astr_data, xn_err))
new_astr_data = np.vstack((new_astr_data, xn_sp))
new_astr_data = np.vstack((new_astr_data, b0_true))
new_astr_data = np.vstack((new_astr_data, b1_true))
new_astr_data = np.vstack((new_astr_data, b2_true))
new_astr_data = np.vstack((new_astr_data, b0_seg))
new_astr_data = np.vstack((new_astr_data, b1_seg))
new_astr_data = np.vstack((new_astr_data, b2_seg))
new_astr_data = np.vstack((new_astr_data, b0_sp))
new_astr_data = np.vstack((new_astr_data, b1_sp))
new_astr_data = np.vstack((new_astr_data, b2_sp))
np.savetxt('%sastro_data.txt' %(PATH_OUT), new_astr_data.T, fmt='%d\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d', header='i\tz\teff_f\tRmfp\tTvir\tx_n\tphi_ML\tphi_err phi_SP\txn_mask xn_seg\txn_err\txn_sp\tb0 true b1\tb2\tb0 ML\tb1\tb2\tb0 SP\tb1\tb2')
np.savetxt('%sastro_data_sample.txt' %(PATH_OUT+'data/'), new_astr_data[:,new_idx].T, fmt='%d\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d', header='i\tz\teff_f\tRmfp\tTvir\tx_n\tphi_ML\tphi_err phi_SP\txn_mask xn_seg\txn_err\txn_sp\tb0 true b1\tb2\tb0 ML\tb1\tb2\tb0 SP\tb1\tb2')
# Plot phi coeff
plt.rcParams['font.size'] = 16
redshift, xfrac, phicoef_seg, phicoef_seg_err, phicoef_sp, xn_mask_true, xn_seg, xn_seg_err, xn_sp = OrderNdimArray(np.loadtxt(PATH_OUT+'astro_data.txt', unpack=True, usecols=(1,5,6,7,8,9,10,11,12)), 1)
print('phi_coef = %.3f +/- %.3f\t(SegUnet)' %(np.mean(phicoef_seg), np.std(phicoef_seg)))
print('phi_coef = %.3f\t\t(Superpixel)' %(np.mean(phicoef_sp)))
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20,8))
#ax0.hlines(y=np.mean(phicoef_seg), xmin=0, xmax=1, ls='--', alpha=0.5)
#ax0.fill_between(x=np.linspace(0, 1, 100), y1=np.mean(phicoef_seg)+np.std(phicoef_seg), y2=np.mean(phicoef_seg)-np.std(phicoef_seg), alpha=0.5, color='lightgray')
# MCC SegUnet
cm = matplotlib.cm.plasma
sc = ax0.scatter(xfrac, phicoef_seg, c=redshift, vmin=7, vmax=9, s=25, cmap=cm, marker='.')
norm = matplotlib.colors.Normalize(vmin=7, vmax=9, clip=True)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cm)
redshift_color = np.array([(mapper.to_rgba(v)) for v in redshift])
for x, y, e, clr in zip(xfrac, phicoef_seg, phicoef_seg_err, redshift_color):
ax0.errorbar(x, y, e, lw=1, marker='o', capsize=3, color=clr)
ax0.set_xlim(xfrac.min()-0.02, xfrac.max()+0.02), ax0.set_xlabel(r'$x_i$')
ax0.set_ylim(-0.02, 1.02), ax0.set_ylabel(r'$r_{\phi}$')
fig.colorbar(sc, ax=ax0, pad=0.01, label=r'$z$')
ax2 = ax0.twinx()
ax2.hist(xfrac, np.linspace(0.09, 0.81, 15), density=True, histtype='step', color='tab:blue', alpha=0.5)
ax2.axes.get_yaxis().set_visible(False)
# MCC comparison
ax1.hlines(y=np.mean(phicoef_seg), xmin=0, xmax=1, ls='--', alpha=0.5, color='tab:blue')
ax1.hlines(y=np.mean(phicoef_sp), xmin=0, xmax=1, ls='--', alpha=0.5, color='tab:orange')
new_x = np.linspace(xfrac.min(), xfrac.max(), 100)
f1 = np.poly1d(np.polyfit(xfrac, phicoef_seg, 10))
ax1.plot(new_x, f1(new_x), label='SegUnet', color='tab:blue')
f2 = np.poly1d(np.polyfit(xfrac, phicoef_sp, 10))
ax1.plot(new_x, f2(new_x), label='Super-Pixel', color='tab:orange')
ax1.set_xlim(xfrac.min()-0.02, xfrac.max()+0.02), ax1.set_xlabel(r'$x_i$')
ax1.set_ylim(-0.02, 1.02), ax1.set_ylabel(r'$r_{\phi}$')
ax1.legend(loc=4)
plt.savefig('%sphi_coef.png' %PATH_OUT, bbox_inches="tight"), plt.clf()
# Plot correlation
fig, (ax0, ax1) = plt.subplots(ncols=2)
ax0.plot(xn_mask_true, xn_mask_true, 'k--')
cm = matplotlib.cm.plasma
sc = ax0.scatter(xn_mask_true, xn_seg, c=redshift, vmin=7, vmax=9, s=25, cmap=cm, marker='.')
norm = matplotlib.colors.Normalize(vmin=7, vmax=9, clip=True)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap='plasma')
redshift_color = np.array([(mapper.to_rgba(v)) for v in redshift])
for x, y, e, clr in zip(xn_mask_true, xn_seg, xn_seg_err, redshift_color):
ax0.errorbar(x, y, e, lw=1, marker='o', capsize=3, color=clr)
ax0.set_xlim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax0.set_xlabel(r'$\rm x_{n,\,true}$')
ax0.set_ylim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax0.set_ylabel(r'$\rm x_{n,\,predict}$')
fig.colorbar(sc, ax=ax0, pad=0.01, label=r'$z$')
ax1.plot(xn_mask_true, xn_mask_true, 'k--', label='Ground True')
ax1.scatter(xn_mask_true, xn_seg, color='tab:blue', marker='o', label='SegUnet')
ax1.scatter(xn_mask_true, xn_sp, color='tab:orange', marker='o', label='Super-Pixel')
ax1.set_xlim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax1.set_xlabel(r'$\rm x_{n,\,true}$')
ax1.set_ylim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax1.set_ylabel(r'$\rm x_{n,\,predict}$')
plt.legend(loc=4)
plt.savefig('%scorr.png' %PATH_OUT, bbox_inches="tight"), plt.clf()
# Betti numbers plot
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(23,5), sharex=True)
h = np.histogram(xn_mask_true, np.linspace(1e-5, 1., 20), density=True)
new_x = h[1][:-1]+0.5*(h[1][1:]-h[1][:-1])
# Betti 0
f_b0_true = np.array([np.mean(b0_true[(xn_mask_true < h[1][i+1]) * (xn_mask_true >= h[1][i])]) for i in range(h[1].size-1)])
ax0.plot(new_x, f_b0_true, 'k--', label='Ground True')
f_b0_seg = np.array([np.mean(b0_seg[(xn_mask_true < h[1][i+1]) * (xn_mask_true >= h[1][i])]) for i in range(h[1].size-1)])
ax0.plot(new_x, f_b0_seg, label='SegUnet', color='tab:blue', marker='o')
f_b0_sp = np.array([np.mean(b0_sp[(xn_mask_true < h[1][i+1]) * (xn_mask_true >= h[1][i])]) for i in range(h[1].size-1)])
ax0.plot(new_x, f_b0_sp, label='Super-Pixel', color='tab:orange', marker='o')
ax0.legend(loc=1)
ax0.set_xlabel(r'$\rm x^v_{HI}$', size=20), ax0.set_ylabel(r'$\rm\beta_0$', size=20)
# Betti 1
f_b1_true = np.array([np.mean(b1_true[(xn_mask_true < h[1][i+1]) * (xn_mask_true >= h[1][i])]) for i in range(h[1].size-1)])
# -*- coding: utf-8 -*-
import unittest
import numpy
"""
*******************************************************************************
Tests of the quantarhei.qm.LindbladForm class
*******************************************************************************
"""
from quantarhei.qm import LindbladForm
from quantarhei.qm import ElectronicLindbladForm
from quantarhei.qm import Operator
from quantarhei.qm import SystemBathInteraction
from quantarhei.qm import ReducedDensityMatrixPropagator
from quantarhei.qm import ReducedDensityMatrix
from quantarhei.qm import ProjectionOperator
from quantarhei import Hamiltonian
from quantarhei import energy_units
from quantarhei import TimeAxis
from quantarhei import eigenbasis_of, Manager
class TestLindblad(unittest.TestCase):
"""Tests for the LindbladForm class
"""
def setUp(self,verbose=False):
self.verbose = verbose
#
# Lindblad projection operators
#
        K12 = numpy.array([[0.0, 1.0],[0.0, 0.0]],dtype=numpy.float64)
        K21 = numpy.array([[0.0, 0.0],[1.0, 0.0]],dtype=numpy.float64)
KK12 = Operator(data=K12)
KK21 = Operator(data=K21)
self.KK12 = KK12
self.KK21 = KK21
#
# Linbdlad rates
#
self.rates = (1.0/100.0, 1.0/200.0)
#
# System-bath interaction using operators and rates in site basis
#
self.sbi1 = SystemBathInteraction([KK12,KK21],
rates=self.rates)
self.sbi2 = SystemBathInteraction([KK12,KK21],
rates=self.rates)
#
# Test Hamiltonians
#
with energy_units("1/cm"):
h1 = [[100.0, 0.0],[0.0, 0.0]]
h2 = [[100.0, 0.0],[0.0, 0.0]]
self.H1 = Hamiltonian(data=h1)
self.H2 = Hamiltonian(data=h2)
h3 = [[100.0, 20.0],[20.0, 0.0]]
self.H3 = Hamiltonian(data=h3)
# less trivial Hamiltonian
h4 = [[100.0, 200.0, 30.0 ],
[200.0, 50.0, -100.0],
[30.0, -100.0, 0.0 ]]
self.H4 = Hamiltonian(data=h4)
h4s = [[100.0, 0.0, 0.0 ],
[0.0, 50.0, 0.0],
[0.0, 0.0, 0.0 ]]
self.H4s = Hamiltonian(data=h4s)
#
# Projection operators in eigenstate basis
#
with eigenbasis_of(self.H3):
K_12 = ProjectionOperator(0, 1, dim=2)
K_21 = ProjectionOperator(1, 0, dim=2)
self.K_12 = K_12
self.K_21 = K_21
with eigenbasis_of(self.H4):
Ke_12 = ProjectionOperator(0, 1, dim=3)
Ke_21 = ProjectionOperator(1, 0, dim=3)
Ke_23 = ProjectionOperator(1, 2, dim=3)
Ke_32 = ProjectionOperator(2, 1, dim=3)
Ks_12 = ProjectionOperator(0, 1, dim=3)
Ks_21 = ProjectionOperator(1, 0, dim=3)
Ks_23 = ProjectionOperator(1, 2, dim=3)
Ks_32 = ProjectionOperator(2, 1, dim=3)
self.rates4 = [1.0/100, 1.0/200, 1.0/150, 1.0/300]
#
# System-bath operators defined in exciton basis
#
self.sbi3 = SystemBathInteraction([K_12, K_21],
rates=self.rates)
self.sbi4e = SystemBathInteraction([Ke_12, Ke_21, Ke_23, Ke_32],
rates=self.rates4)
self.sbi4s = SystemBathInteraction([Ks_12, Ks_21, Ks_23, Ks_32],
rates=self.rates4)
def test_comparison_of_rates(self):
"""Testing that Lindblad tensor and rate matrix are compatible
"""
tensor = True
# matrix = True
dim = self.H1.dim
KT = numpy.zeros((dim,dim), dtype=numpy.float64)
KM = numpy.zeros((dim,dim), dtype=numpy.float64)
if tensor:
#print(self.H1)
LT = LindbladForm(self.H1, self.sbi1, as_operators=False)
for n in range(2):
for m in range(2):
#print(n,m,numpy.real(RT.data[n,n,m,m]))
KT[n,m] = numpy.real(LT.data[n,n,m,m])
KM = numpy.zeros((dim,dim))
KM[0,0] = -self.rates[1]
KM[1,1] = -self.rates[0]
KM[0,1] = self.rates[0]
KM[1,0] = self.rates[1]
numpy.testing.assert_allclose(KT,KM, rtol=1.0e-2)
def test_comparison_of_dynamics(self):
"""Testing site basis dynamics by Lindblad
"""
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False)
time = TimeAxis(0.0, 1000, 1.0)
prop1 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop2 = ReducedDensityMatrixPropagator(time, self.H1, LT2)
rho0 = ReducedDensityMatrix(dim=self.H1.dim)
rho0.data[1,1] = 1.0
rhot1 = prop1.propagate(rho0)
rhot2 = prop2.propagate(rho0)
numpy.testing.assert_allclose(rhot1.data,rhot2.data) #, rtol=1.0e-2)
def test_propagation_in_different_basis(self):
"""(LINDBLAD) Testing comparison of propagations in different bases
"""
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False)
time = TimeAxis(0.0, 1000, 1.0)
prop1 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop2 = ReducedDensityMatrixPropagator(time, self.H1, LT2)
rho0 = ReducedDensityMatrix(dim=self.H1.dim)
rho0.data[1,1] = 1.0
with eigenbasis_of(self.H1):
rhot1_e = prop1.propagate(rho0)
with eigenbasis_of(self.H1):
rhot2_e = prop2.propagate(rho0)
rhot1_l = prop1.propagate(rho0)
rhot2_l = prop2.propagate(rho0)
numpy.testing.assert_allclose(rhot1_l.data, rhot1_e.data)
numpy.testing.assert_allclose(rhot2_l.data, rhot2_e.data)
numpy.testing.assert_allclose(rhot1_e.data, rhot2_e.data) #, rtol=1.0e-2)
def test_transformation_in_different_basis(self):
"""(LINDBLAD) Testing transformations into different bases
"""
#Manager().warn_about_basis_change = True
#Manager().warn_about_basis_changing_objects = True
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True, name="LT1")
LT2 = LindbladForm(self.H1, self.sbi1, as_operators=False, name="LT2")
rho0 = ReducedDensityMatrix(dim=self.H1.dim, name="ahoj")
with eigenbasis_of(self.H1):
rho0.data[1,1] = 0.7
rho0.data[0,0] = 0.3
with eigenbasis_of(self.H1):
rhot1_e = LT1.apply(rho0, copy=True)
with eigenbasis_of(self.H1):
rhot2_e = LT2.apply(rho0, copy=True)
rhot1_l = LT1.apply(rho0, copy=True)
rhot2_l = LT2.apply(rho0, copy=True)
numpy.testing.assert_allclose(rhot1_l.data, rhot1_e.data)
numpy.testing.assert_allclose(rhot2_l.data, rhot2_e.data)
numpy.testing.assert_allclose(rhot1_e.data, rhot2_e.data) #, rtol=1.0e-2)
def test_comparison_of_exciton_dynamics(self):
"""Testing exciton basis dynamics by Lindblad
"""
# site basis form to be compared with
LT1 = LindbladForm(self.H1, self.sbi1, as_operators=True)
# exciton basis forms
LT13 = LindbladForm(self.H3, self.sbi3, as_operators=True)
LT23 = LindbladForm(self.H3, self.sbi3, as_operators=False)
LT4e = LindbladForm(self.H4, self.sbi4e, as_operators=True)
LT4s = LindbladForm(self.H4s, self.sbi4s, as_operators=True)
time = TimeAxis(0.0, 1000, 1.0)
#
# Propagators
#
prop0 = ReducedDensityMatrixPropagator(time, self.H1, LT1)
prop1 = ReducedDensityMatrixPropagator(time, self.H3, LT13)
prop2 = ReducedDensityMatrixPropagator(time, self.H3, LT23)
prop4e = ReducedDensityMatrixPropagator(time, self.H4, LT4e)
prop4s = ReducedDensityMatrixPropagator(time, self.H4s, LT4s)
#
# Initial conditions
#
rho0 = ReducedDensityMatrix(dim=self.H3.dim)
rho0c = ReducedDensityMatrix(dim=self.H1.dim) # excitonic
with eigenbasis_of(self.H3):
rho0c.data[1,1] = 1.0
rho0.data[1,1] = 1.0
rho04e = ReducedDensityMatrix(dim=self.H4.dim)
rho04s = ReducedDensityMatrix(dim=self.H4.dim)
with eigenbasis_of(self.H4):
rho04e.data[2,2] = 1.0
rho04s.data[2,2] = 1.0
#
# Propagations
#
rhotc = prop0.propagate(rho0c)
rhot1 = prop1.propagate(rho0)
rhot2 = prop2.propagate(rho0)
rhot4e = prop4e.propagate(rho04e)
rhot4s = prop4s.propagate(rho04s)
# propagation with operator- and tensor forms should be the same
numpy.testing.assert_allclose(rhot1.data,rhot2.data) #, rtol=1.0e-2)
#
# Population time evolution by Lindblad is independent
# of the level structure and basis, as long as I compare
# populations in basis in which the Lindblad form was defined
#
P = numpy.zeros((2, time.length))
Pc = numpy.zeros((2, time.length))
P4e = numpy.zeros((3, time.length))
P4s = numpy.zeros((3, time.length))
with eigenbasis_of(self.H3):
for i in range(time.length):
P[0,i] = numpy.real(rhot1.data[i,0,0]) # population of exciton 0
P[1,i] = numpy.real(rhot1.data[i,1,1]) # population of exciton 1
for i in range(time.length):
Pc[0,i] = numpy.real(rhotc.data[i,0,0]) # population of exciton 0
Pc[1,i] = numpy.real(rhotc.data[i,1,1]) # population of exciton 1
# we compare populations
numpy.testing.assert_allclose(Pc,P) #, rtol=1.0e-2)
with eigenbasis_of(self.H4):
for i in range(time.length):
P4e[0,i] = numpy.real(rhot4e.data[i,0,0]) # population of exciton 0
P4e[1,i] = numpy.real(rhot4e.data[i,1,1]) # population of exciton 1
P4e[2,i] = numpy.real(rhot4e.data[i,2,2]) # population of exciton 2
import os
import time
import pandas as pd
import numpy as np
from sys import platform
import matplotlib as mpl
if platform == "darwin": # OS X
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from datetime import datetime
from github import Github
from github import GithubException
from services import gh_api
LOWER_BOUND = 1.791759469228055
UPPER_BOUND = 42.40071186221785
TIMESTAMP_LOWER_BOUND = "2012-12-12 17:51:25"
images_folder = "images-cache"
csv_folder = "data/repositories-timeseries.csv"
data = None
'''
Create some plots that contains chronological data for a repository:
- stars count
- forks count
- watchers count
- contributors count
- rating
The intended dataframe is the one stored in: resources/repositories-timeseries.csv
The np arrays can be composed of either four or five elements:
- the first four are the historical data points of the repository
- the fifth point is fetched via the github api; this can most likely fail due to:
- error 404: the repository does not exist anymore
- error 502: temporary github api problem
- exceeded the api call limit
Also fetches some recent commits, closed issues and open issues and plots them.
Note: it is assumed that the repository name that is given as a input to the fuctions
does actually exist in the dataframe
(there is no problem if it does not exist on github)
'''
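# Illustrative usage sketch (added for clarity; not part of the original script;
# the token and repository name below are placeholders):
#
#   gh_client = Github("<personal-access-token>")
#   df = pd.read_csv(csv_folder)
#   stars = get_stars_count_timeseries(df, gh_client, "octocat/Hello-World")
#   plt.plot(stars), plt.savefig("stars.png")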
def _get_stars_count(github_client, repository_name):
'''
Get the Stars Count for a given repository
'''
try:
repo = github_client.get_repo(repository_name)
count = repo.stargazers_count
return count
except GithubException as error:
if error.status == 404:
return None
# most likely a 502
else:
time.sleep(1)
try:
repo = github_client.get_repo(repository_name)
count = repo.stargazers_count
return count
except GithubException as error:
return None
def _get_forks_count(github_client, repository_name):
'''
Get the Forks Count for a given repository
'''
try:
repo = github_client.get_repo(repository_name)
count = repo.forks_count
return count
except GithubException as error:
if error.status == 404:
return None
# most likely a 502
else:
time.sleep(1)
try:
repo = github_client.get_repo(repository_name)
count = repo.forks_count
return count
except GithubException as error:
return None
def _get_watchers_count(github_client, repository_name):
'''
Get the Watchers Count for a given repository
'''
try:
repo = github_client.get_repo(repository_name)
count = repo.subscribers_count
return count
except GithubException as error:
if error.status == 404:
return None
# most likely a 502
else:
time.sleep(1)
try:
repo = github_client.get_repo(repository_name)
count = repo.subscribers_count
return count
except GithubException as error:
return None
def _get_contributors_count(github_client, repository_name):
'''
Get the Contributors Count for a given repository
'''
try:
repo = github_client.get_repo(repository_name)
count = repo.get_contributors().totalCount
return count
except GithubException as error:
if error.status == 404:
return None
# most likely a 502
else:
time.sleep(1)
try:
repo = github_client.get_repo(repository_name)
count = repo.get_contributors().totalCount
return count
except GithubException as error:
return None
def _get_rating(github_client, repository_name):
'''
Determine the Rating for a given repository
'''
try:
repo = github_client.get_repo(repository_name)
star_count = repo.stargazers_count
fork_count = repo.forks_count
contributor_count = repo.get_contributors().totalCount
watchers_count = repo.subscribers_count
open_issues = repo.get_issues(state = 'open').totalCount
updated_timestamp = repo.updated_at
upd_timestamp = (updated_timestamp - datetime.strptime(TIMESTAMP_LOWER_BOUND, '%Y-%m-%d %H:%M:%S')).days
has_pages = 0
for branch in repo.get_branches():
if branch.name == "gh-pages":
has_pages = 1
break
rating = has_pages + int(repo.has_issues) + int(repo.has_wiki) - int(repo.fork) +\
np.log(star_count + 1) + np.log(fork_count + 1) + np.log(contributor_count + 1) +\
np.log(watchers_count + 1) - np.log(open_issues + 1) + np.log(upd_timestamp + 1)
rating = (rating - LOWER_BOUND) / (UPPER_BOUND - LOWER_BOUND)
rating = round(rating * 5, 2)
if rating > 5:
rating = 5
elif rating < 0:
rating = 0
return rating
except GithubException as error:
if error.status == 404:
return None
# most likely a 502
else:
time.sleep(1)
try:
repo = github_client.get_repo(repository_name)
star_count = repo.stargazers_count
fork_count = repo.forks_count
contributor_count = repo.get_contributors().totalCount
watchers_count = repo.subscribers_count
open_issues = repo.get_issues(state = 'open').totalCount
updated_timestamp = repo.updated_at
upd_timestamp = (updated_timestamp - datetime.strptime(TIMESTAMP_LOWER_BOUND, '%Y-%m-%d %H:%M:%S')).days
has_pages = 0
for branch in repo.get_branches():
if branch.name == "gh-pages":
has_pages = 1
break
rating = has_pages + int(repo.has_issues) + int(repo.has_wiki) - int(repo.fork) +\
np.log(star_count + 1) + np.log(fork_count + 1) + np.log(contributor_count + 1) +\
np.log(watchers_count + 1) - np.log(open_issues + 1) + np.log(upd_timestamp + 1)
rating = (rating - LOWER_BOUND) / (UPPER_BOUND - LOWER_BOUND)
rating = round(rating * 5, 2)
if rating > 5:
rating = 5
elif rating < 0:
rating = 0
return rating
except GithubException as error:
return None
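# Worked example of the rating formula above (hypothetical numbers, for intuition
# only; not part of the original script): a non-fork repository with gh-pages,
# issues and wiki enabled, 1000 stars, 200 forks, 50 contributors, 100 watchers,
# 20 open issues and last updated ~2000 days after TIMESTAMP_LOWER_BOUND scores
# 3 + ln(1001) + ln(201) + ln(51) + ln(101) - ln(21) + ln(2001) ~ 28.3,
# which normalises to roughly (28.3 - 1.79) / (42.40 - 1.79) * 5 ~ 3.3 out of 5.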
def get_stars_count_timeseries(dataframe, github_client, repository_name):
'''
Returns the Stars Count Timeseries for a given repository
'''
# get the historical data
timeseries = dataframe[dataframe["Name with Owner"] == repository_name]["Stars Count_1"].values
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Stars Count_2"].values)
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Stars Count_3"].values)
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Stars Count_4"].values)
try:
# try to get the latest value from the repository via the github api
stars_count = _get_stars_count(github_client, repository_name)
if stars_count is not None:
timeseries = np.append(timeseries, stars_count)
# most likely due to a socket timeout caused by running out of github api calls
except: pass
return timeseries
def get_forks_count_timeseries(dataframe, github_client, repository_name):
'''
Returns the Forks Count Timeseries for a given repository
'''
# get the historical data
timeseries = dataframe[dataframe["Name with Owner"] == repository_name]["Forks Count_1"].values
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Forks Count_2"].values)
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Forks Count_3"].values)
timeseries = np.append(timeseries,
dataframe[dataframe["Name with Owner"] == repository_name]["Forks Count_4"].values)
try:
# try to get the latest value from the repository via the github api
forks_count = _get_forks_count(github_client, repository_name)
if forks_count is not None:
timeseries = np.append(timeseries, forks_count)
# most likely due to a socket timeout caused by running out of github api calls
except: pass
return timeseries
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from scipy.sparse.linalg import spsolve
import numpy as np
from GridCal.Engine import compile_snapshot_circuit, SnapshotData, TransformerControlType, ConverterControlType, FileOpen
import os
import time
from scipy.sparse import lil_matrix, diags
import scipy.sparse as sp
def determine_branch_indices(circuit: SnapshotData):
"""
This function fills in the lists of indices to control different magnitudes
:param circuit: Instance of AcDcSnapshotCircuit
:returns idx_sh, idx_qz, idx_vf, idx_vt, idx_qt
VSC Control modes:
in the paper's scheme:
from -> DC
to -> AC
| Mode | const.1 | const.2 | type |
-------------------------------------------------
| 1 | theta | Vac | I |
| 2 | Pf | Qac | I |
| 3 | Pf | Vac | I |
-------------------------------------------------
| 4 | Vdc | Qac | II |
| 5 | Vdc | Vac | II |
-------------------------------------------------
| 6 | Vdc droop | Qac | III |
| 7 | Vdc droop | Vac | III |
-------------------------------------------------
Indices where each control goes:
mismatch → | ∆Pf Qf Q@f Q@t ∆Qt
variable → | Ɵsh Beq m m Beq
Indices → | Ish Iqz Ivf Ivt Iqt
------------------------------------
VSC 1 | - 1 - 1 - | AC voltage control (voltage “to”)
VSC 2 | 1 1 - - 1 | Active and reactive power control
VSC 3 | 1 1 - 1 - | Active power and AC voltage control
VSC 4 | - - 1 - 1 | Dc voltage and Reactive power flow control
VSC 5 | - - - 1 1 | Ac and Dc voltage control
------------------------------------
Transformer 0| - - - - - | Fixed transformer
Transformer 1| 1 - - - - | Phase shifter → controls power
Transformer 2| - - 1 - - | Control the voltage at the “from” side
Transformer 3| - - - 1 - | Control the voltage at the “to” side
Transformer 4| 1 - 1 - - | Control the power flow and the voltage at the “from” side
Transformer 5| 1 - - 1 - | Control the power flow and the voltage at the “to” side
------------------------------------
"""
# indices in the global branch scheme
iPfsh = list() # indices of the branches controlling Pf flow
iQfma = list()
iBeqz = list() # indices of the branches when forcing the Qf flow to zero (aka "the zero condition")
iBeqv = list() # indices of the branches when controlling Vf
iVtma = list() # indices of the branches when controlling Vt
iQtma = list() # indices of the branches controlling the Qt flow
iPfdp = list()
iVscL = list() # indices of the converters
for k, tpe in enumerate(circuit.branch_data.control_mode):
if tpe == TransformerControlType.fixed:
pass
elif tpe == TransformerControlType.power:
iPfsh.append(k)
elif tpe == TransformerControlType.v_to:
iVtma.append(k)
elif tpe == TransformerControlType.power_v_to:
iPfsh.append(k)
iVtma.append(k)
# VSC ----------------------------------------------------------------------------------------------------------
elif tpe == ConverterControlType.type_1_free: # 1a:Free
iBeqz.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_1_pf: # 1b:Pflow
iPfsh.append(k)
iBeqz.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_1_qf: # 1c:Qflow
iBeqz.append(k)
iQtma.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_1_vac: # 1d:Vac
iBeqz.append(k)
iVtma.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_2_vdc: # 2a:Vdc
iPfsh.append(k)
iBeqv.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_2_vdc_pf: # 2b:Vdc+Pflow
iPfsh.append(k)
iBeqv.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_3: # 3a:Droop
iPfsh.append(k)
iBeqz.append(k)
iPfdp.append(k)
iVscL.append(k)
elif tpe == ConverterControlType.type_4: # 4a:Droop-slack
iPfdp.append(k)
iVscL.append(k)
elif tpe == 0:
pass # required for the no-control case
else:
raise Exception('Unknown control type:' + str(tpe))
# FUBM- Saves the "from" bus identifier for Vf controlled by Beq
# (Converters type II for Vdc control)
VfBeqbus = circuit.F[iBeqv]
# FUBM- Saves the "to" bus identifier for Vt controlled by ma
# (Converters and Transformers)
Vtmabus = circuit.T[iVtma]
return iPfsh, iQfma, iBeqz, iBeqv, iVtma, iQtma, iPfdp, iVscL, VfBeqbus, Vtmabus
def compute_converter_losses(V, It, F, alpha1, alpha2, alpha3, iVscL):
"""
Compute the converter losses according to the IEC 62751-2
:param V:
:param It:
:param F:
:param alpha1:
:param alpha2:
:param alpha3:
:param iVscL:
:return:
"""
# FUBM- Standard IEC 62751-2 Ploss Correction for VSC losses
Ivsc = np.abs(It[iVscL])
PLoss_IEC = alpha3[iVscL] * np.power(Ivsc, 2)
PLoss_IEC += alpha2[iVscL] * Ivsc
PLoss_IEC += alpha1[iVscL]
# compute G-switch
Gsw = np.zeros(len(F))
Gsw[iVscL] = PLoss_IEC / np.power(np.abs(V[F[iVscL]]), 2) # FUBM- VSC Gsw
return Gsw
def compile_y_acdc(branch_active, Cf, Ct, C_bus_shunt, shunt_admittance, shunt_active, ys, B, Sbase,
m, theta, Beq, Gsw):
"""
Compile the admittance matrices using the variable elements
:param branch_active:
:param Cf:
:param Ct:
:param C_bus_shunt:
:param shunt_admittance:
:param shunt_active:
:param ys:
:param B:
:param Sbase:
:param m: array of tap modules (for all branches, regardless of their type)
:param theta: array of tap angles (for all branches, regardless of their type)
:param Beq: Array of equivalent susceptance
:param Gsw: Array of branch (converter) losses
:return: Ybus, Yf, Yt, tap
"""
# form the connectivity matrices with the states applied -------------------------------------------------------
br_states_diag = sp.diags(branch_active)
Cf = br_states_diag * Cf
Ct = br_states_diag * Ct
# SHUNT --------------------------------------------------------------------------------------------------------
Yshunt_from_devices = C_bus_shunt * (shunt_admittance * shunt_active / Sbase)
yshunt_f = Cf * Yshunt_from_devices
yshunt_t = Ct * Yshunt_from_devices
# form the admittance matrices ---------------------------------------------------------------------------------
bc2 = 1j * B / 2 # shunt conductance
# mp = circuit.k * m # k is already filled with the appropriate value for each type of branch
tap = m * np.exp(1.0j * theta)
"""
Beq= stat .* branch(:, BEQ); %%FUBM- VSC Equivalent Reactor for absorbing or supplying reactive power and zero constraint in DC side
Gsw= stat .* branch(:, GSW); %%FUBM- VSC Switching losses
k2 = branch(:, K2); %%FUBM- VSC constant depending of how many levels does the VSC is simulating. Default k2 for branches = 1, Default k2 for VSC = sqrt(3)/2
Ytt = Ys + 1j*Bc/2;
Yff = Gsw+( (Ytt+1j*Beq) ./ ((k2.^2).*tap .* conj(tap)) ); %%FUBM- FUBM formulation
Yft = - Ys ./ ( k2.*conj(tap) ); %%FUBM- FUBM formulation
Ytf = - Ys ./ ( k2.*tap );
"""
# compose the primitives
Yff = Gsw + (ys + bc2 + 1.0j * Beq + yshunt_f) / (m * m)
Yft = -ys / np.conj(tap)
Ytf = -ys / tap
Ytt = ys + bc2 + yshunt_t
# compose the matrices
Yf = sp.diags(Yff) * Cf + sp.diags(Yft) * Ct
Yt = sp.diags(Ytf) * Cf + sp.diags(Ytt) * Ct
Ybus = sp.csc_matrix(Cf.T * Yf + Ct.T * Yt)
return Ybus, Yf, Yt, tap
def dSbus_dV(Ybus, V):
"""
Derivatives of the power injections w.r.t the voltage
:param Ybus: Admittance matrix
:param V: complex voltage arrays
:return: dSbus_dVa, dSbus_dVm
"""
diagV = diags(V)
diagVnorm = diags(V / np.abs(V))
Ibus = Ybus * V
diagIbus = diags(Ibus)
dSbus_dVa = 1j * diagV * np.conj(diagIbus - Ybus * diagV) # dSbus / dVa
dSbus_dVm = diagV * np.conj(Ybus * diagVnorm) + np.conj(diagIbus) * diagVnorm # dSbus / dVm
return dSbus_dVa, dSbus_dVm
def dSbr_dV(Yf, Yt, V, F, T, Cf, Ct):
"""
Derivatives of the branch power w.r.t the branch voltage modules and angles
:param Yf: Admittances matrix of the branches with the "from" buses
:param Yt: Admittances matrix of the branches with the "to" buses
:param V: Array of voltages
:param F: Array of branch "from" bus indices
:param T: Array of branch "to" bus indices
:param Cf: Connectivity matrix of the branches with the "from" buses
:param Ct: Connectivity matrix of the branches with the "to" buses
:return: dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm
"""
Yfc = np.conj(Yf)
Ytc = np.conj(Yt)
Vc = np.conj(V)
Ifc = Yfc * Vc # conjugate of "from" current
Itc = Ytc * Vc # conjugate of "to" current
diagIfc = diags(Ifc)
diagItc = diags(Itc)
Vf = V[F]
Vt = V[T]
diagVf = diags(Vf)
diagVt = diags(Vt)
diagVc = diags(Vc)
Vnorm = V / np.abs(V)
diagVnorm = diags(Vnorm)
diagV = diags(V)
CVf = Cf * diagV
CVt = Ct * diagV
CVnf = Cf * diagVnorm
CVnt = Ct * diagVnorm
dSf_dVa = 1j * (diagIfc * CVf - diagVf * Yfc * diagVc)
dSf_dVm = diagVf * np.conj(Yf * diagVnorm) + diagIfc * CVnf
dSt_dVa = 1j * (diagItc * CVt - diagVt * Ytc * diagVc)
dSt_dVm = diagVt * np.conj(Yt * diagVnorm) + diagItc * CVnt
return dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm
def d_dsh(nb, nl, iPxsh, F, T, Ys, k2, tap, V):
"""
This function computes the derivatives of Sbus, Sf and St w.r.t. Ɵsh
- dSbus_dPfsh, dSf_dPfsh, dSt_dPfsh -> if iPxsh=iPfsh
- dSbus_dPfdp, dSf_dPfdp, dSt_dPfdp -> if iPxsh=iPfdp
:param nb: number of buses
:param nl: number of branches
:param iPxsh: array of indices {iPfsh or iPfdp}
:param F: Array of branch "from" bus indices
:param T: Array of branch "to" bus indices
:param Ys: Array of branch series admittances
:param k2: Array of "k2" parameters
:param tap: Array of branch complex taps (ma * exp(1j * theta_sh)
:param V: Array of complex voltages
:return:
- dSbus_dPfsh, dSf_dPfsh, dSt_dPfsh -> if iPxsh=iPfsh
- dSbus_dPfdp, dSf_dPfdp, dSt_dPfdp -> if iPxsh=iPfdp
"""
dSbus_dPxsh = lil_matrix((nb, len(iPxsh)), dtype=complex)
dSf_dshx2 = lil_matrix((nl, len(iPxsh)), dtype=complex)
dSt_dshx2 = lil_matrix((nl, len(iPxsh)), dtype=complex)
for k, idx in enumerate(iPxsh):
f = F[idx]
t = T[idx]
# Partials of Ytt, Yff, Yft and Ytf w.r.t. Ɵ shift
ytt_dsh = 0.0
yff_dsh = 0.0
yft_dsh = -Ys[idx] / (-1j * k2[idx] * np.conj(tap[idx]))
ytf_dsh = -Ys[idx] / (1j * k2[idx] * tap[idx])
# Partials of S w.r.t. Ɵ shift
val_f = V[f] * np.conj(yft_dsh * V[t])
val_t = V[t] * np.conj(ytf_dsh * V[f])
# -*- coding: utf-8 -*-
"""
Document clustering experiments (k-means and k-medoids) on a document-term dataset.
"""
import itertools
import logging
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.decomposition import PCA
# Activates Verbose on all models.
DEBUG = 0
# Name of the dataset directory.
DATASET_PATH = 'documents/data.csv'
# CSV FILE HAS 19924 ROWS AND 2209 COLUMNS
# EACH ROW REPRESENTS A DOCUMENT
def predict_kmedoids_labels(clusters, n):
labels = np.array([0 for x in range(n)])
for i, rows in clusters.items():
for j in rows:
labels[j] = i
return labels
def kMedoids(data, k, tmax=100):
# determine dimensions of distance matrix data
m, n = data.shape
if k > n:
raise Exception('too many medoids')
# randomly initialize an array of k medoid indices
medoids = np.arange(n)
np.random.shuffle(medoids)
medoids = np.sort(medoids[:k])
# create a copy of the array of medoid indices
new_medoids = np.copy(medoids)
# initialize a dictionary to represent clusters
clusters = {}
for t in range(tmax):
i = 0
taken_values = np.zeros(n)
for kappa in medoids:
clusters[i] = np.array([kappa])
taken_values[kappa] = 1
i += 1
# determine clusters, i. e. arrays of data indices
J = np.argmin(data[:,medoids], axis=1)
for kappa in range(k):
neighbors = np.where(J==kappa)[0]
if len(neighbors) > 1:
clusters[kappa] = neighbors
for i in clusters[kappa]:
if taken_values[i] == 1 and i != medoids[kappa]:
index = np.argwhere(clusters[kappa]==i)
clusters[kappa] = np.delete(clusters[kappa], index)
taken_values[i] = 1
# update cluster medoids
for kappa in range(k):
J = np.mean(data[np.ix_(clusters[kappa],clusters[kappa])],axis=1)
j = np.argmin(J)
new_medoids[kappa] = clusters[kappa][j]
new_medoids.sort()
# check for convergence
if np.array_equal(medoids, new_medoids):
break
medoids = np.copy(new_medoids)
else:
# final update of cluster memberships
J = np.argmin(data[:,medoids], axis=1)
for kappa in range(k):
clusters[kappa] = np.where(J==kappa)[0]
import argparse
import os
import numpy as np
import pandas as pd
from matplotlib.lines import Line2D
from ovis.reporting.style import *
from ovis.reporting.style import set_matplotlib_style
from ovis.reporting.utils import smooth, update_labels, lighten
from ovis.utils.utils import Header
parser = argparse.ArgumentParser()
parser.add_argument('--figure', default='left', help='[left, right]')
parser.add_argument('--root', default='reports/', help='experiment directory')
parser.add_argument('--exp', default='', type=str,
help='experiment id [default use the exp name specified in the Readme.md]')
parser.add_argument('--dataset', default='binmnist', type=str,
help='dataset id')
# keys
parser.add_argument('--style_key', default='iw', help='style key')
parser.add_argument('--metric', default='train:loss/L_k', help='metric to display')
# plot config
parser.add_argument('--desaturate', default=0.9, type=float, help='desaturate hue')
parser.add_argument('--lighten', default=1., type=float, help='lighten hue')
parser.add_argument('--alpha', default=0.9, type=float, help='opacity')
parser.add_argument('--linewidth', default=1.2, type=float, help='line width')
opt = parser.parse_args()
# matplotlibg style
set_matplotlib_style()
plot_style = {
'linewidth': opt.linewidth,
'alpha': opt.alpha
}
# experiment directory
default_exps = {'left': 'sigmoid-belief-network-inc=iwbound', 'right': 'sigmoid-belief-network-inc=iwrbound'}
if opt.exp == '':
root = os.path.join(opt.root, default_exps[opt.figure])
else:
root = os.path.join(opt.root, opt.exp)
# read data
data = pd.read_csv(os.path.join(root, 'curves.csv'))
filtered_data = data[data['dataset'] == opt.dataset]
filtered_data = filtered_data[filtered_data['_key'] == opt.metric]
print(data)
# plot the figure
figure = plt.figure(figsize=(PLOT_WIDTH, 1.3 * PLOT_HEIGHT), dpi=DPI)
ax = plt.gca()
# color
hue_order = list(filtered_data['estimator'].unique())
palette = [ESTIMATOR_STYLE[h_key]['color'] for h_key in hue_order]
palette = [sns.desaturate(c, opt.desaturate) for c in palette]
palette = [lighten(c, opt.lighten) for c in palette]
# linestyles & markers
style_order = list(sorted(filtered_data[opt.style_key].unique()))
line_styles = [":", "--", "-"]
markers = ["x", "^", "o"]
# draw
with Header("Records"):
for e, estimator in enumerate(hue_order):
for s, style in enumerate(style_order):
sub_df = filtered_data[(filtered_data['estimator'] == estimator) & (filtered_data[opt.style_key] == style)]
sub_df = sub_df.groupby('step')['_value'].mean()
x = sub_df.index.values
y = sub_df.values
color = palette[hue_order.index(estimator)]
if len(y):
y = smooth(y, window_len=15)
plt.plot(x, y, color=color, linestyle=line_styles[s], **plot_style)
idx = np.round(np.linspace(0, len(x) - 1, 6)).astype(int)
print(f"{estimator} - {opt.style_key} = {style} : max. {opt.metric} = {max(y):.3f}")
marker = markers[s]
# marker = ESTIMATOR_STYLE[estimator]['marker']
plt.plot(x[idx], y[idx], color=color, linestyle="", marker=marker, markersize=5, alpha=0.9)
# set axis labels
ax.set_ylabel(opt.metric)
update_labels(np.array(ax))
"""Test for helper.py"""
import pickle
import numpy as np
import pytest
import torch
from sklearn.datasets import make_classification
class TestSliceDict:
def assert_dicts_equal(self, d0, d1):
assert d0.keys() == d1.keys()
for key in d0.keys():
assert np.allclose(d0[key], d1[key])
@pytest.fixture
def data(self):
X, y = make_classification(100, 20, n_informative=10, random_state=0)
return X.astype(np.float32), y
@pytest.fixture(scope='session')
def sldict_cls(self):
from scripts.study_case.ID_12.skorch.helper import SliceDict
return SliceDict
@pytest.fixture
def sldict(self, sldict_cls):
return sldict_cls(
f0=np.arange(4),
f1=np.arange(12).reshape(4, 3),
)
def test_init_inconsistent_shapes(self, sldict_cls):
with pytest.raises(ValueError) as exc:
sldict_cls(f0=np.ones((10, 5)), f1=np.ones((11, 5)))
assert str(exc.value) == (
"Initialized with items of different lengths: 10, 11")
@pytest.mark.parametrize('item', [
np.ones(4),
np.ones((4, 1)),
np.ones((4, 4)),
np.ones((4, 10, 7)),
np.ones((4, 1, 28, 28)),
])
def test_set_item_correct_shape(self, sldict, item):
# does not raise
sldict['f2'] = item
@pytest.mark.parametrize('item', [
np.ones(3),
np.ones((1, 100)),
np.ones((5, 1000)),
np.ones((1, 100, 10)),
np.ones((28, 28, 1, 100)),
])
def test_set_item_incorrect_shape_raises(self, sldict, item):
with pytest.raises(ValueError) as exc:
sldict['f2'] = item
assert str(exc.value) == (
"Cannot set array with shape[0] != 4")
@pytest.mark.parametrize('key', [1, 1.2, (1, 2), [3]])
def test_set_item_incorrect_key_type(self, sldict, key):
with pytest.raises(TypeError) as exc:
sldict[key] = np.ones((100, 5))
assert str(exc.value).startswith("Key must be str, not <")
@pytest.mark.parametrize('item', [
np.ones(3),
np.ones((1, 100)),
np.ones((5, 1000)),
np.ones((1, 100, 10)),
np.ones((28, 28, 1, 100)),
])
def test_update_incorrect_shape_raises(self, sldict, item):
with pytest.raises(ValueError) as exc:
sldict.update({'f2': item})
assert str(exc.value) == (
"Cannot set array with shape[0] != 4")
@pytest.mark.parametrize('item', [123, 'hi', [1, 2, 3]])
def test_set_first_item_no_shape_raises(self, sldict_cls, item):
with pytest.raises(AttributeError):
sldict_cls(f0=item)
@pytest.mark.parametrize('kwargs, expected', [
({}, 0),
(dict(a=np.zeros(12)), 12),
(dict(a=np.zeros(12), b=np.ones((12, 5))), 12),
(dict(a=np.ones((10, 1, 1)), b=np.ones((10, 10)), c=np.ones(10)), 10),
])
def test_len_and_shape(self, sldict_cls, kwargs, expected):
sldict = sldict_cls(**kwargs)
assert len(sldict) == expected
assert sldict.shape == (expected,)
def test_get_item_str_key(self, sldict_cls):
sldict = sldict_cls(a=np.ones(5), b=np.zeros(5))
assert (sldict['a'] == np.ones(5)).all()
assert (sldict['b'] == np.zeros(5)).all()
@pytest.mark.parametrize('sl, expected', [
(slice(0, 1), {'f0': np.array([0]), 'f1': np.array([[0, 1, 2]])}),
(slice(1, 2), {'f0': np.array([1]), 'f1': np.array([[3, 4, 5]])}),
# This Python module is part of the PyRate software package.
#
# Copyright 2021 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import resource
from collections import namedtuple
from typing import List, Dict, Tuple, Any
from nptyping import NDArray, Float32, UInt16
import numpy as np
import pyrate.constants as C
from pyrate.core import mpiops
from pyrate.core.shared import Ifg, join_dicts
from pyrate.core.phase_closure.mst_closure import Edge, WeightedLoop
from pyrate.core.logger import pyratelogger as log
IndexedIfg = namedtuple('IndexedIfg', ['index', 'IfgPhase'])
class IfgPhase:
"""
workaround class to only hold phase data for mpi SwigPyObject pickle error
"""
def __init__(self, phase_data):
self.phase_data = phase_data
def __create_ifg_edge_dict(ifg_files: List[str], params: dict) -> Dict[Edge, IndexedIfg]:
"""Returns a dictionary of indexed ifg 'edges'"""
ifg_files.sort()
ifgs = [Ifg(i) for i in ifg_files]
def _func(ifg, index):
ifg.open()
ifg.nodata_value = params[C.NO_DATA_VALUE]
ifg.convert_to_nans()
ifg.convert_to_radians()
idx_ifg = IndexedIfg(index, IfgPhase(ifg.phase_data))
return idx_ifg
process_ifgs = mpiops.array_split(list(enumerate(ifgs)))
ret_combined = {}
for idx, _ifg in process_ifgs:
ret_combined[Edge(_ifg.first, _ifg.second)] = _func(_ifg, idx)
_ifg.close()
ret_combined = join_dicts(mpiops.comm.allgather(ret_combined))
return ret_combined
def sum_phase_closures(ifg_files: List[str], loops: List[WeightedLoop], params: dict) -> \
Tuple[NDArray[(Any, Any, Any), Float32], NDArray[(Any, Any, Any), UInt16], NDArray[(Any,), UInt16]]:
"""
Compute the closure sum for each pixel in each loop, and count the number of times a pixel
contributes to a failed closure loop (where the summed closure is above/below the
CLOSURE_THR threshold).
:param ifg_files: list of ifg files
:param loops: list of loops
:param params: params dict
:return: Tuple of closure, ifgs_breach_count, num_occurrences_each_ifg
closure: summed closure for each loop.
ifgs_breach_count: shape=(ifg.shape, n_ifgs) number of times a pixel in an ifg fails the closure
check (i.e., has unwrapping error) in all loops under investigation.
num_occurrences_each_ifg: frequency of ifg appearance in all loops.
"""
edge_to_indexed_ifgs = __create_ifg_edge_dict(ifg_files, params)
ifgs = [v.IfgPhase for v in edge_to_indexed_ifgs.values()]
n_ifgs = len(ifgs)
if params[C.PARALLEL]:
# rets = Parallel(n_jobs=params[cf.PROCESSES], verbose=joblib_log_level(cf.LOG_LEVEL))(
# delayed(__compute_ifgs_breach_count)(ifg0, n_ifgs, weighted_loop, edge_to_indexed_ifgs, params)
# for weighted_loop in loops
# )
# for k, r in enumerate(rets):
# closure_dict[k], ifgs_breach_count_dict[k] = r
# TODO: enable multiprocessing - needs pickle error workaround
closure = np.zeros(shape=(* ifgs[0].phase_data.shape, len(loops)), dtype=np.float32)
ifgs_breach_count = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)
for k, weighted_loop in enumerate(loops):
closure[:, :, k], ifgs_breach_count_l = __compute_ifgs_breach_count(weighted_loop, edge_to_indexed_ifgs,
params)
ifgs_breach_count += ifgs_breach_count_l
else:
process_loops = mpiops.array_split(loops)
closure_process = np.zeros(shape=(* ifgs[0].phase_data.shape, len(process_loops)), dtype=np.float32)
ifgs_breach_count_process = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)
for k, weighted_loop in enumerate(process_loops):
closure_process[:, :, k], ifgs_breach_count_l = \
__compute_ifgs_breach_count(weighted_loop, edge_to_indexed_ifgs, params)
ifgs_breach_count_process += ifgs_breach_count_l # process
total_gb = mpiops.comm.allreduce(ifgs_breach_count_process.nbytes / 1e9, op=mpiops.MPI.SUM)
log.debug(f"Memory usage to compute ifgs_breach_count_process was {total_gb} GB")
log.debug(f"shape of ifgs_breach_count_process is {ifgs_breach_count_process.shape}")
log.debug(f"dtype of ifgs_breach_count_process is {ifgs_breach_count_process.dtype}")
total_gb = mpiops.comm.allreduce(closure_process.nbytes / 1e9, op=mpiops.MPI.SUM)
log.debug(f"Memory usage to compute closure_process was {total_gb} GB")
if mpiops.rank == 0:
ifgs_breach_count = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)
# closure
closure = np.zeros(shape=(* ifgs[0].phase_data.shape, len(loops)), dtype=np.float32)
main_process_indices = mpiops.array_split(range(len(loops))).astype(np.uint16)
closure[:, :, main_process_indices] = closure_process
for rank in range(1, mpiops.size):
rank_indices = mpiops.array_split(range(len(loops)), rank).astype(np.uint16)
this_rank_closure = np.zeros(shape=(* ifgs[0].phase_data.shape, len(rank_indices)), dtype=np.float32)
mpiops.comm.Recv(this_rank_closure, source=rank, tag=rank)
closure[:, :, rank_indices] = this_rank_closure
else:
closure = None
ifgs_breach_count = None
mpiops.comm.Send(closure_process, dest=0, tag=mpiops.rank)
if mpiops.MPI_INSTALLED:
mpiops.comm.Reduce([ifgs_breach_count_process, mpiops.MPI.UINT16_T],
[ifgs_breach_count, mpiops.MPI.UINT16_T], op=mpiops.MPI.SUM, root=0) # global
else:
ifgs_breach_count = mpiops.comm.reduce(ifgs_breach_count_process, op=mpiops.sum0_op, root=0)
log.debug(f"successfully summed phase closure breach array")
num_occurrences_each_ifg = None
if mpiops.rank == 0:
num_occurrences_each_ifg = _find_num_occurrences_each_ifg(loops, edge_to_indexed_ifgs, n_ifgs)
return closure, ifgs_breach_count, num_occurrences_each_ifg
def _find_num_occurrences_each_ifg(loops: List[WeightedLoop],
edge_to_indexed_ifgs: Dict[Edge, IndexedIfg],
n_ifgs: int) -> NDArray[(Any,), UInt16]:
"""find how many times each ifg appears in total in all loops"""
num_occurrences_each_ifg = np.zeros(shape=n_ifgs, dtype=np.uint16)
for weighted_loop in loops:
for signed_edge in weighted_loop.loop:
indexed_ifg = edge_to_indexed_ifgs[signed_edge.edge]
ifg_index = indexed_ifg.index
num_occurrences_each_ifg[ifg_index] += 1
return num_occurrences_each_ifg
def __compute_ifgs_breach_count(weighted_loop: WeightedLoop,
edge_to_indexed_ifgs: Dict[Edge, IndexedIfg], params: dict) \
-> Tuple[NDArray[(Any, Any), Float32], NDArray[(Any, Any, Any), UInt16]]:
"""Compute summed `closure` of each loop, and compute `ifgs_breach_count` for each pixel."""
n_ifgs = len(edge_to_indexed_ifgs)
indexed_ifg = list(edge_to_indexed_ifgs.values())[0]
ifg = indexed_ifg.IfgPhase
closure_thr = params[C.CLOSURE_THR] * np.pi
use_median = params[C.SUBTRACT_MEDIAN]
closure = np.zeros(shape=ifg.phase_data.shape, dtype=np.float32)
# initiate variable for check of unwrapping issues at the same pixels in all loops
ifgs_breach_count = np.zeros(shape=(ifg.phase_data.shape + (n_ifgs,)), dtype=np.uint16)
for signed_edge in weighted_loop.loop:
indexed_ifg = edge_to_indexed_ifgs[signed_edge.edge]
ifg = indexed_ifg.IfgPhase
closure += signed_edge.sign * ifg.phase_data
if use_median:
closure -= np.nanmedian(closure) # optionally subtract the median closure phase
# this will deal with nans in `closure`, i.e., nans are not selected in indices_breaching_threshold
indices_breaching_threshold = np.absolute(closure) > closure_thr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
class Series:
def __init__(self):
self.n = 0
self.series = {'1D':None, '2D':None, '3D':None}
self.tmax = {'1D':0, '2D':0, '3D':0}
self.labels = {'1D':None, '2D':None, '3D':None}
self.task = '0'
def __len__(self):
return self.n
def read(self, PATH):
self.task = PATH[-5]
series = pd.read_csv(PATH, header=None, sep="\n")
series = series[0].str.split(';')
dim_idx = series.map(lambda x: int(float(x[0])))
labels = PATH[:-9]+'ref'+self.task+'.txt'
labels = pd.read_csv(os.path.join(labels), header=None, sep=';').drop(0, axis=1)
self.n = len(series)
for i, dim in enumerate(['1D', '2D', '3D']):
self.labels[dim] = labels[dim_idx == i+1]
self.tmax[dim] = max(series[dim_idx == i+1].map(lambda x:len(x[1:])))
if i == 0:
self.series[dim] = series[dim_idx == i+1].map(lambda x:np.array(x[1:], dtype='float64'))
else:
self.series[dim] = series[dim_idx == i+1].map(lambda x:np.array(x[1:], dtype='float64').reshape(-1, i+1, order='F'))
def differentiate(self, dim, d, thres):
names = list(self.series.keys())
def get_weight_ffd(d, thres, lim):
w, k = [1.], 1
ctr = 0
while True:
w_ = -w[-1] / k * (d - k + 1)
if abs(w_) < thres:
break
w.append(w_)
k += 1
ctr += 1
if ctr == lim - 1:
break
w = np.array(w[::-1]).reshape(-1, 1)
return w
w = get_weight_ffd(d, thres, self.tmax[names[dim-1]])
def frac_diff_ffd(x, d, thres=1e-5):
width = len(w) - 1
output = []
for i in range(width, len(x)):
output.append(np.dot(w.T, x[i - width:i + 1])[0])
return np.array(output)
def function(serie):
if dim == 1:
return frac_diff_ffd(serie, d=d, thres=thres)
elif dim == 2:
x = frac_diff_ffd(serie[:,0], d=d, thres=thres).reshape(-1, 1)
y = frac_diff_ffd(serie[:,1], d=d, thres=thres).reshape(-1, 1)
return np.concatenate([x, y], axis=1)
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
#load the data (cat/not-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
index = 4
plt.imshow(train_set_x_orig[index])
print("y="+str(train_set_y[:, index])+ ", it's a '" + classes[ | np.squeeze(train_set_y[:, index]) | numpy.squeeze |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 16:13:39 2021
@author: ruizca
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, FK5
from astropy.table import Table, unique, join
from astropy.utils.console import color_print
from astropy_healpix import HEALPix
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.patches import Polygon
from mocpy import MOC
from mocpy.mocpy import flatten_pixels
from scipy.stats import median_abs_deviation
from tqdm.auto import tqdm
from .. import rapidxmm
from .ecf import ECF
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
#plt.rc('text', usetex=True)
plt.rcParams['mathtext.fontset'] = "stix"
plt.rcParams['mathtext.rm'] = "STIXGeneral"
plt.rcParams['font.family'] = "STIXGeneral"
plt.rcParams["axes.formatter.use_mathtext"] = True
# Numpy random number generator
rng = np.random.default_rng()
def get_neighbours(npixel, hp, level=5):
# The central pixel is the first one
# The output of hp.neighbours always follows the
# same order, starting SW and rotating clockwise
neighbours_level = [None] * (level + 1)
neighbours_level[0] = [npixel]
npixel_neighbours = [npixel]
for i in range(1, level + 1):
neighbours_level[i] = hp.neighbours(neighbours_level[i - 1]).flatten()
npixel_neighbours += list(neighbours_level[i])
sorted_neighbours = Table()
sorted_neighbours["npixel"] = npixel_neighbours
sorted_neighbours["order"] = range(len(npixel_neighbours))
sorted_neighbours = unique(sorted_neighbours, keys=["npixel"])
sorted_neighbours.sort("order")
return sorted_neighbours
def get_bkg_npixels(src_center, nside, npixels=100):
order = np.log2(nside).astype(int)
bkg_moc_outer = MOC.from_cone(src_center.ra, src_center.dec, 120*u.arcsec, order)
bkg_moc_inner = MOC.from_cone(src_center.ra, src_center.dec, 60*u.arcsec, order)
bkg_moc = bkg_moc_outer.difference(bkg_moc_inner)
bkg_npixels = flatten_pixels(bkg_moc._interval_set._intervals, order)
return rng.choice(bkg_npixels, size=npixels, replace=False).tolist()
def get_bkg_data(npixel, obsid, hp):
src_center = hp.healpix_to_skycoord(npixel)
bkg_npixels = get_bkg_npixels(src_center, hp.nside, npixels=100)
bkg_data = rapidxmm.query_npixels(
bkg_npixels, obstype="pointed", instrum="PN"
)
mask = bkg_data["obsid"] == obsid
bkg_data = bkg_data[mask]
if len(bkg_data) < 15:
bkg_data = None
return bkg_data
def stats_bootstrap(src, bkg, exp, eef, ecf, ac=None, nbkg=None, nsim=1000):
# Calculate median and MAD for the stack using bootstraping
nstack, npixels, nbands = src.shape
cr = np.zeros((nsim, npixels, nbands))
cr_err = np.zeros((nsim, npixels, nbands))
"""
Proto
Contains the following library code useful for prototyping robotic algorithms:
- YAML
- TIME
- PROFILING
- MATHS
- LINEAR ALGEBRA
- GEOMETRY
- LIE
- TRANSFORM
- MATPLOTLIB
- CV
- DATASET
- FILTER
- STATE ESTIMATION
- CALIBRATION
- SIMULATION
- UNITTESTS
"""
import os
import sys
import glob
import math
import time
import copy
import random
import pickle
import json
import signal
from datetime import datetime
from pathlib import Path
from enum import Enum
from dataclasses import dataclass
from collections import namedtuple
from types import FunctionType
from typing import Optional
import cv2
import yaml
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
import scipy.optimize
import pandas
import cProfile
from pstats import Stats
###############################################################################
# YAML
###############################################################################
def load_yaml(yaml_path):
""" Load YAML and return a named tuple """
assert yaml_path is not None
assert yaml_path != ""
# Load yaml_file
yaml_data = None
with open(yaml_path, "r") as stream:
yaml_data = yaml.safe_load(stream)
# Convert dict to named tuple
data = json.dumps(yaml_data) # Python dict to json
data = json.loads(
data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
return data
###############################################################################
# TIME
###############################################################################
def sec2ts(time_s):
""" Convert time in seconds to timestamp """
return int(time_s * 1e9)
def ts2sec(ts):
""" Convert timestamp to seconds """
return ts * 1e-9
###############################################################################
# PROFILING
###############################################################################
def profile_start():
""" Start profile """
prof = cProfile.Profile()
prof.enable()
return prof
def profile_stop(prof, **kwargs):
""" Stop profile """
key = kwargs.get('key', 'cumtime')
N = kwargs.get('N', 10)
stats = Stats(prof)
stats.strip_dirs()
stats.sort_stats(key).print_stats(N)
###############################################################################
# MATHS
###############################################################################
from math import pi
from math import isclose
from math import sqrt
# from math import floor
from math import cos
from math import sin
from math import tan
from math import acos
from math import atan
def rmse(errors):
""" Root Mean Squared Error """
return np.sqrt(np.mean(errors**2))
###############################################################################
# LINEAR ALGEBRA
###############################################################################
from numpy import rad2deg
from numpy import deg2rad
from numpy import sinc
from numpy import zeros
from numpy import ones
from numpy import eye
from numpy import trace
from numpy import diagonal as diag
from numpy import cross
from numpy.linalg import norm
from numpy.linalg import inv
from numpy.linalg import pinv
from numpy.linalg import matrix_rank as rank
from numpy.linalg import eig
from numpy.linalg import svd
from numpy.linalg import cholesky as chol
def normalize(v):
""" Normalize vector v """
n = np.linalg.norm(v)
if n == 0:
return v
return v / n
def full_rank(A):
""" Check if matrix A is full rank """
return rank(A) == A.shape[0]
def skew(vec):
""" Form skew-symmetric matrix from vector `vec` """
assert vec.shape == (3,) or vec.shape == (3, 1)
x, y, z = vec
return np.array([[0.0, -z, y], [z, 0.0, -x], [-y, x, 0.0]])
def skew_inv(A):
""" Form skew symmetric matrix vector """
assert A.shape == (3, 3)
return np.array([A[2, 1], A[0, 2], A[1, 0]])
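# Minimal usage sketch (added for illustration; not part of the original library):
def _example_skew():
  """ skew(a) @ b equals the cross product a x b; skew_inv undoes skew. """
  a = np.array([1.0, 2.0, 3.0])
  b = np.array([4.0, 5.0, 6.0])
  assert np.allclose(skew(a) @ b, cross(a, b))
  assert np.allclose(skew_inv(skew(a)), a)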
def fwdsubs(L, b):
"""
Solving a lower triangular system by forward-substitution
Input matrix L is an n by n lower triangular matrix
Input vector b is n by 1
Output vector x is the solution to the linear system
L x = b
"""
assert L.shape[1] == b.shape[0]
n = b.shape[0]
x = zeros((n, 1))
for j in range(n):
if L[j, j] == 0:
raise RuntimeError('Matrix is singular!')
x[j] = b[j] / L[j, j]
b[j:n] = b[j:n] - L[j:n, j] * x[j]
return x
def bwdsubs(U, b):
"""
Solving an upper triangular system by back-substitution
Input matrix U is an n by n upper triangular matrix
Input vector b is n by 1
Output vector x is the solution to the linear system
U x = b
"""
assert U.shape[1] == b.shape[0]
n = b.shape[0]
x = zeros((n, 1))
for j in range(n - 1, -1, -1):
if U[j, j] == 0:
raise RuntimeError('Matrix is singular!')
x[j] = b[j] / U[j, j]
b[0:j] = b[0:j] - U[0:j, j] * x[j]
return x
def solve_svd(A, b):
"""
Solve Ax = b with SVD
"""
# compute svd of A
U, s, Vh = svd(A)
# U diag(s) Vh x = b <=> diag(s) Vh x = U.T b = c
c = np.dot(U.T, b)
# diag(s) Vh x = c <=> Vh x = diag(1/s) c = w (trivial inversion of a diagonal matrix)
w = np.dot(np.diag(1 / s), c)
# Vh x = w <=> x = Vh.H w (where .H stands for hermitian = conjugate transpose)
x = np.dot(Vh.conj().T, w)
return x
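# Minimal usage sketch (added for illustration; not part of the original library):
def _example_solve_svd():
  """ Solve a small well-conditioned system and verify the residual. """
  A = np.array([[4.0, 1.0], [2.0, 3.0]])
  b = np.array([1.0, 2.0])
  x = solve_svd(A, b)
  assert np.allclose(A @ x, b)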
def schurs_complement(H, g, m, r, precond=False):
""" Shurs-complement """
assert H.shape[0] == (m + r)
# H = [Hmm, Hmr
# Hrm, Hrr];
Hmm = H[0:m, 0:m]
Hmr = H[0:m, m:]
Hrm = Hmr.T
Hrr = H[m:, m:]
# g = [gmm, grr]
gmm = g[0:m]
grr = g[m:]
# Precondition Hmm
if precond:
Hmm = 0.5 * (Hmm + Hmm.T)
# Invert Hmm
assert rank(Hmm) == Hmm.shape[0]
(w, V) = eig(Hmm)
W_inv = np.diag(1.0 / w)
Hmm_inv = V @ W_inv @ V.T
# Schurs complement
H_marg = Hrr - Hrm @ Hmm_inv @ Hmr
g_marg = grr - Hrm @ Hmm_inv @ gmm
return (H_marg, g_marg)
def is_pd(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = chol(B)
return True
except np.linalg.LinAlgError:
return False
def nearest_pd(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if is_pd(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not is_pd(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
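# Minimal usage sketch (added for illustration; not part of the original library):
def _example_nearest_pd():
  """ Project a random (generally indefinite) matrix onto the positive-definite cone. """
  A = np.random.randn(4, 4)
  B = nearest_pd(A)
  assert is_pd(B)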
def matrix_equal(A, B, tol=1e-8, verbose=False):
""" Compare matrices `A` and `B` """
diff = A - B
if len(diff.shape) == 1:
for i in range(diff.shape[0]):
if abs(diff[i]) > tol:
if verbose:
print("A - B:")
print(diff)
elif len(diff.shape) == 2:
for i in range(diff.shape[0]):
for j in range(diff.shape[1]):
if abs(diff[i, j]) > tol:
if verbose:
print("A - B:")
print(diff)
return False
return True
def plot_compare_matrices(title_A, A, title_B, B):
""" Plot compare matrices """
plt.matshow(A)
plt.colorbar()
plt.title(title_A)
plt.matshow(B)
plt.colorbar()
plt.title(title_B)
diff = A - B
plt.matshow(diff)
plt.colorbar()
plt.title(f"{title_A} - {title_B}")
print(f"max_coeff({title_A}): {np.max(np.max(A))}")
print(f"max_coeff({title_B}): {np.max(np.max(B))}")
print(f"min_coeff({title_A}): {np.min(np.min(A))}")
print(f"min_coeff({title_B}): {np.min(np.min(B))}")
print(f"max_diff: {np.max(np.max(np.abs(diff)))}")
plt.show()
def check_jacobian(jac_name, fdiff, jac, threshold, verbose=False):
""" Check jacobians """
# Check if numerical diff is same as analytical jacobian
if matrix_equal(fdiff, jac, threshold):
if verbose:
print(f"Check [{jac_name}] passed!")
return True
# Failed - print differences
if verbose:
fdiff_minus_jac = fdiff - jac
print(f"Check [{jac_name}] failed!")
print("-" * 60)
print("J_fdiff - J:")
print(np.round(fdiff_minus_jac, 4))
print()
print("J_fdiff:")
print(np.round(fdiff, 4))
print()
print("J:")
print(np.round(jac, 4))
print()
print("-" * 60)
return False
###############################################################################
# GEOMETRY
###############################################################################
def lerp(x0, x1, t):
""" Linear interpolation """
return (1.0 - t) * x0 + t * x1
def lerp2d(p0, p1, t):
""" Linear interpolation 2D """
assert len(p0) == 2
assert len(p1) == 2
assert t <= 1.0 and t >= 0.0
x = lerp(p0[0], p1[0], t)
y = lerp(p0[1], p1[1], t)
return np.array([x, y])
def lerp3d(p0, p1, t):
""" Linear interpolation 3D """
assert len(p0) == 3
assert len(p1) == 3
assert t <= 1.0 and t >= 0.0
x = lerp(p0[0], p1[0], t)
y = lerp(p0[1], p1[1], t)
z = lerp(p0[2], p1[2], t)
return np.array([x, y, z])
def circle(r, theta):
""" Circle """
x = r * cos(theta)
y = r * sin(theta)
return np.array([x, y])
def sphere(rho, theta, phi):
"""
Sphere
Args:
rho (float): Sphere radius
theta (float): longitude [rad]
phi (float): Latitude [rad]
Returns:
Point on sphere
"""
x = rho * sin(theta) * cos(phi)
y = rho * sin(theta) * sin(phi)
z = rho * cos(theta)
return np.array([x, y, z])
def circle_loss(c, x, y):
"""
Calculate the algebraic distance between the data points and the mean
circle centered at c=(xc, yc)
"""
xc, yc = c
# Euclidean dist from center (xc, yc)
Ri = np.sqrt((x - xc)**2 + (y - yc)**2)
return Ri - Ri.mean()
def find_circle(x, y):
"""
Find the circle center and radius given (x, y) data points using least
squares. Returns `(circle_center, circle_radius, residual)`
"""
x_m = np.mean(x)
y_m = np.mean(y)
center_init = x_m, y_m
center, _ = scipy.optimize.leastsq(circle_loss, center_init, args=(x, y))
xc, yc = center
radii = np.sqrt((x - xc)**2 + (y - yc)**2)
radius = radii.mean()
residual = np.sum((radii - radius)**2)
return (center, radius, residual)
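# Minimal usage sketch (added for illustration; not part of the original library):
def _example_find_circle():
  """ Fit a circle to noisy samples of a circle centred at (1, -1) with radius 2. """
  t = np.linspace(0.0, 2.0 * pi, 50)
  x = 1.0 + 2.0 * np.cos(t) + np.random.normal(0.0, 0.01, t.size)
  y = -1.0 + 2.0 * np.sin(t) + np.random.normal(0.0, 0.01, t.size)
  center, radius, _ = find_circle(x, y)
  assert np.allclose(center, [1.0, -1.0], atol=0.1)
  assert abs(radius - 2.0) < 0.1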
def bresenham(p0, p1):
"""
Bresenham's line algorithm is a line drawing algorithm that determines the
points of an n-dimensional raster that should be selected in order to form
a close approximation to a straight line between two points. It is commonly
used to draw line primitives in a bitmap image (e.g. on a computer screen),
as it uses only integer addition, subtraction and bit shifting, all of
which are very cheap operations in standard computer architectures.
Args:
p0 (np.array): Starting point (x, y)
p1 (np.array): End point (x, y)
Returns:
A list of (x, y) intermediate points from p0 to p1.
"""
x0, y0 = p0
x1, y1 = p1
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1.0 if x0 < x1 else -1.0
sy = 1.0 if y0 < y1 else -1.0
err = dx - dy
line = []
while True:
line.append([x0, y0])
if x0 == x1 and y0 == y1:
return line
e2 = 2 * err
if e2 > -dy:
# overshot in the y direction
err = err - dy
x0 = x0 + sx
if e2 < dx:
# overshot in the x direction
err = err + dx
y0 = y0 + sy
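# Minimal usage sketch (added for illustration; not part of the original library):
def _example_bresenham():
  """ Rasterise the segment (0, 0) -> (4, 3). """
  line = bresenham((0, 0), (4, 3))
  # Expected path: (0,0) -> (1,1) -> (2,1) -> (3,2) -> (4,3)
  assert len(line) == 5
  assert line[0] == [0, 0] and line[-1] == [4, 3]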
###############################################################################
# LIE
###############################################################################
def Exp(phi):
""" Exponential Map """
assert phi.shape == (3,) or phi.shape == (3, 1)
if norm(phi) < 1e-3:
C = eye(3) + skew(phi)
return C
phi_norm = norm(phi)
phi_skew = skew(phi)
phi_skew_sq = phi_skew @ phi_skew
C = eye(3)
C += (sin(phi_norm) / phi_norm) * phi_skew
C += ((1 - cos(phi_norm)) / phi_norm**2) * phi_skew_sq
return C
def Log(C):
""" Logarithmic Map """
assert C.shape == (3, 3)
# phi = acos((trace(C) - 1) / 2);
# u = skew_inv(C - C') / (2 * sin(phi));
# rvec = phi * u;
C00, C01, C02 = C[0, :]
C10, C11, C12 = C[1, :]
C20, C21, C22 = C[2, :]
tr = np.trace(C)
rvec = None
if tr + 1.0 < 1e-10:
if abs(C22 + 1.0) > 1.0e-5:
x = np.array([C02, C12, 1.0 + C22])
rvec = (pi / np.sqrt(2.0 + 2.0 * C22)) * x
elif abs(C11 + 1.0) > 1.0e-5:
x = np.array([C01, 1.0 + C11, C21])
rvec = (pi / np.sqrt(2.0 + 2.0 * C11)) * x
else:
x = np.array([1.0 + C00, C10, C20])
rvec = (pi / np.sqrt(2.0 + 2.0 * C00)) * x
else:
tr_3 = tr - 3.0 # always negative
if tr_3 < -1e-7:
theta = acos((tr - 1.0) / 2.0)
magnitude = theta / (2.0 * sin(theta))
else:
# when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
# use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
# see https://github.com/borglab/gtsam/issues/746 for details
magnitude = 0.5 - tr_3 / 12.0
rvec = magnitude * np.array([C21 - C12, C02 - C20, C10 - C01])
return rvec
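# Minimal usage sketch (added for illustration; not part of the original library):
def _example_exp_log():
  """ Exp and Log are inverse maps for rotation vectors away from the singularities. """
  phi = np.array([0.1, 0.2, 0.3])
  C = Exp(phi)
  assert np.allclose(C @ C.T, eye(3), atol=1e-6)
  assert np.allclose(Log(C), phi, atol=1e-6)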
def Jr(theta):
"""
Right jacobian
Forster, Christian, et al. "IMU preintegration on manifold for efficient
visual-inertial maximum-a-posteriori estimation." Georgia Institute of
Technology, 2015.
[Page 2, Equation (8)]
"""
theta_norm = norm(theta)
theta_norm_sq = theta_norm * theta_norm
theta_norm_cube = theta_norm_sq * theta_norm
theta_skew = skew(theta)
theta_skew_sq = theta_skew @ theta_skew
J = eye(3)
J -= ((1 - cos(theta_norm)) / theta_norm_sq) * theta_skew
J += (theta_norm - sin(theta_norm)) / (theta_norm_cube) * theta_skew_sq
return J
def Jr_inv(theta):
""" Inverse right jacobian """
theta_norm = norm(theta)
theta_norm_sq = theta_norm * theta_norm
theta_skew = skew(theta)
theta_skew_sq = theta_skew @ theta_skew
A = 1.0 / theta_norm_sq
B = (1 + cos(theta_norm)) / (2 * theta_norm * sin(theta_norm))
J = eye(3)
J += 0.5 * theta_skew
J += (A - B) * theta_skew_sq
return J
def boxplus(C, alpha):
""" Box plus """
# C_updated = C [+] alpha
  C_updated = C @ Exp(alpha)
return C_updated
def boxminus(C_a, C_b):
""" Box minus """
# alpha = C_a [-] C_b
  alpha = Log(inv(C_b) @ C_a)
return alpha
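# A short sketch (ours) of the manifold retraction pair: perturb a rotation
# with boxplus() and recover the perturbation with boxminus().
def _example_box_operators():
  """ Illustrate the boxplus() / boxminus() round-trip """
  C = Exp(np.array([0.2, -0.1, 0.4]))
  alpha = np.array([0.01, 0.02, -0.03])
  C_perturbed = boxplus(C, alpha)
  alpha_recovered = boxminus(C_perturbed, C)
  return np.allclose(alpha, alpha_recovered)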
###############################################################################
# TRANSFORM
###############################################################################
def homogeneous(p):
""" Turn point `p` into its homogeneous form """
return np.array([*p, 1.0])
def dehomogeneous(hp):
""" De-homogenize point `hp` into `p` """
return hp[0:3]
def rotx(theta):
""" Form rotation matrix around x axis """
row0 = [1.0, 0.0, 0.0]
row1 = [0.0, cos(theta), -sin(theta)]
row2 = [0.0, sin(theta), cos(theta)]
return np.array([row0, row1, row2])
def roty(theta):
""" Form rotation matrix around y axis """
row0 = [cos(theta), 0.0, sin(theta)]
row1 = [0.0, 1.0, 0.0]
row2 = [-sin(theta), 0.0, cos(theta)]
return np.array([row0, row1, row2])
def rotz(theta):
""" Form rotation matrix around z axis """
row0 = [cos(theta), -sin(theta), 0.0]
row1 = [sin(theta), cos(theta), 0.0]
row2 = [0.0, 0.0, 1.0]
return np.array([row0, row1, row2])
def aa2quat(angle, axis):
"""
Convert angle-axis to quaternion
Source:
    Solà, Joan. "Quaternion kinematics for the error-state Kalman filter." arXiv
preprint arXiv:1711.02508 (2017).
[Page 22, eq (101), "Quaternion and rotation vector"]
"""
ax, ay, az = axis
qw = cos(angle / 2.0)
qx = ax * sin(angle / 2.0)
qy = ay * sin(angle / 2.0)
qz = az * sin(angle / 2.0)
return np.array([qw, qx, qy, qz])
def rvec2rot(rvec):
""" Rotation vector to rotation matrix """
# If small rotation
theta = sqrt(rvec @ rvec) # = norm(rvec), but faster
eps = 1e-8
if theta < eps:
    return eye(3) + skew(rvec)
# Convert rvec to rotation matrix
rvec = rvec / theta
x, y, z = rvec
c = cos(theta)
s = sin(theta)
C = 1 - c
xs = x * s
ys = y * s
zs = z * s
xC = x * C
yC = y * C
zC = z * C
xyC = x * yC
yzC = y * zC
zxC = z * xC
row0 = [x * xC + c, xyC - zs, zxC + ys]
row1 = [xyC + zs, y * yC + c, yzC - xs]
row2 = [zxC - ys, yzC + xs, z * zC + c]
return np.array([row0, row1, row2])
def vecs2axisangle(u, v):
""" From 2 vectors form an axis-angle vector """
  angle = math.acos(u @ v)
ax = normalize(np.cross(u, v))
return ax * angle
def euler321(yaw, pitch, roll):
"""
Convert yaw, pitch, roll in radians to a 3x3 rotation matrix.
Source:
    Kuipers, Jack B. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 85-86, "The Aerospace Sequence"]
"""
psi = yaw
theta = pitch
phi = roll
cpsi = cos(psi)
spsi = sin(psi)
ctheta = cos(theta)
stheta = sin(theta)
cphi = cos(phi)
sphi = sin(phi)
C11 = cpsi * ctheta
C21 = spsi * ctheta
C31 = -stheta
C12 = cpsi * stheta * sphi - spsi * cphi
C22 = spsi * stheta * sphi + cpsi * cphi
C32 = ctheta * sphi
C13 = cpsi * stheta * cphi + spsi * sphi
C23 = spsi * stheta * cphi - cpsi * sphi
C33 = ctheta * cphi
return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]])
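# Illustrative check (ours): euler321() should equal composing the elementary
# rotations as rotz(yaw) @ roty(pitch) @ rotx(roll).
def _example_euler321():
  """ Illustrate euler321() against rotz() @ roty() @ rotx() """
  yaw, pitch, roll = 0.3, -0.2, 0.1
  C = euler321(yaw, pitch, roll)
  C_check = rotz(yaw) @ roty(pitch) @ rotx(roll)
  return np.allclose(C, C_check)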
def euler2quat(yaw, pitch, roll):
"""
Convert yaw, pitch, roll in radians to a quaternion.
Source:
    Kuipers, Jack B. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 166-167, "Euler Angles to Quaternion"]
"""
psi = yaw # Yaw
theta = pitch # Pitch
phi = roll # Roll
c_phi = cos(phi / 2.0)
c_theta = cos(theta / 2.0)
c_psi = cos(psi / 2.0)
s_phi = sin(phi / 2.0)
s_theta = sin(theta / 2.0)
s_psi = sin(psi / 2.0)
qw = c_psi * c_theta * c_phi + s_psi * s_theta * s_phi
qx = c_psi * c_theta * s_phi - s_psi * s_theta * c_phi
qy = c_psi * s_theta * c_phi + s_psi * c_theta * s_phi
qz = s_psi * c_theta * c_phi - c_psi * s_theta * s_phi
mag = sqrt(qw**2 + qx**2 + qy**2 + qz**2)
return np.array([qw / mag, qx / mag, qy / mag, qz / mag])
def quat2euler(q):
"""
Convert quaternion to euler angles (yaw, pitch, roll).
Source:
    Kuipers, Jack B. Quaternions and Rotation Sequences: A Primer with
Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J:
Princeton University Press, 1999. Print.
[Page 168, "Quaternion to Euler Angles"]
"""
qw, qx, qy, qz = q
m11 = (2 * qw**2) + (2 * qx**2) - 1
m12 = 2 * (qx * qy + qw * qz)
m13 = 2 * qx * qz - 2 * qw * qy
m23 = 2 * qy * qz + 2 * qw * qx
m33 = (2 * qw**2) + (2 * qz**2) - 1
psi = math.atan2(m12, m11)
theta = math.asin(-m13)
phi = math.atan2(m23, m33)
ypr = np.array([psi, theta, phi])
return ypr
def quat2rot(q):
"""
Convert quaternion to 3x3 rotation matrix.
Source:
    Blanco, José Luis. "A tutorial on se(3) transformation parameterizations
and on-manifold optimization." University of Malaga, Tech. Rep 3 (2010): 6.
[Page 18, Equation (2.20)]
"""
assert len(q) == 4
qw, qx, qy, qz = q
qx2 = qx**2
qy2 = qy**2
qz2 = qz**2
qw2 = qw**2
# Homogeneous form
C11 = qw2 + qx2 - qy2 - qz2
C12 = 2.0 * (qx * qy - qw * qz)
C13 = 2.0 * (qx * qz + qw * qy)
C21 = 2.0 * (qx * qy + qw * qz)
C22 = qw2 - qx2 + qy2 - qz2
C23 = 2.0 * (qy * qz - qw * qx)
C31 = 2.0 * (qx * qz - qw * qy)
C32 = 2.0 * (qy * qz + qw * qx)
C33 = qw2 - qx2 - qy2 + qz2
return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]])
def rot2euler(C):
"""
Convert 3x3 rotation matrix to euler angles (yaw, pitch, roll).
"""
assert C.shape == (3, 3)
q = rot2quat(C)
return quat2euler(q)
def rot2quat(C):
"""
Convert 3x3 rotation matrix to quaternion.
"""
assert C.shape == (3, 3)
m00 = C[0, 0]
m01 = C[0, 1]
m02 = C[0, 2]
m10 = C[1, 0]
m11 = C[1, 1]
m12 = C[1, 2]
m20 = C[2, 0]
m21 = C[2, 1]
m22 = C[2, 2]
tr = m00 + m11 + m22
if tr > 0:
S = sqrt(tr + 1.0) * 2.0
# S=4*qw
qw = 0.25 * S
qx = (m21 - m12) / S
qy = (m02 - m20) / S
qz = (m10 - m01) / S
elif ((m00 > m11) and (m00 > m22)):
S = sqrt(1.0 + m00 - m11 - m22) * 2.0
# S=4*qx
qw = (m21 - m12) / S
qx = 0.25 * S
qy = (m01 + m10) / S
qz = (m02 + m20) / S
elif m11 > m22:
S = sqrt(1.0 + m11 - m00 - m22) * 2.0
# S=4*qy
qw = (m02 - m20) / S
qx = (m01 + m10) / S
qy = 0.25 * S
qz = (m12 + m21) / S
else:
S = sqrt(1.0 + m22 - m00 - m11) * 2.0
# S=4*qz
qw = (m10 - m01) / S
qx = (m02 + m20) / S
qy = (m12 + m21) / S
qz = 0.25 * S
return quat_normalize(np.array([qw, qx, qy, qz]))
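# Round-trip sketch (ours): converting a rotation matrix to a quaternion and
# back should reproduce the original matrix.
def _example_rot_quat_roundtrip():
  """ Illustrate the rot2quat() / quat2rot() round-trip """
  C = euler321(0.1, 0.2, 0.3)
  q = rot2quat(C)
  return np.allclose(C, quat2rot(q))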
# QUATERNION ##################################################################
def quat_norm(q):
""" Returns norm of a quaternion """
qw, qx, qy, qz = q
return sqrt(qw**2 + qx**2 + qy**2 + qz**2)
def quat_normalize(q):
""" Normalize quaternion """
n = quat_norm(q)
qw, qx, qy, qz = q
return np.array([qw / n, qx / n, qy / n, qz / n])
def quat_conj(q):
""" Return conjugate quaternion """
qw, qx, qy, qz = q
q_conj = np.array([qw, -qx, -qy, -qz])
return q_conj
def quat_inv(q):
""" Invert quaternion """
return quat_conj(q)
def quat_left(q):
""" Quaternion left product matrix """
qw, qx, qy, qz = q
row0 = [qw, -qx, -qy, -qz]
row1 = [qx, qw, -qz, qy]
row2 = [qy, qz, qw, -qx]
row3 = [qz, -qy, qx, qw]
return np.array([row0, row1, row2, row3])
def quat_right(q):
""" Quaternion right product matrix """
qw, qx, qy, qz = q
row0 = [qw, -qx, -qy, -qz]
row1 = [qx, qw, qz, -qy]
row2 = [qy, -qz, qw, qx]
row3 = [qz, qy, -qx, qw]
return np.array([row0, row1, row2, row3])
def quat_lmul(p, q):
""" Quaternion left multiply """
assert len(p) == 4
assert len(q) == 4
lprod = quat_left(p)
return lprod @ q
def quat_rmul(p, q):
""" Quaternion right multiply """
assert len(p) == 4
assert len(q) == 4
rprod = quat_right(q)
return rprod @ p
def quat_mul(p, q):
""" Quaternion multiply p * q """
return quat_lmul(p, q)
def quat_omega(w):
""" Quaternion omega matrix """
  # 4x4 skew-symmetric omega matrix: [[-[w]x, w], [-w^T, 0]]
  return np.block([[-1.0 * skew(w), w.reshape((3, 1))], [-w.reshape((1, 3)), 0.0]])
def quat_delta(dalpha):
""" Form quaternion from small angle rotation vector dalpha """
half_norm = 0.5 * norm(dalpha)
scalar = cos(half_norm)
vector = sinc(half_norm) * 0.5 * dalpha
dqw = scalar
dqx, dqy, dqz = vector
dq = np.array([dqw, dqx, dqy, dqz])
return dq
def quat_integrate(q_k, w, dt):
"""
  Solà, Joan. "Quaternion kinematics for the error-state Kalman filter." arXiv
preprint arXiv:1711.02508 (2017).
[Section 4.6.1 Zeroth-order integration, p.47]
"""
w_norm = norm(w)
q_scalar = 0.0
q_vec = np.array([0.0, 0.0, 0.0])
if w_norm > 1e-5:
q_scalar = cos(w_norm * dt * 0.5)
q_vec = w / w_norm * sin(w_norm * dt * 0.5)
else:
q_scalar = 1.0
q_vec = [0.0, 0.0, 0.0]
  q_kp1 = quat_mul(q_k, np.array([q_scalar, *q_vec]))
return q_kp1
def quat_slerp(q_i, q_j, t):
""" Quaternion Slerp `q_i` and `q_j` with parameter `t` """
assert len(q_i) == 4
assert len(q_j) == 4
assert t >= 0.0 and t <= 1.0
# Compute the cosine of the angle between the two vectors.
dot_result = q_i @ q_j
# If the dot product is negative, slerp won't take
# the shorter path. Note that q_j and -q_j are equivalent when
# the negation is applied to all four components. Fix by
# reversing one quaternion.
if dot_result < 0.0:
q_j = -q_j
dot_result = -dot_result
DOT_THRESHOLD = 0.9995
if dot_result > DOT_THRESHOLD:
# If the inputs are too close for comfort, linearly interpolate
# and normalize the result.
    return quat_normalize(q_i + t * (q_j - q_i))
# Since dot is in range [0, DOT_THRESHOLD], acos is safe
theta_0 = acos(dot_result) # theta_0 = angle between input vectors
theta = theta_0 * t # theta = angle between q_i and result
sin_theta = sin(theta) # compute this value only once
sin_theta_0 = sin(theta_0) # compute this value only once
# == sin(theta_0 - theta) / sin(theta_0)
s0 = cos(theta) - dot_result * sin_theta / sin_theta_0
s1 = sin_theta / sin_theta_0
return (s0 * q_i) + (s1 * q_j)
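# Usage sketch (ours): slerp between two orientations; at t=0.5 the
# interpolated yaw should be half-way between the endpoints.
def _example_quat_slerp():
  """ Illustrate quat_slerp() """
  q_i = euler2quat(0.0, 0.0, 0.0)
  q_j = euler2quat(0.5, 0.0, 0.0)
  q_half = quat_slerp(q_i, q_j, 0.5)
  yaw, _, _ = quat2euler(q_half)
  return yaw  # should be close to 0.25 rad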
# TF ##########################################################################
def tf(rot, trans):
"""
Form 4x4 homogeneous transformation matrix from rotation `rot` and
translation `trans`. Where the rotation component `rot` can be a rotation
matrix or a quaternion.
"""
C = None
if rot.shape == (4,) or rot.shape == (4, 1):
C = quat2rot(rot)
elif rot.shape == (3, 3):
C = rot
else:
raise RuntimeError("Invalid rotation!")
T = np.eye(4, 4)
T[0:3, 0:3] = C
T[0:3, 3] = trans
return T
def tf_rot(T):
""" Return rotation matrix from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return T[0:3, 0:3]
def tf_quat(T):
""" Return quaternion from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return rot2quat(tf_rot(T))
def tf_trans(T):
""" Return translation vector from 4x4 homogeneous transform """
assert T.shape == (4, 4)
return T[0:3, 3]
def tf_inv(T):
""" Invert 4x4 homogeneous transform """
assert T.shape == (4, 4)
return np.linalg.inv(T)
def tf_point(T, p):
""" Transform 3d point """
assert T.shape == (4, 4)
assert p.shape == (3,) or p.shape == (3, 1)
hpoint = np.array([p[0], p[1], p[2], 1.0])
return (T @ hpoint)[0:3]
def tf_hpoint(T, hp):
  """ Transform 3d homogeneous point """
assert T.shape == (4, 4)
assert hp.shape == (4,) or hp.shape == (4, 1)
return (T @ hp)[0:3]
def tf_decompose(T):
""" Decompose into rotation matrix and translation vector"""
assert T.shape == (4, 4)
C = tf_rot(T)
r = tf_trans(T)
return (C, r)
def tf_lerp(pose_i, pose_j, t):
""" Interpolate pose `pose_i` and `pose_j` with parameter `t` """
assert pose_i.shape == (4, 4)
assert pose_j.shape == (4, 4)
assert t >= 0.0 and t <= 1.0
# Decompose start pose
r_i = tf_trans(pose_i)
q_i = tf_quat(pose_i)
# Decompose end pose
r_j = tf_trans(pose_j)
q_j = tf_quat(pose_j)
# Interpolate translation and rotation
r_lerp = lerp(r_i, r_j, t)
q_lerp = quat_slerp(q_i, q_j, t)
return tf(q_lerp, r_lerp)
def tf_perturb(T, i, step_size):
""" Perturb transformation matrix """
assert T.shape == (4, 4)
assert i >= 0 and i <= 5
# Setup
C = tf_rot(T)
r = tf_trans(T)
if i >= 0 and i <= 2:
# Perturb translation
r[i] += step_size
elif i >= 3 and i <= 5:
# Perturb rotation
rvec = np.array([0.0, 0.0, 0.0])
rvec[i - 3] = step_size
q = rot2quat(C)
dq = quat_delta(rvec)
q_diff = quat_mul(q, dq)
q_diff = quat_normalize(q_diff)
C = quat2rot(q_diff)
return tf(C, r)
def tf_update(T, dx):
""" Update transformation matrix """
assert T.shape == (4, 4)
q = tf_quat(T)
r = tf_trans(T)
dr = dx[0:3]
dalpha = dx[3:6]
dq = quat_delta(dalpha)
return tf(quat_mul(q, dq), r + dr)
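# Usage sketch (ours): build a pose from a quaternion and translation, then
# map a point into the world frame and back again.
def _example_tf_usage():
  """ Illustrate tf(), tf_point() and tf_inv() """
  q_WB = euler2quat(0.1, 0.2, 0.3)
  r_WB = np.array([1.0, 2.0, 3.0])
  T_WB = tf(q_WB, r_WB)
  p_B = np.array([0.1, 0.0, -0.2])
  p_W = tf_point(T_WB, p_B)
  p_B_recovered = tf_point(tf_inv(T_WB), p_W)
  return np.allclose(p_B, p_B_recovered)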
###############################################################################
# MATPLOTLIB
###############################################################################
import matplotlib.pyplot as plt
def plot_set_axes_equal(ax):
"""
Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def plot_tf(ax, T, **kwargs):
"""
Plot 4x4 Homogeneous Transform
Args:
ax (matplotlib.axes.Axes): Plot axes object
T (np.array): 4x4 homogeneous transform (i.e. Pose in the world frame)
Keyword args:
size (float): Size of the coordinate-axes
linewidth (float): Thickness of the coordinate-axes
name (str): Frame name
name_offset (np.array or list): Position offset for displaying the frame's name
fontsize (float): Frame font size
fontweight (float): Frame font weight
"""
assert T.shape == (4, 4)
size = kwargs.get('size', 1)
# linewidth = kwargs.get('linewidth', 3)
name = kwargs.get('name', None)
name_offset = kwargs.get('name_offset', [0, 0, -0.01])
fontsize = kwargs.get('fontsize', 10)
fontweight = kwargs.get('fontweight', 'bold')
colors = kwargs.get('colors', ['r-', 'g-', 'b-'])
origin = tf_trans(T)
lx = tf_point(T, np.array([size, 0.0, 0.0]))
ly = tf_point(T, np.array([0.0, size, 0.0]))
lz = tf_point(T, np.array([0.0, 0.0, size]))
# Draw x-axis
px = [origin[0], lx[0]]
py = [origin[1], lx[1]]
pz = [origin[2], lx[2]]
ax.plot(px, py, pz, colors[0])
# Draw y-axis
px = [origin[0], ly[0]]
py = [origin[1], ly[1]]
pz = [origin[2], ly[2]]
ax.plot(px, py, pz, colors[1])
# Draw z-axis
px = [origin[0], lz[0]]
py = [origin[1], lz[1]]
pz = [origin[2], lz[2]]
ax.plot(px, py, pz, colors[2])
# Draw label
if name is not None:
x = origin[0] + name_offset[0]
y = origin[1] + name_offset[1]
z = origin[2] + name_offset[2]
ax.text(x, y, z, name, fontsize=fontsize, fontweight=fontweight)
def plot_xyz(title, data, key_time, key_x, key_y, key_z, ylabel):
"""
Plot XYZ plot
Args:
title (str): Plot title
data (Dict[str, pandas.DataFrame]): Plot data
key_time (str): Dictionary key for timestamps
key_x (str): Dictionary key x-axis
key_y (str): Dictionary key y-axis
key_z (str): Dictionary key z-axis
ylabel (str): Y-axis label
"""
axis = ['x', 'y', 'z']
colors = ["r", "g", "b"]
keys = [key_x, key_y, key_z]
line_styles = ["--", "-", "x"]
# Time
time_data = {}
for label, series_data in data.items():
ts0 = series_data[key_time][0]
time_data[label] = ts2sec(series_data[key_time].to_numpy() - ts0)
# Plot subplots
plt.figure()
for i in range(3):
plt.subplot(3, 1, i + 1)
for (label, series_data), line in zip(data.items(), line_styles):
line_style = colors[i] + line
x_data = time_data[label]
y_data = series_data[keys[i]].to_numpy()
plt.plot(x_data, y_data, line_style, label=label)
plt.xlabel("Time [s]")
plt.ylabel(ylabel)
plt.legend(loc=0)
plt.title(f"{title} in {axis[i]}-axis")
plt.subplots_adjust(hspace=0.65)
###############################################################################
# CV
###############################################################################
# UTILS #######################################################################
def lookat(cam_pos, target_pos, **kwargs):
""" Form look at matrix """
up_axis = kwargs.get('up_axis', np.array([0.0, -1.0, 0.0]))
assert len(cam_pos) == 3
assert len(target_pos) == 3
assert len(up_axis) == 3
# Note: If we were using OpenGL the cam_dir would be the opposite direction,
# since in OpenGL the camera forward is -z. In robotics however our camera is
# +z forward.
cam_z = normalize(target_pos - cam_pos)
cam_x = normalize(cross(up_axis, cam_z))
cam_y = cross(cam_z, cam_x)
T_WC = zeros((4, 4))
T_WC[0:3, 0] = cam_x.T
T_WC[0:3, 1] = cam_y.T
T_WC[0:3, 2] = cam_z.T
T_WC[0:3, 3] = cam_pos
T_WC[3, 3] = 1.0
return T_WC
# GEOMETRY ####################################################################
def linear_triangulation(P_i, P_j, z_i, z_j):
"""
Linear triangulation
This function is used to triangulate a single 3D point observed by two
camera frames (be it in time with the same camera, or two different cameras
with known extrinsics).
Args:
P_i (np.array): First camera 3x4 projection matrix
P_j (np.array): Second camera 3x4 projection matrix
z_i (np.array): First keypoint measurement
z_j (np.array): Second keypoint measurement
Returns:
p_Ci (np.array): 3D point w.r.t first camera
"""
# First three rows of P_i and P_j
P1T_i = P_i[0, :]
P2T_i = P_i[1, :]
P3T_i = P_i[2, :]
P1T_j = P_j[0, :]
P2T_j = P_j[1, :]
P3T_j = P_j[2, :]
# Image point from the first and second frame
x_i, y_i = z_i
x_j, y_j = z_j
# Form the A matrix of AX = 0
A = zeros((4, 4))
A[0, :] = x_i * P3T_i - P1T_i
A[1, :] = y_i * P3T_i - P2T_i
A[2, :] = x_j * P3T_j - P1T_j
A[3, :] = y_j * P3T_j - P2T_j
# Use SVD to solve AX = 0
(_, _, Vh) = svd(A.T @ A)
hp = Vh.T[:, -1] # Get the best result from SVD (last column of V)
hp = hp / hp[-1] # Normalize the homogeneous 3D point
p = hp[0:3] # Return only the first three components (x, y, z)
return p
# PINHOLE #####################################################################
def focal_length(image_width, fov_deg):
"""
  Estimated focal length based on `image_width` and field of view `fov_deg`
in degrees.
"""
return (image_width / 2.0) / tan(deg2rad(fov_deg / 2.0))
def pinhole_K(params):
""" Form camera matrix K """
fx, fy, cx, cy = params
return np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
def pinhole_P(params, T_WC):
""" Form 3x4 projection matrix P """
K = pinhole_K(params)
T_CW = inv(T_WC)
C = tf_rot(T_CW)
r = tf_trans(T_CW)
P = zeros((3, 4))
P[0:3, 0:3] = C
P[0:3, 3] = r
P = K @ P
return P
def pinhole_project(proj_params, p_C):
""" Project 3D point onto image plane using pinhole camera model """
assert len(proj_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Scale and center
fx, fy, cx, cy = proj_params
z = np.array([fx * x[0] + cx, fy * x[1] + cy])
return z
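# Worked sketch (ours): project a world point into two pinhole views with
# pinhole_P() / pinhole_project(), then recover it with
# linear_triangulation(). Intrinsics and poses below are made-up values.
def _example_linear_triangulation():
  """ Illustrate two-view triangulation with the pinhole model """
  params = [640.0, 640.0, 320.0, 240.0]  # fx, fy, cx, cy
  # Two camera poses in the world frame, 0.2m apart along x
  T_WC0 = tf(euler2quat(0.0, 0.0, 0.0), np.array([0.0, 0.0, 0.0]))
  T_WC1 = tf(euler2quat(0.0, 0.0, 0.0), np.array([0.2, 0.0, 0.0]))
  # World point and its image measurements in both views
  p_W = np.array([0.1, 0.2, 5.0])
  z0 = pinhole_project(params, tf_point(inv(T_WC0), p_W))
  z1 = pinhole_project(params, tf_point(inv(T_WC1), p_W))
  # Triangulate (the first camera coincides with the world frame here)
  P0 = pinhole_P(params, T_WC0)
  P1 = pinhole_P(params, T_WC1)
  p_est = linear_triangulation(P0, P1, z0, z1)
  return np.allclose(p_W, p_est)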
def pinhole_params_jacobian(x):
""" Form pinhole parameter jacobian """
return np.array([[x[0], 0.0, 1.0, 0.0], [0.0, x[1], 0.0, 1.0]])
def pinhole_point_jacobian(proj_params):
""" Form pinhole point jacobian """
fx, fy, _, _ = proj_params
return np.array([[fx, 0.0], [0.0, fy]])
# RADTAN4 #####################################################################
def radtan4_distort(dist_params, p):
""" Distort point with Radial-Tangential distortion """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, p1, p2 = dist_params
# Point
x, y = p
# Apply radial distortion
x2 = x * x
y2 = y * y
r2 = x2 + y2
r4 = r2 * r2
radial_factor = 1.0 + (k1 * r2) + (k2 * r4)
x_dash = x * radial_factor
y_dash = y * radial_factor
# Apply tangential distortion
xy = x * y
x_ddash = x_dash + (2.0 * p1 * xy + p2 * (r2 + 2.0 * x2))
y_ddash = y_dash + (p1 * (r2 + 2.0 * y2) + 2.0 * p2 * xy)
return np.array([x_ddash, y_ddash])
def radtan4_point_jacobian(dist_params, p):
""" Radial-tangential point jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, p1, p2 = dist_params
# Point
x, y = p
# Apply radial distortion
x2 = x * x
y2 = y * y
r2 = x2 + y2
r4 = r2 * r2
# Point Jacobian
# Let u = [x; y] normalized point
# Let u' be the distorted u
# The jacobian of u' w.r.t. u (or du'/du) is:
J_point = zeros((2, 2))
J_point[0, 0] = k1 * r2 + k2 * r4 + 2.0 * p1 * y + 6.0 * p2 * x
J_point[0, 0] += x * (2.0 * k1 * x + 4.0 * k2 * x * r2) + 1.0
J_point[1, 0] = 2.0 * p1 * x + 2.0 * p2 * y
J_point[1, 0] += y * (2.0 * k1 * x + 4.0 * k2 * x * r2)
J_point[0, 1] = J_point[1, 0]
J_point[1, 1] = k1 * r2 + k2 * r4 + 6.0 * p1 * y + 2.0 * p2 * x
J_point[1, 1] += y * (2.0 * k1 * y + 4.0 * k2 * y * r2) + 1.0
# Above is generated using sympy
return J_point
def radtan4_undistort(dist_params, p0):
""" Un-distort point with Radial-Tangential distortion """
assert len(dist_params) == 4
assert len(p0) == 2
# Undistort
p = p0
max_iter = 5
for _ in range(max_iter):
# Error
p_distorted = radtan4_distort(dist_params, p)
J = radtan4_point_jacobian(dist_params, p)
err = (p0 - p_distorted)
# Update
# dp = inv(J' * J) * J' * err
dp = pinv(J) @ err
p = p + dp
# Check threshold
if (err.T @ err) < 1e-15:
break
return p
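# Round-trip sketch (ours): distort a normalized image point with
# radtan4_distort() and recover it with the iterative radtan4_undistort().
def _example_radtan4_roundtrip():
  """ Illustrate the radtan4 distort / undistort round-trip """
  dist_params = np.array([0.01, 0.001, 0.001, 0.001])  # k1, k2, p1, p2
  p = np.array([0.1, 0.2])
  p_distorted = radtan4_distort(dist_params, p)
  p_recovered = radtan4_undistort(dist_params, p_distorted)
  return np.allclose(p, p_recovered, atol=1e-6)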
def radtan4_params_jacobian(dist_params, p):
""" Radial-Tangential distortion parameter jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Point
x, y = p
# Setup
x2 = x * x
y2 = y * y
xy = x * y
r2 = x2 + y2
r4 = r2 * r2
# Params Jacobian
J_params = zeros((2, 4))
J_params[0, 0] = x * r2
J_params[0, 1] = x * r4
J_params[0, 2] = 2.0 * xy
J_params[0, 3] = 3.0 * x2 + y2
J_params[1, 0] = y * r2
J_params[1, 1] = y * r4
J_params[1, 2] = x2 + 3.0 * y2
J_params[1, 3] = 2.0 * xy
return J_params
# EQUI4 #######################################################################
def equi4_distort(dist_params, p):
""" Distort point with Equi-distant distortion """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, k3, k4 = dist_params
# Distort
x, y = p
r = sqrt(x * x + y * y)
th = math.atan(r)
th2 = th * th
th4 = th2 * th2
th6 = th4 * th2
th8 = th4 * th4
thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
s = thd / r
x_dash = s * x
y_dash = s * y
return np.array([x_dash, y_dash])
def equi4_undistort(dist_params, p):
""" Undistort point using Equi-distant distortion """
  thd = sqrt(p[0] * p[0] + p[1] * p[1])
# Distortion parameters
k1, k2, k3, k4 = dist_params
th = thd # Initial guess
for _ in range(20):
th2 = th * th
th4 = th2 * th2
th6 = th4 * th2
th8 = th4 * th4
th = thd / (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
scaling = tan(th) / thd
return np.array([p[0] * scaling, p[1] * scaling])
def equi4_params_jacobian(dist_params, p):
""" Equi-distant distortion params jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Jacobian
x, y = p
r = sqrt(x**2 + y**2)
th = atan(r)
J_params = zeros((2, 4))
J_params[0, 0] = x * th**3 / r
J_params[0, 1] = x * th**5 / r
J_params[0, 2] = x * th**7 / r
J_params[0, 3] = x * th**9 / r
J_params[1, 0] = y * th**3 / r
J_params[1, 1] = y * th**5 / r
J_params[1, 2] = y * th**7 / r
J_params[1, 3] = y * th**9 / r
return J_params
def equi4_point_jacobian(dist_params, p):
""" Equi-distant distortion point jacobian """
assert len(dist_params) == 4
assert len(p) == 2
# Distortion parameters
k1, k2, k3, k4 = dist_params
# Jacobian
x, y = p
r = sqrt(x**2 + y**2)
th = math.atan(r)
th2 = th**2
th4 = th**4
th6 = th**6
th8 = th**8
thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8)
th_r = 1.0 / (r * r + 1.0)
thd_th = 1.0 + 3.0 * k1 * th2
thd_th += 5.0 * k2 * th4
thd_th += 7.0 * k3 * th6
thd_th += 9.0 * k4 * th8
s = thd / r
s_r = thd_th * th_r / r - thd / (r * r)
r_x = 1.0 / r * x
r_y = 1.0 / r * y
J_point = zeros((2, 2))
J_point[0, 0] = s + x * s_r * r_x
J_point[0, 1] = x * s_r * r_y
J_point[1, 0] = y * s_r * r_x
J_point[1, 1] = s + y * s_r * r_y
return J_point
# PINHOLE RADTAN4 #############################################################
def pinhole_radtan4_project(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Distort
x_dist = radtan4_distort(dist_params, x)
# Scale and center to image plane
fx, fy, cx, cy = proj_params
z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy])
return z
def pinhole_radtan4_backproject(proj_params, dist_params, z):
""" Pinhole + Radial-Tangential back-project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
  # Convert image pixel coordinates to normalized retinal coordinates
  fx, fy, cx, cy = proj_params
  x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
  # Undistort
  x = radtan4_undistort(dist_params, x)
# 3D ray
p = np.array([x[0], x[1], 1.0])
return p
def pinhole_radtan4_undistort(proj_params, dist_params, z):
""" Pinhole + Radial-Tangential undistort """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Back project and undistort
fx, fy, cx, cy = proj_params
p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
p_undist = radtan4_undistort(dist_params, p)
# Project undistorted point to image plane
return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy])
def pinhole_radtan4_project_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential project jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project 3D point
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Jacobian
J_proj = zeros((2, 3))
J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2]
J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2]
J_dist_point = radtan4_point_jacobian(dist_params, x)
J_proj_point = pinhole_point_jacobian(proj_params)
return J_proj_point @ J_dist_point @ J_proj
def pinhole_radtan4_params_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Radial-Tangential params jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point
x_dist = radtan4_distort(dist_params, x) # Distort point
J_proj_point = pinhole_point_jacobian(proj_params)
J_dist_params = radtan4_params_jacobian(dist_params, x)
J = zeros((2, 8))
J[0:2, 0:4] = pinhole_params_jacobian(x_dist)
J[0:2, 4:8] = J_proj_point @ J_dist_params
return J
# PINHOLE EQUI4 ###############################################################
def pinhole_equi4_project(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Distort
x_dist = equi4_distort(dist_params, x)
# Scale and center to image plane
fx, fy, cx, cy = proj_params
z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy])
return z
def pinhole_equi4_backproject(proj_params, dist_params, z):
""" Pinhole + Equi-distant back-project """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
  # Convert image pixel coordinates to normalized retinal coordinates
  fx, fy, cx, cy = proj_params
  x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
  # Undistort
  x = equi4_undistort(dist_params, x)
# 3D ray
p = np.array([x[0], x[1], 1.0])
return p
def pinhole_equi4_undistort(proj_params, dist_params, z):
""" Pinhole + Equi-distant undistort """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(z) == 2
# Back project and undistort
fx, fy, cx, cy = proj_params
p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy])
p_undist = equi4_undistort(dist_params, p)
# Project undistorted point to image plane
return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy])
def pinhole_equi4_project_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant project jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
# Project 3D point
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]])
# Jacobian
J_proj = zeros((2, 3))
J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2]
J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2]
J_dist_point = equi4_point_jacobian(dist_params, x)
J_proj_point = pinhole_point_jacobian(proj_params)
return J_proj_point @ J_dist_point @ J_proj
def pinhole_equi4_params_jacobian(proj_params, dist_params, p_C):
""" Pinhole + Equi-distant params jacobian """
assert len(proj_params) == 4
assert len(dist_params) == 4
assert len(p_C) == 3
x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point
x_dist = equi4_distort(dist_params, x) # Distort point
J_proj_point = pinhole_point_jacobian(proj_params)
J_dist_params = equi4_params_jacobian(dist_params, x)
J = zeros((2, 8))
J[0:2, 0:4] = pinhole_params_jacobian(x_dist)
J[0:2, 4:8] = J_proj_point @ J_dist_params
return J
# CAMERA GEOMETRY #############################################################
@dataclass
class CameraGeometry:
""" Camera Geometry """
cam_idx: int
resolution: tuple
proj_model: str
dist_model: str
proj_params_size: int
dist_params_size: int
project_fn: FunctionType
backproject_fn: FunctionType
undistort_fn: FunctionType
J_proj_fn: FunctionType
J_params_fn: FunctionType
def get_proj_params_size(self):
""" Return projection parameter size """
return self.proj_params_size
def get_dist_params_size(self):
""" Return distortion parameter size """
return self.dist_params_size
def get_params_size(self):
""" Return parameter size """
return self.get_proj_params_size() + self.get_dist_params_size()
def proj_params(self, params):
""" Extract projection parameters """
return params[:self.proj_params_size]
def dist_params(self, params):
""" Extract distortion parameters """
return params[-self.dist_params_size:]
def project(self, params, p_C):
""" Project point `p_C` with camera parameters `params` """
# Project
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
z = self.project_fn(proj_params, dist_params, p_C)
    # Make sure point is in front of camera
if p_C[2] < 0.0:
return False, z
# Make sure image point is within image bounds
x_ok = z[0] >= 0.0 and z[0] <= self.resolution[0]
y_ok = z[1] >= 0.0 and z[1] <= self.resolution[1]
if x_ok and y_ok:
return True, z
return False, z
def backproject(self, params, z):
""" Back-project image point `z` with camera parameters `params` """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
    return self.backproject_fn(proj_params, dist_params, z)
def undistort(self, params, z):
""" Undistort image point `z` with camera parameters `params` """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.undistort_fn(proj_params, dist_params, z)
def J_proj(self, params, p_C):
""" Form Jacobian w.r.t. p_C """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.J_proj_fn(proj_params, dist_params, p_C)
def J_params(self, params, p_C):
""" Form Jacobian w.r.t. camera parameters """
proj_params = params[:self.proj_params_size]
dist_params = params[-self.dist_params_size:]
return self.J_params_fn(proj_params, dist_params, p_C)
def pinhole_radtan4_setup(cam_idx, cam_res):
""" Setup Pinhole + Radtan4 camera geometry """
return CameraGeometry(
cam_idx, cam_res, "pinhole", "radtan4", 4, 4, pinhole_radtan4_project,
pinhole_radtan4_backproject, pinhole_radtan4_undistort,
pinhole_radtan4_project_jacobian, pinhole_radtan4_params_jacobian)
def pinhole_equi4_setup(cam_idx, cam_res):
""" Setup Pinhole + Equi camera geometry """
return CameraGeometry(cam_idx, cam_res, "pinhole", "equi4", 4, 4,
pinhole_equi4_project, pinhole_equi4_backproject,
pinhole_equi4_undistort, pinhole_equi4_project_jacobian,
pinhole_equi4_params_jacobian)
def camera_geometry_setup(cam_idx, cam_res, proj_model, dist_model):
""" Setup camera geometry """
if proj_model == "pinhole" and dist_model == "radtan4":
return pinhole_radtan4_setup(cam_idx, cam_res)
elif proj_model == "pinhole" and dist_model == "equi4":
return pinhole_equi4_setup(cam_idx, cam_res)
else:
raise RuntimeError(f"Unrecognized [{proj_model}]-[{dist_model}] combo!")
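# Usage sketch (ours): set up a pinhole-radtan4 camera geometry and project /
# back-project a point. The intrinsics below are made-up example values.
def _example_camera_geometry():
  """ Illustrate CameraGeometry usage """
  cam_geom = camera_geometry_setup(0, [640, 480], "pinhole", "radtan4")
  # Parameter vector layout: [fx, fy, cx, cy, k1, k2, p1, p2]
  params = np.array([460.0, 460.0, 320.0, 240.0, 0.01, 0.001, 0.001, 0.001])
  p_C = np.array([0.1, 0.2, 2.0])
  status, z = cam_geom.project(params, p_C)
  ray = cam_geom.backproject(params, z)
  return (status, z, ray)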
################################################################################
# DATASET
################################################################################
# TIMELINE ####################################################################
@dataclass
class CameraEvent:
""" Camera Event """
ts: int
cam_idx: int
image: np.array
@dataclass
class ImuEvent:
""" IMU Event """
ts: int
imu_idx: int
acc: np.array
gyr: np.array
@dataclass
class Timeline:
""" Timeline """
def __init__(self):
self.data = {}
def num_timestamps(self):
""" Return number of timestamps """
return len(self.data)
def num_events(self):
""" Return number of events """
nb_events = 0
    for _, events in self.data.items():
nb_events += len(events)
return nb_events
def get_timestamps(self):
""" Get timestamps """
return sorted(list(self.data.keys()))
def add_event(self, ts, event):
""" Add event """
if ts not in self.data:
self.data[ts] = [event]
else:
self.data[ts].append(event)
def get_events(self, ts):
""" Get events """
return self.data[ts]
# EUROC ########################################################################
class EurocSensor:
""" Euroc Sensor """
def __init__(self, yaml_path):
# Load yaml file
config = load_yaml(yaml_path)
# General sensor definitions.
self.sensor_type = config.sensor_type
self.comment = config.comment
# Sensor extrinsics wrt. the body-frame.
self.T_BS = np.array(config.T_BS.data).reshape((4, 4))
# Camera specific definitions.
if config.sensor_type == "camera":
self.rate_hz = config.rate_hz
self.resolution = config.resolution
self.camera_model = config.camera_model
self.intrinsics = config.intrinsics
self.distortion_model = config.distortion_model
self.distortion_coefficients = config.distortion_coefficients
elif config.sensor_type == "imu":
self.rate_hz = config.rate_hz
self.gyro_noise_density = config.gyroscope_noise_density
self.gyro_random_walk = config.gyroscope_random_walk
self.accel_noise_density = config.accelerometer_noise_density
self.accel_random_walk = config.accelerometer_random_walk
class EurocImuData:
""" Euroc Imu data """
def __init__(self, data_dir):
self.imu_dir = Path(data_dir, 'mav0', 'imu0')
self.config = EurocSensor(Path(self.imu_dir, 'sensor.yaml'))
self.timestamps = []
self.acc = {}
self.gyr = {}
# Load data
df = pandas.read_csv(Path(self.imu_dir, 'data.csv'))
df = df.rename(columns=lambda x: x.strip())
# -- Timestamp
timestamps = df['#timestamp [ns]'].to_numpy()
# -- Accelerometer measurement
acc_x = df['a_RS_S_x [m s^-2]'].to_numpy()
acc_y = df['a_RS_S_y [m s^-2]'].to_numpy()
acc_z = df['a_RS_S_z [m s^-2]'].to_numpy()
# -- Gyroscope measurement
gyr_x = df['w_RS_S_x [rad s^-1]'].to_numpy()
gyr_y = df['w_RS_S_y [rad s^-1]'].to_numpy()
gyr_z = df['w_RS_S_z [rad s^-1]'].to_numpy()
# -- Load
for i, ts in enumerate(timestamps):
self.timestamps.append(ts)
self.acc[ts] = np.array([acc_x[i], acc_y[i], acc_z[i]])
self.gyr[ts] = np.array([gyr_x[i], gyr_y[i], gyr_z[i]])
class EurocCameraData:
""" Euroc Camera data """
def __init__(self, data_dir, cam_idx):
self.cam_idx = cam_idx
self.cam_dir = Path(data_dir, 'mav0', 'cam' + str(cam_idx))
self.config = EurocSensor(Path(self.cam_dir, 'sensor.yaml'))
self.timestamps = []
self.image_paths = {}
# Load image paths
cam_data_dir = str(Path(self.cam_dir, 'data', '*.png'))
for img_file in sorted(glob.glob(cam_data_dir)):
ts_str, _ = os.path.basename(img_file).split('.')
ts = int(ts_str)
self.timestamps.append(ts)
self.image_paths[ts] = img_file
def get_image_path_list(self):
""" Return list of image paths """
    return [img_path for _, img_path in self.image_paths.items()]
class EurocGroundTruth:
""" Euroc ground truth """
def __init__(self, data_dir):
self.timestamps = []
self.T_WB = {}
self.v_WB = {}
self.w_WB = {}
self.a_WB = {}
# Load data
dir_name = 'state_groundtruth_estimate0'
data_csv = Path(data_dir, 'mav0', dir_name, 'data.csv')
df = pandas.read_csv(data_csv)
df = df.rename(columns=lambda x: x.strip())
# -- Timestamp
timestamps = df['#timestamp'].to_numpy()
# -- Body pose in world frame
rx_list = df['p_RS_R_x [m]'].to_numpy()
ry_list = df['p_RS_R_y [m]'].to_numpy()
rz_list = df['p_RS_R_z [m]'].to_numpy()
qw_list = df['q_RS_w []'].to_numpy()
qx_list = df['q_RS_x []'].to_numpy()
qy_list = df['q_RS_y []'].to_numpy()
qz_list = df['q_RS_z []'].to_numpy()
# -- Body velocity in world frame
vx_list = df['v_RS_R_x [m s^-1]'].to_numpy()
vy_list = df['v_RS_R_y [m s^-1]'].to_numpy()
vz_list = df['v_RS_R_z [m s^-1]'].to_numpy()
# -- Add to class
for i, ts in enumerate(timestamps):
r_WB = np.array([rx_list[i], ry_list[i], rz_list[i]])
q_WB = np.array([qw_list[i], qx_list[i], qy_list[i], qz_list[i]])
v_WB = np.array([vx_list[i], vy_list[i], vz_list[i]])
self.timestamps.append(ts)
self.T_WB[ts] = tf(q_WB, r_WB)
self.v_WB[ts] = v_WB
class EurocDataset:
""" Euroc Dataset """
def __init__(self, data_path):
# Data path
self.data_path = data_path
if os.path.isdir(data_path) is False:
raise RuntimeError(f"Path {data_path} does not exist!")
# Data
self.imu0_data = EurocImuData(self.data_path)
self.cam0_data = EurocCameraData(self.data_path, 0)
self.cam1_data = EurocCameraData(self.data_path, 1)
self.ground_truth = EurocGroundTruth(self.data_path)
self.timeline = self._form_timeline()
def _form_timeline(self):
timeline = Timeline()
# Form timeline
# -- Add imu0 events
for ts in self.imu0_data.timestamps:
acc = self.imu0_data.acc[ts]
gyr = self.imu0_data.gyr[ts]
timeline.add_event(ts, ImuEvent(ts, 0, acc, gyr))
# -- Add cam0 events
for ts, img_path in self.cam0_data.image_paths.items():
timeline.add_event(ts, CameraEvent(ts, 0, img_path))
# -- Add cam1 events
for ts, img_path in self.cam1_data.image_paths.items():
timeline.add_event(ts, CameraEvent(ts, 1, img_path))
return timeline
def get_camera_image(self, cam_idx, ts):
""" Get camera image """
img_path = None
if cam_idx == 0:
img_path = self.cam0_data.image_paths[ts]
elif cam_idx == 1:
img_path = self.cam1_data.image_paths[ts]
else:
raise RuntimeError("cam_idx has to be 0 or 1")
return cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
def get_ground_truth_pose(self, ts):
""" Get ground truth pose T_WB at timestamp `ts` """
# Pre-check
if ts <= self.ground_truth.timestamps[0]:
return None
elif ts >= self.ground_truth.timestamps[-1]:
return None
    # Loop through timestamps
for k, ground_truth_ts in enumerate(self.ground_truth.timestamps):
if ts == ground_truth_ts:
return self.ground_truth.T_WB[ts]
elif self.ground_truth.timestamps[k] > ts:
ts_i = self.ground_truth.timestamps[k - 1]
ts_j = self.ground_truth.timestamps[k]
        alpha = float(ts - ts_i) / float(ts_j - ts_i)
pose_i = self.ground_truth.T_WB[ts_i]
pose_j = self.ground_truth.T_WB[ts_j]
return tf_lerp(pose_i, pose_j, alpha)
return None
# KITTI #######################################################################
class KittiCameraData:
""" KittiCameraDataset """
def __init__(self, cam_idx, seq_path):
self.cam_idx = cam_idx
self.seq_path = seq_path
self.cam_path = Path(self.seq_path, "image_" + str(self.cam_idx).zfill(2))
self.img_dir = Path(self.cam_path, "data")
self.img_paths = sorted(glob.glob(str(Path(self.img_dir, "*.png"))))
class KittiRawDataset:
""" KittiRawDataset """
def __init__(self, data_dir, date, seq, is_sync):
# Paths
self.data_dir = data_dir
self.date = date
self.seq = seq.zfill(4)
self.sync = "sync" if is_sync else "extract"
self.seq_name = "_".join([self.date, "drive", self.seq, self.sync])
self.seq_path = Path(self.data_dir, self.date, self.seq_name)
# Camera data
self.cam0_data = KittiCameraData(0, self.seq_path)
self.cam1_data = KittiCameraData(1, self.seq_path)
self.cam2_data = KittiCameraData(2, self.seq_path)
self.cam3_data = KittiCameraData(3, self.seq_path)
# Calibration
calib_cam_to_cam_filepath = Path(self.data_dir, "calib_cam_to_cam.txt")
calib_imu_to_velo_filepath = Path(self.data_dir, "calib_imu_to_velo.txt")
calib_velo_to_cam_filepath = Path(self.data_dir, "calib_velo_to_cam.txt")
self.calib_cam_to_cam = self._read_calib_file(calib_cam_to_cam_filepath)
self.calib_imu_to_velo = self._read_calib_file(calib_imu_to_velo_filepath)
self.calib_velo_to_cam = self._read_calib_file(calib_velo_to_cam_filepath)
@classmethod
def _read_calib_file(cls, fp):
data = {}
with open(fp, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def nb_camera_images(self, cam_idx=0):
""" Return number of camera images """
assert cam_idx >= 0 and cam_idx <= 3
if cam_idx == 0:
return len(self.cam0_data.img_paths)
elif cam_idx == 1:
return len(self.cam1_data.img_paths)
elif cam_idx == 2:
return len(self.cam2_data.img_paths)
elif cam_idx == 3:
return len(self.cam3_data.img_paths)
return None
def get_velodyne_extrinsics(self):
""" Get velodyne extrinsics """
# Form imu-velo extrinsics T_BV
C_VB = self.calib_imu_to_velo['R'].reshape((3, 3))
r_VB = self.calib_imu_to_velo['T']
T_VB = tf(C_VB, r_VB)
T_BV = inv(T_VB)
return T_BV
def get_camera_extrinsics(self, cam_idx):
""" Get camera extrinsics T_BCi """
# Form imu-velo extrinsics T_VB
C_VB = self.calib_imu_to_velo['R'].reshape((3, 3))
r_VB = self.calib_imu_to_velo['T']
T_VB = tf(C_VB, r_VB)
# Form velo-cam extrinsics T_C0V
C_C0V = self.calib_velo_to_cam['R'].reshape((3, 3))
r_C0V = self.calib_velo_to_cam['T']
T_C0V = tf(C_C0V, r_C0V)
# Form cam-cam extrinsics T_CiC0
cam_str = str(cam_idx)
C_CiC0 = self.calib_cam_to_cam['R_' + cam_str.zfill(2)].reshape((3, 3))
r_CiC0 = self.calib_cam_to_cam['T_' + cam_str.zfill(2)]
T_CiC0 = tf(C_CiC0, r_CiC0)
    # Form body-camera extrinsics T_BCi
T_CiB = T_CiC0 @ T_C0V @ T_VB
T_BCi = inv(T_CiB)
return T_BCi
def get_camera_image(self, cam_idx, **kwargs):
""" Get camera image """
assert cam_idx >= 0 and cam_idx <= 3
imread_flag = kwargs.get('imread_flag', cv2.IMREAD_GRAYSCALE)
img_idx = kwargs['index']
if cam_idx == 0:
return cv2.imread(self.cam0_data.img_paths[img_idx], imread_flag)
elif cam_idx == 1:
return cv2.imread(self.cam1_data.img_paths[img_idx], imread_flag)
elif cam_idx == 2:
return cv2.imread(self.cam2_data.img_paths[img_idx], imread_flag)
elif cam_idx == 3:
return cv2.imread(self.cam3_data.img_paths[img_idx], imread_flag)
return None
def plot_frames(self):
""" Plot Frames """
T_BV = self.get_velodyne_extrinsics()
T_BC0 = self.get_camera_extrinsics(0)
T_BC1 = self.get_camera_extrinsics(1)
T_BC2 = self.get_camera_extrinsics(2)
T_BC3 = self.get_camera_extrinsics(3)
plt.figure()
ax = plt.axes(projection='3d')
plot_tf(ax, eye(4), size=0.1, name="imu")
plot_tf(ax, T_BV, size=0.1, name="velo")
plot_tf(ax, T_BC0, size=0.1, name="cam0")
plot_tf(ax, T_BC1, size=0.1, name="cam1")
plot_tf(ax, T_BC2, size=0.1, name="cam2")
plot_tf(ax, T_BC3, size=0.1, name="cam3")
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
plot_set_axes_equal(ax)
plt.show()
###############################################################################
# FILTER
###############################################################################
def compl_filter(gyro, accel, dt, roll, pitch):
"""
A simple complementary filter that uses `gyro` and `accel` measurements to
estimate the attitude in `roll` and `pitch`. Where `dt` is the update
rate of the `gyro` measurements in seconds.
"""
# Calculate pitch and roll using gyroscope
wx, wy, _ = gyro
gyro_roll = (wx * dt) + roll
gyro_pitch = (wy * dt) + pitch
# Calculate pitch and roll using accelerometer
ax, ay, az = accel
  accel_roll = (atan(ay / sqrt(ax * ax + az * az))) * 180.0 / pi
accel_pitch = (atan(ax / sqrt(ay * ay + az * az))) * 180.0 / pi
  # Complementary filter
pitch = (0.98 * gyro_pitch) + (0.02 * accel_pitch)
roll = (0.98 * gyro_roll) + (0.02 * accel_roll)
return (roll, pitch)
###############################################################################
# STATE ESTIMATION
###############################################################################
# STATE VARIABLES #############################################################
@dataclass
class StateVariable:
""" State variable """
ts: int
var_type: str
param: np.array
parameterization: str
min_dims: int
fix: bool
data: Optional[dict] = None
param_id: int = None
def set_param_id(self, pid):
""" Set parameter id """
self.param_id = pid
class StateVariableType(Enum):
""" State Variable Type """
POSE = 1
EXTRINSICS = 2
FEATURE = 3
CAMERA = 4
SPEED_AND_BIASES = 5
class FeatureMeasurements:
""" Feature measurements """
def __init__(self):
self._init = False
self._data = {}
def initialized(self):
""" Check if feature is initialized """
return self._init
def has_overlap(self, ts):
""" Check if feature has overlap at timestamp `ts` """
return len(self._data[ts]) > 1
def set_initialized(self):
""" Set feature as initialized """
self._init = True
def update(self, ts, cam_idx, z):
""" Add feature measurement """
assert len(z) == 2
if ts not in self._data:
self._data[ts] = {}
self._data[ts][cam_idx] = z
def get(self, ts, cam_idx):
""" Get feature measurement """
return self._data[ts][cam_idx]
def get_overlaps(self, ts):
""" Get feature overlaps """
overlaps = []
for cam_idx, z in self._data[ts].items():
overlaps.append((cam_idx, z))
return overlaps
def tf2pose(T):
""" Form pose vector """
rx, ry, rz = tf_trans(T)
qw, qx, qy, qz = tf_quat(T)
return np.array([rx, ry, rz, qx, qy, qz, qw])
def pose2tf(pose_vec):
""" Convert pose vector to transformation matrix """
rx, ry, rz = pose_vec[0:3]
qx, qy, qz, qw = pose_vec[3:7]
return tf(np.array([qw, qx, qy, qz]), np.array([rx, ry, rz]))
def pose_setup(ts, param, **kwargs):
""" Form pose state-variable """
fix = kwargs.get('fix', False)
param = tf2pose(param) if param.shape == (4, 4) else param
return StateVariable(ts, "pose", param, None, 6, fix)
def extrinsics_setup(param, **kwargs):
""" Form extrinsics state-variable """
fix = kwargs.get('fix', False)
param = tf2pose(param) if param.shape == (4, 4) else param
return StateVariable(None, "extrinsics", param, None, 6, fix)
def camera_params_setup(cam_idx, res, proj_model, dist_model, param, **kwargs):
""" Form camera parameters state-variable """
fix = kwargs.get('fix', False)
data = camera_geometry_setup(cam_idx, res, proj_model, dist_model)
return StateVariable(None, "camera", param, None, len(param), fix, data)
def feature_setup(param, **kwargs):
""" Form feature state-variable """
fix = kwargs.get('fix', False)
data = FeatureMeasurements()
return StateVariable(None, "feature", param, None, len(param), fix, data)
def speed_biases_setup(ts, vel, ba, bg, **kwargs):
""" Form speed and biases state-variable """
fix = kwargs.get('fix', False)
param = np.block([vel, ba, bg])
return StateVariable(ts, "speed_and_biases", param, None, len(param), fix)
def perturb_state_variable(sv, i, step_size):
""" Perturb state variable """
if sv.var_type == "pose" or sv.var_type == "extrinsics":
T = pose2tf(sv.param)
T_dash = tf_perturb(T, i, step_size)
sv.param = tf2pose(T_dash)
else:
sv.param[i] += step_size
return sv
def update_state_variable(sv, dx):
""" Update state variable """
if sv.var_type == "pose" or sv.var_type == "extrinsics":
T = pose2tf(sv.param)
T_prime = tf_update(T, dx)
sv.param = tf2pose(T_prime)
else:
sv.param += dx
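# Usage sketch (ours): create a pose state variable from a 4x4 transform,
# perturb one rotation component and apply a small 6-DoF update.
def _example_state_variable():
  """ Illustrate pose state-variable setup, perturbation and update """
  T_WB = tf(euler2quat(0.1, 0.2, 0.3), np.array([1.0, 2.0, 3.0]))
  pose = pose_setup(0, T_WB)
  # Perturb the first rotation component (indices 3-5 are rotation)
  pose = perturb_state_variable(pose, 3, 1e-3)
  # Apply a small update: [translation delta, small-angle rotation delta]
  update_state_variable(pose, np.array([0.01, 0.0, 0.0, 0.0, 0.0, 1e-3]))
  return pose2tf(pose.param)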
# FACTORS ######################################################################
class Factor:
""" Factor """
def __init__(self, ftype, pids, z, covar):
self.factor_id = None
self.factor_type = ftype
self.param_ids = pids
self.measurement = z
self.covar = covar
self.sqrt_info = chol(inv(self.covar)).T
def set_factor_id(self, fid):
""" Set factor id """
self.factor_id = fid
class PoseFactor(Factor):
""" Pose Factor """
def __init__(self, pids, z, covar):
assert len(pids) == 1
assert z.shape == (4, 4)
assert covar.shape == (6, 6)
Factor.__init__(self, "PoseFactor", pids, z, covar)
def eval(self, params, **kwargs):
""" Evaluate """
assert len(params) == 1
assert len(params[0]) == 7
# Measured pose
T_meas = self.measurement
q_meas = tf_quat(T_meas)
r_meas = tf_trans(T_meas)
# Estimated pose
T_est = pose2tf(params[0])
q_est = tf_quat(T_est)
r_est = tf_trans(T_est)
# Form residuals (pose - pose_est)
dr = r_meas - r_est
dq = quat_mul(quat_inv(q_meas), q_est)
dtheta = 2 * dq[1:4]
r = self.sqrt_info @ np.block([dr, dtheta])
if kwargs.get('only_residuals', False):
return r
# Form jacobians
J = zeros((6, 6))
J[0:3, 0:3] = -eye(3)
J[3:6, 3:6] = quat_left(dq)[1:4, 1:4]
J = self.sqrt_info @ J
return (r, [J])
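# Usage sketch (ours): a PoseFactor prior evaluated at its measured pose has
# (numerically) zero residual; a perturbed pose yields a non-zero residual.
def _example_pose_factor():
  """ Illustrate PoseFactor residual evaluation """
  T_WB = tf(euler2quat(0.1, 0.2, 0.3), np.array([1.0, 2.0, 3.0]))
  factor = PoseFactor([0], T_WB, 0.1 * eye(6))
  r, _ = factor.eval([tf2pose(T_WB)])
  T_perturbed = tf_perturb(T_WB, 0, 0.01)
  r_perturbed, _ = factor.eval([tf2pose(T_perturbed)])
  return (norm(r), norm(r_perturbed))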
class MultiCameraBuffer:
""" Multi-camera buffer """
def __init__(self, nb_cams=0):
self.nb_cams = nb_cams
self._ts = []
self._data = {}
def reset(self):
""" Reset buffer """
self._ts = []
self._data = {}
def add(self, ts, cam_idx, data):
""" Add camera event """
if self.nb_cams == 0:
      raise RuntimeError("MultiCameraBuffer not initialized yet!")
self._ts.append(ts)
self._data[cam_idx] = data
def ready(self):
""" Check whether buffer has all the camera frames ready """
if self.nb_cams == 0:
      raise RuntimeError("MultiCameraBuffer not initialized yet!")
check_ts_same = (len(set(self._ts)) == 1)
check_ts_len = (len(self._ts) == self.nb_cams)
check_data = (len(self._data) == self.nb_cams)
check_cam_indices = (len(set(self._data.keys())) == self.nb_cams)
return check_ts_same and check_ts_len and check_data and check_cam_indices
def get_camera_indices(self):
""" Get camera indices """
return self._data.keys()
def get_data(self):
""" Get camera data """
if self.nb_cams is None:
      raise RuntimeError("MultiCameraBuffer not initialized yet!")
return self._data
class BAFactor(Factor):
""" BA Factor """
def __init__(self, cam_geom, pids, z, covar=eye(2)):
assert len(pids) == 3
assert len(z) == 2
assert covar.shape == (2, 2)
Factor.__init__(self, "BAFactor", pids, z, covar)
self.cam_geom = cam_geom
def get_reproj_error(self, cam_pose, feature, cam_params):
""" Get reprojection error """
T_WC = pose2tf(cam_pose)
p_W = feature
p_C = tf_point(inv(T_WC), p_W)
status, z_hat = self.cam_geom.project(cam_params, p_C)
if status is False:
return None
z = self.measurement
reproj_error = norm(z - z_hat)
return reproj_error
def eval(self, params, **kwargs):
""" Evaluate """
assert len(params) == 3
assert len(params[0]) == 7
assert len(params[1]) == 3
assert len(params[2]) == self.cam_geom.get_params_size()
# Setup
r = np.array([0.0, 0.0])
J0 = zeros((2, 6))
J1 = zeros((2, 3))
J2 = zeros((2, self.cam_geom.get_params_size()))
# Map params
cam_pose, feature, cam_params = params
# Project point in world frame to image plane
T_WC = pose2tf(cam_pose)
    p_W = feature
p_C = tf_point(inv(T_WC), p_W)
status, z_hat = self.cam_geom.project(cam_params, p_C)
# Calculate residual
sqrt_info = self.sqrt_info
z = self.measurement
r = sqrt_info @ (z - z_hat)
if kwargs.get('only_residuals', False):
return r
# Calculate Jacobians
if status is False:
return (r, [J0, J1, J2])
# -- Measurement model jacobian
neg_sqrt_info = -1.0 * sqrt_info
Jh = self.cam_geom.J_proj(cam_params, p_C)
Jh_weighted = neg_sqrt_info @ Jh
# -- Jacobian w.r.t. camera pose T_WC
C_WC = tf_rot(T_WC)
C_CW = C_WC.T
r_WC = tf_trans(T_WC)
J0 = zeros((2, 6)) # w.r.t Camera pose T_WC
J0[0:2, 0:3] = Jh_weighted @ -C_CW
J0[0:2, 3:6] = Jh_weighted @ -C_CW @ skew(p_W - r_WC) @ -C_WC
# -- Jacobian w.r.t. feature
J1 = zeros((2, 3))
J1 = Jh_weighted @ C_CW
# -- Jacobian w.r.t. camera parameters
J_cam_params = self.cam_geom.J_params(cam_params, p_C)
J2 = zeros((2, self.cam_geom.get_params_size()))
J2 = neg_sqrt_info @ J_cam_params
return (r, [J0, J1, J2])
class VisionFactor(Factor):
""" Vision Factor """
def __init__(self, cam_geom, pids, z, covar=eye(2)):
assert len(pids) == 4
assert len(z) == 2
assert covar.shape == (2, 2)
Factor.__init__(self, "VisionFactor", pids, z, covar)
self.cam_geom = cam_geom
def get_reproj_error(self, pose, cam_exts, feature, cam_params):
""" Get reprojection error """
T_WB = pose2tf(pose)
T_BCi = pose2tf(cam_exts)
p_W = feature
p_C = tf_point(inv(T_WB @ T_BCi), p_W)
status, z_hat = self.cam_geom.project(cam_params, p_C)
if status is False:
return None
z = self.measurement
reproj_error = norm(z - z_hat)
return reproj_error
def eval(self, params, **kwargs):
""" Evaluate """
assert len(params) == 4
assert len(params[0]) == 7
assert len(params[1]) == 7
assert len(params[2]) == 3
assert len(params[3]) == self.cam_geom.get_params_size()
# Setup
r = np.array([0.0, 0.0])
J0 = zeros((2, 6))
J1 = zeros((2, 6))
J2 = zeros((2, 3))
J3 = zeros((2, self.cam_geom.get_params_size()))
# Project point in world frame to image plane
pose, cam_exts, feature, cam_params = params
T_WB = pose2tf(pose)
T_BCi = pose2tf(cam_exts)
p_W = feature
p_C = tf_point(inv(T_WB @ T_BCi), p_W)
status, z_hat = self.cam_geom.project(cam_params, p_C)
# Calculate residual
sqrt_info = self.sqrt_info
z = self.measurement
r = sqrt_info @ (z - z_hat)
if kwargs.get('only_residuals', False):
return r
# Calculate Jacobians
if status is False:
return (r, [J0, J1, J2, J3])
C_BCi = tf_rot(T_BCi)
C_WB = tf_rot(T_WB)
C_CB = C_BCi.T
C_BW = C_WB.T
C_CW = C_CB @ C_WB.T
r_WB = tf_trans(T_WB)
neg_sqrt_info = -1.0 * sqrt_info
# -- Measurement model jacobian
Jh = self.cam_geom.J_proj(cam_params, p_C)
Jh_weighted = neg_sqrt_info @ Jh
# -- Jacobian w.r.t. pose T_WB
J0 = zeros((2, 6))
J0[0:2, 0:3] = Jh_weighted @ C_CB @ -C_BW
J0[0:2, 3:6] = Jh_weighted @ C_CB @ -C_BW @ skew(p_W - r_WB) @ -C_WB
# -- Jacobian w.r.t. camera extrinsics T_BCi
J1 = zeros((2, 6))
J1[0:2, 0:3] = Jh_weighted @ -C_CB
J1[0:2, 3:6] = Jh_weighted @ -C_CB @ skew(C_BCi @ p_C) @ -C_BCi
# -- Jacobian w.r.t. feature
J2 = zeros((2, 3))
J2 = Jh_weighted @ C_CW
# -- Jacobian w.r.t. camera parameters
J_cam_params = self.cam_geom.J_params(cam_params, p_C)
    J3 = neg_sqrt_info @ J_cam_params
return (r, [J0, J1, J2, J3])
class CalibVisionFactor(Factor):
""" Calibration Vision Factor """
def __init__(self, cam_geom, pids, grid_data, covar=eye(2)):
assert len(pids) == 3
assert len(grid_data) == 4
assert covar.shape == (2, 2)
tag_id, corner_idx, r_FFi, z = grid_data
Factor.__init__(self, "CalibVisionFactor", pids, z, covar)
self.cam_geom = cam_geom
self.tag_id = tag_id
self.corner_idx = corner_idx
self.r_FFi = r_FFi
def get_residual(self, pose, cam_exts, cam_params):
""" Get residual """
T_BF = pose2tf(pose)
T_BCi = pose2tf(cam_exts)
T_CiB = inv(T_BCi)
r_CiFi = tf_point(T_CiB @ T_BF, self.r_FFi)
status, z_hat = self.cam_geom.project(cam_params, r_CiFi)
if status is False:
return None
r = self.measurement - z_hat
return r
def get_reproj_error(self, pose, cam_exts, cam_params):
""" Get reprojection error """
r = self.get_residual(pose, cam_exts, cam_params)
if r is None:
return None
return norm(r)
def eval(self, params, **kwargs):
""" Evaluate """
assert len(params) == 3
assert len(params[0]) == 7
assert len(params[1]) == 7
assert len(params[2]) == self.cam_geom.get_params_size()
# Setup
r = np.array([0.0, 0.0])
J0 = zeros((2, 6))
J1 = zeros((2, 6))
J2 = zeros((2, self.cam_geom.get_params_size()))
# Map parameters out
pose, cam_exts, cam_params = params
T_BF = pose2tf(pose)
T_BCi = pose2tf(cam_exts)
# Transform and project point to image plane
T_CiB = inv(T_BCi)
r_CiFi = tf_point(T_CiB @ T_BF, self.r_FFi)
status, z_hat = self.cam_geom.project(cam_params, r_CiFi)
# Calculate residual
sqrt_info = self.sqrt_info
z = self.measurement
r = sqrt_info @ (z - z_hat)
if kwargs.get('only_residuals', False):
return r
# Calculate Jacobians
if status is False:
return (r, [J0, J1, J2])
neg_sqrt_info = -1.0 * sqrt_info
Jh = self.cam_geom.J_proj(cam_params, r_CiFi)
Jh_weighted = neg_sqrt_info @ Jh
# -- Jacobians w.r.t relative camera pose T_BF
C_CiB = tf_rot(T_CiB)
C_BF = tf_rot(T_BF)
J0 = zeros((2, 6))
J0[0:2, 0:3] = Jh_weighted @ C_CiB
J0[0:2, 3:6] = Jh_weighted @ C_CiB @ -C_BF @ skew(self.r_FFi)
# -- Jacobians w.r.t T_BCi
r_BFi = tf_point(T_BF, self.r_FFi)
r_BCi = tf_trans(T_BCi)
C_BCi = tf_rot(T_BCi)
J1 = zeros((2, 6))
J1[0:2, 0:3] = Jh_weighted @ -C_CiB
J1[0:2, 3:6] = Jh_weighted @ -C_CiB @ skew(r_BFi - r_BCi) @ -C_BCi
# -- Jacobians w.r.t cam params
J_cam_params = self.cam_geom.J_params(cam_params, r_CiFi)
J2 = neg_sqrt_info @ J_cam_params
return (r, [J0, J1, J2])
class ImuBuffer:
""" IMU buffer """
def __init__(self, ts=None, acc=None, gyr=None):
self.ts = ts if ts is not None else []
self.acc = acc if acc is not None else []
self.gyr = gyr if gyr is not None else []
def add(self, ts, acc, gyr):
""" Add imu measurement """
self.ts.append(ts)
self.acc.append(acc)
self.gyr.append(gyr)
def add_event(self, imu_event):
""" Add imu event """
self.ts.append(imu_event.ts)
self.acc.append(imu_event.acc)
self.gyr.append(imu_event.gyr)
def length(self):
""" Return length of imu buffer """
return len(self.ts)
@dataclass
class ImuParams:
""" IMU parameters """
noise_acc: np.array
noise_gyr: np.array
noise_ba: np.array
noise_bg: np.array
g: np.array = np.array([0.0, 0.0, 9.81])
@dataclass
class ImuFactorData:
""" IMU Factor data """
state_F: np.array
state_P: np.array
dr: np.array
dv: np.array
dC: np.array
ba: np.array
bg: np.array
g: np.array
Dt: float
class ImuFactor(Factor):
""" Imu Factor """
def __init__(self, pids, imu_params, imu_buf, sb_i):
assert len(pids) == 4
self.imu_params = imu_params
self.imu_buf = imu_buf
data = self.propagate(imu_buf, imu_params, sb_i)
Factor.__init__(self, "ImuFactor", pids, None, data.state_P)
self.state_F = data.state_F
self.state_P = data.state_P
self.dr = data.dr
self.dv = data.dv
self.dC = data.dC
self.ba = data.ba
self.bg = data.bg
self.g = data.g
self.Dt = data.Dt
@staticmethod
def propagate(imu_buf, imu_params, sb_i):
""" Propagate imu measurements """
# Setup
Dt = 0.0
g = imu_params.g
state_F = eye(15) # State jacobian
state_P = zeros((15, 15)) # State covariance
# Noise matrix Q
Q = zeros((12, 12))
Q[0:3, 0:3] = imu_params.noise_acc**2 * eye(3)
Q[3:6, 3:6] = imu_params.noise_gyr**2 * eye(3)
Q[6:9, 6:9] = imu_params.noise_ba**2 * eye(3)
Q[9:12, 9:12] = imu_params.noise_bg**2 * eye(3)
# Pre-integrate relative position, velocity, rotation and biases
dr = np.array([0.0, 0.0, 0.0]) # Relative position
dv = np.array([0.0, 0.0, 0.0]) # Relative velocity
dC = eye(3) # Relative rotation
    ba_i = sb_i.param[3:6]  # Accel bias at i
    bg_i = sb_i.param[6:9]  # Gyro bias at i
    # Pre-integrate imu measurements
for k in range(len(imu_buf.ts) - 1):
# Timestep
ts_i = imu_buf.ts[k]
ts_j = imu_buf.ts[k + 1]
dt = ts2sec(ts_j - ts_i)
dt_sq = dt * dt
# Accelerometer and gyroscope measurements
acc_i = imu_buf.acc[k]
gyr_i = imu_buf.gyr[k]
# Propagate IMU state using Euler method
dr = dr + (dv * dt) + (0.5 * dC @ (acc_i - ba_i) * dt_sq)
dv = dv + dC @ (acc_i - ba_i) * dt
dC = dC @ Exp((gyr_i - bg_i) * dt)
ba = ba_i
bg = bg_i
# Make sure determinant of rotation is 1 by normalizing the quaternion
dq = quat_normalize(rot2quat(dC))
dC = quat2rot(dq)
# Continuous time transition matrix F
F = zeros((15, 15))
F[0:3, 3:6] = eye(3)
F[3:6, 6:9] = -1.0 * dC @ skew(acc_i - ba_i)
F[3:6, 9:12] = -1.0 * dC
F[6:9, 6:9] = -1.0 * skew(gyr_i - bg_i)
F[6:9, 12:15] = -eye(3)
# Continuous time input jacobian G
G = zeros((15, 12))
G[3:6, 0:3] = -1.0 * dC
G[6:9, 3:6] = -eye(3)
G[9:12, 6:9] = eye(3)
G[12:15, 9:12] = eye(3)
# Update
G_dt = G * dt
I_F_dt = eye(15) + F * dt
state_F = I_F_dt @ state_F
state_P = I_F_dt @ state_P @ I_F_dt.T + G_dt @ Q @ G_dt.T
Dt += dt
state_P = (state_P + state_P.T) / 2.0
return ImuFactorData(state_F, state_P, dr, dv, dC, ba, bg, g, Dt)
def eval(self, params, **kwargs):
""" Evaluate IMU factor """
assert len(params) == 4
assert len(params[0]) == 7
assert len(params[1]) == 9
assert len(params[2]) == 7
assert len(params[3]) == 9
# Map params
pose_i, sb_i, pose_j, sb_j = params
# Timestep i
T_i = pose2tf(pose_i)
r_i = tf_trans(T_i)
C_i = tf_rot(T_i)
q_i = tf_quat(T_i)
v_i = sb_i[0:3]
ba_i = sb_i[3:6]
bg_i = sb_i[6:9]
# Timestep j
T_j = pose2tf(pose_j)
r_j = tf_trans(T_j)
C_j = tf_rot(T_j)
q_j = tf_quat(T_j)
v_j = sb_j[0:3]
# Correct the relative position, velocity and orientation
# -- Extract jacobians from error-state jacobian
dr_dba = self.state_F[0:3, 9:12]
dr_dbg = self.state_F[0:3, 12:15]
dv_dba = self.state_F[3:6, 9:12]
dv_dbg = self.state_F[3:6, 12:15]
dq_dbg = self.state_F[6:9, 12:15]
dba = ba_i - self.ba
dbg = bg_i - self.bg
# -- Correct the relative position, velocity and rotation
dr = self.dr + dr_dba @ dba + dr_dbg @ dbg
dv = self.dv + dv_dba @ dba + dv_dbg @ dbg
dC = self.dC @ Exp(dq_dbg @ dbg)
dq = quat_normalize(rot2quat(dC))
# Form residuals
sqrt_info = self.sqrt_info
g = self.g
Dt = self.Dt
Dt_sq = Dt * Dt
dr_meas = (C_i.T @ ((r_j - r_i) - (v_i * Dt) + (0.5 * g * Dt_sq)))
dv_meas = (C_i.T @ ((v_j - v_i) + (g * Dt)))
err_pos = dr_meas - dr
err_vel = dv_meas - dv
err_rot = (2.0 * quat_mul(quat_inv(dq), quat_mul(quat_inv(q_i), q_j)))[1:4]
err_ba = np.array([0.0, 0.0, 0.0])
err_bg = np.array([0.0, 0.0, 0.0])
r = sqrt_info @ np.block([err_pos, err_vel, err_rot, err_ba, err_bg])
if kwargs.get('only_residuals', False):
return r
# Form jacobians
J0 = zeros((15, 6)) # residuals w.r.t pose i
    J1 = zeros((15, 9))  # residuals w.r.t speed and biases i
J2 = zeros((15, 6)) # residuals w.r.t pose j
    J3 = zeros((15, 9))  # residuals w.r.t speed and biases j
# -- Jacobian w.r.t. pose i
# yapf: disable
J0[0:3, 0:3] = -C_i.T # dr w.r.t r_i
J0[0:3, 3:6] = skew(dr_meas) # dr w.r.t C_i
J0[3:6, 3:6] = skew(dv_meas) # dv w.r.t C_i
J0[6:9, 3:6] = -(quat_left(rot2quat(C_j.T @ C_i)) @ quat_right(dq))[1:4, 1:4] # dtheta w.r.t C_i
J0 = sqrt_info @ J0
# yapf: enable
# -- Jacobian w.r.t. speed and biases i
# yapf: disable
J1[0:3, 0:3] = -C_i.T * Dt # dr w.r.t v_i
J1[0:3, 3:6] = -dr_dba # dr w.r.t ba
J1[0:3, 6:9] = -dr_dbg # dr w.r.t bg
J1[3:6, 0:3] = -C_i.T # dv w.r.t v_i
J1[3:6, 3:6] = -dv_dba # dv w.r.t ba
J1[3:6, 6:9] = -dv_dbg # dv w.r.t bg
    J1[6:9, 6:9] = -quat_left(rot2quat(C_j.T @ C_i @ self.dC))[1:4, 1:4] @ dq_dbg # dtheta w.r.t bg_i
J1 = sqrt_info @ J1
# yapf: enable
# -- Jacobian w.r.t. pose j
# yapf: disable
J2[0:3, 0:3] = C_i.T # dr w.r.t r_j
J2[6:9, 3:6] = quat_left(rot2quat(dC.T @ C_i.T @ C_j))[1:4, 1:4] # dtheta w.r.t C_j
J2 = sqrt_info @ J2
# yapf: enable
# -- Jacobian w.r.t. sb j
J3[3:6, 0:3] = C_i.T # dv w.r.t v_j
J3 = sqrt_info @ J3
return (r, [J0, J1, J2, J3])
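# A minimal construction sketch for ImuFactor (not part of the original
# implementation). It assumes a `speed_biases_setup(ts, vel, ba, bg)` helper
# exists elsewhere in this module (adapt if the actual helper differs); the
# parameter ids and measurements are synthetic (stationary IMU).
def _example_imu_factor():
  """ Form an ImuFactor from a handful of stationary IMU measurements """
  imu_params = ImuParams(noise_acc=0.08, noise_gyr=0.004,
                         noise_ba=0.00004, noise_bg=2.0e-6)
  imu_buf = ImuBuffer()
  for k in range(10):
    ts = sec2ts(k * 0.005)  # 200 Hz
    acc = np.array([0.0, 0.0, 9.81])  # Counteracting gravity only
    gyr = np.array([0.0, 0.0, 0.0])
    imu_buf.add(ts, acc, gyr)
  sb_i = speed_biases_setup(imu_buf.ts[0], zeros((3,)), zeros((3,)), zeros((3,)))
  pids = [0, 1, 2, 3]  # [pose i, speed/biases i, pose j, speed/biases j]
  return ImuFactor(pids, imu_params, imu_buf, sb_i)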
def check_factor_jacobian(factor, fvars, var_idx, jac_name, **kwargs):
""" Check factor jacobian """
# Step size and threshold
h = kwargs.get('step_size', 1e-8)
threshold = kwargs.get('threshold', 1e-4)
verbose = kwargs.get('verbose', False)
# Calculate baseline
params = [sv.param for sv in fvars]
r, jacs = factor.eval(params)
# Numerical diff
J_fdiff = zeros((len(r), fvars[var_idx].min_dims))
for i in range(fvars[var_idx].min_dims):
# Forward difference and evaluate
vars_fwd = copy.deepcopy(fvars)
vars_fwd[var_idx] = perturb_state_variable(vars_fwd[var_idx], i, 0.5 * h)
r_fwd, _ = factor.eval([sv.param for sv in vars_fwd])
# Backward difference and evaluate
vars_bwd = copy.deepcopy(fvars)
vars_bwd[var_idx] = perturb_state_variable(vars_bwd[var_idx], i, -0.5 * h)
r_bwd, _ = factor.eval([sv.param for sv in vars_bwd])
# Central finite difference
J_fdiff[:, i] = (r_fwd - r_bwd) / h
J = jacs[var_idx]
return check_jacobian(jac_name, J_fdiff, J, threshold, verbose)
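# A usage sketch for check_factor_jacobian() (not part of the original
# implementation). It works with any factor and its list of state variables,
# e.g. the output of the construction sketches above.
def _example_check_factor_jacobians(factor, fvars):
  """ Numerically verify every Jacobian returned by factor.eval() """
  results = []
  for var_idx, _ in enumerate(fvars):
    jac_name = f"J{var_idx}"
    results.append(check_factor_jacobian(factor, fvars, var_idx, jac_name))
  return all(results)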
# FACTOR GRAPH ################################################################
class FactorGraph:
""" Factor Graph """
def __init__(self):
# Parameters and factors
self._next_param_id = 0
self._next_factor_id = 0
self.params = {}
self.factors = {}
# Solver
self.solver_max_iter = 5
self.solver_lambda = 1e-4
def add_param(self, param):
""" Add param """
param_id = self._next_param_id
self.params[param_id] = param
self.params[param_id].set_param_id(param_id)
self._next_param_id += 1
return param_id
def add_factor(self, factor):
""" Add factor """
# Double check if params exists
for param_id in factor.param_ids:
if param_id not in self.params:
raise RuntimeError(f"Parameter [{param_id}] does not exist!")
# Add factor
factor_id = self._next_factor_id
self.factors[factor_id] = factor
self.factors[factor_id].set_factor_id(factor_id)
self._next_factor_id += 1
return factor_id
def remove_param(self, param):
""" Remove param """
assert param.param_id in self.params
del self.params[param.param_id]
def remove_factor(self, factor):
""" Remove factor """
assert factor.factor_id in self.factors
del self.factors[factor.factor_id]
def get_reproj_errors(self):
""" Get reprojection errors """
target_factors = ["BAFactor", "VisionFactor", "CalibVisionFactor"]
reproj_errors = []
for _, factor in self.factors.items():
if factor.factor_type in target_factors:
factor_params = [self.params[pid].param for pid in factor.param_ids]
retval = factor.get_reproj_error(*factor_params)
if retval is not None:
reproj_errors.append(retval)
return np.array(reproj_errors).flatten()
@staticmethod
def _print_to_console(iter_k, lambda_k, cost_kp1, cost_k):
""" Print to console """
print(f"iter[{iter_k}]:", end=" ")
print(f"lambda: {lambda_k:.2e}", end=", ")
print(f"cost: {cost_kp1:.2e}", end=", ")
print(f"dcost: {cost_kp1 - cost_k:.2e}", end=" ")
print()
# rmse_vision = rmse(self._get_reproj_errors())
# print(f"rms_reproj_error: {rmse_vision:.2f} px")
sys.stdout.flush()
def _form_param_indices(self):
""" Form parameter indices """
# Parameter ids
pose_param_ids = set()
sb_param_ids = set()
camera_param_ids = set()
exts_param_ids = set()
feature_param_ids = set()
# Track parameters
nb_params = 0
for _, factor in self.factors.items():
for _, param_id in enumerate(factor.param_ids):
param = self.params[param_id]
if param.fix:
continue
elif param.var_type == "pose":
pose_param_ids.add(param_id)
elif param.var_type == "speed_and_biases":
sb_param_ids.add(param_id)
elif param.var_type == "extrinsics":
exts_param_ids.add(param_id)
elif param.var_type == "feature":
feature_param_ids.add(param_id)
elif param.var_type == "camera":
camera_param_ids.add(param_id)
nb_params += 1
# Assign global parameter order
param_ids_list = []
param_ids_list.append(pose_param_ids)
param_ids_list.append(sb_param_ids)
param_ids_list.append(exts_param_ids)
param_ids_list.append(feature_param_ids)
param_ids_list.append(camera_param_ids)
param_idxs = {}
param_size = 0
for param_ids in param_ids_list:
for param_id in param_ids:
param_idxs[param_id] = param_size
param_size += self.params[param_id].min_dims
return (param_idxs, param_size)
def _linearize(self, params, param_idxs, param_size):
""" Linearize non-linear problem """
H = zeros((param_size, param_size))
g = zeros(param_size)
# Form Hessian and R.H.S of Gauss newton
for _, factor in self.factors.items():
factor_params = [params[pid].param for pid in factor.param_ids]
r, jacobians = factor.eval(factor_params)
# Form Hessian
nb_params = len(factor_params)
for i in range(nb_params):
param_i = params[factor.param_ids[i]]
if param_i.fix:
continue
idx_i = param_idxs[factor.param_ids[i]]
size_i = param_i.min_dims
J_i = jacobians[i]
for j in range(i, nb_params):
param_j = params[factor.param_ids[j]]
if param_j.fix:
continue
idx_j = param_idxs[factor.param_ids[j]]
size_j = param_j.min_dims
J_j = jacobians[j]
rs = idx_i
re = idx_i + size_i
cs = idx_j
ce = idx_j + size_j
if i == j: # Diagonal
H[rs:re, cs:ce] += J_i.T @ J_j
else: # Off-Diagonal
H[rs:re, cs:ce] += J_i.T @ J_j
            H[cs:ce, rs:re] += (J_i.T @ J_j).T  # Only add the current factor's contribution
# Form R.H.S. Gauss Newton g
rs = idx_i
re = idx_i + size_i
g[rs:re] += (-J_i.T @ r)
return (H, g)
def _evaluate(self, params):
""" Evaluate """
(param_idxs, param_size) = self._form_param_indices()
(H, g) = self._linearize(params, param_idxs, param_size)
return ((H, g), param_idxs)
def _calculate_residuals(self, params):
""" Calculate Residuals """
residuals = []
for _, factor in self.factors.items():
factor_params = [params[pid].param for pid in factor.param_ids]
r = factor.eval(factor_params, only_residuals=True)
residuals.append(r)
    return np.concatenate(residuals)
def _calculate_cost(self, params):
""" Calculate Cost """
r = self._calculate_residuals(params)
return 0.5 * (r.T @ r)
@staticmethod
def _update(params_k, param_idxs, dx):
""" Update """
params_kp1 = copy.deepcopy(params_k)
for param_id, param in params_kp1.items():
# Check if param even exists
if param_id not in param_idxs:
continue
# Update parameter
start = param_idxs[param_id]
end = start + param.min_dims
param_dx = dx[start:end]
update_state_variable(param, param_dx)
return params_kp1
@staticmethod
def _solve_for_dx(lambda_k, H, g):
""" Solve for dx """
# Damp Hessian
H = H + lambda_k * eye(H.shape[0])
# H = H + lambda_k * np.diag(H.diagonal())
# # Pseudo inverse
# dx = pinv(H) @ g
# # Linear solver
# dx = np.linalg.solve(H, g)
# # Cholesky decomposition
c, low = scipy.linalg.cho_factor(H)
dx = scipy.linalg.cho_solve((c, low), g)
# SVD
# dx = solve_svd(H, g)
# # Sparse cholesky decomposition
# sH = scipy.sparse.csc_matrix(H)
# dx = scipy.sparse.linalg.spsolve(sH, g)
return dx
def solve(self, verbose=False):
""" Solve """
lambda_k = self.solver_lambda
params_k = copy.deepcopy(self.params)
cost_k = self._calculate_cost(params_k)
# First evaluation
if verbose:
print(f"nb_factors: {len(self.factors)}")
print(f"nb_params: {len(self.params)}")
self._print_to_console(0, lambda_k, cost_k, cost_k)
# Iterate
for i in range(1, self.solver_max_iter):
# Update and calculate cost
((H, g), param_idxs) = self._evaluate(params_k)
dx = self._solve_for_dx(lambda_k, H, g)
params_kp1 = self._update(params_k, param_idxs, dx)
cost_kp1 = self._calculate_cost(params_kp1)
# Verbose
if verbose:
self._print_to_console(i, lambda_k, cost_kp1, cost_k)
# Accept or reject update
if cost_kp1 < cost_k:
# Accept update
cost_k = cost_kp1
params_k = params_kp1
lambda_k /= 10.0
else:
# Reject update
params_k = params_k
lambda_k *= 10.0
    # Finish - set the original params to the optimized values
# Note: The reason we don't just do `self.params = params_k` is because
# that would destroy the references to outside `FactorGraph()`.
for param_id, param in params_k.items():
self.params[param_id].param = param.param
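# A minimal usage sketch of the FactorGraph API (not part of the original
# implementation). It takes already-formed state variables and factors
# (e.g. from the factor construction sketches above) and shows the
# add-params / add-factors / solve pattern used by Calibrator and Tracker below.
def _example_factor_graph(params, factors):
  """ Add state variables and factors to a FactorGraph, then solve """
  graph = FactorGraph()
  for param in params:
    graph.add_param(param)
  for factor in factors:
    graph.add_factor(factor)
  graph.solve(verbose=True)
  return graph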
# FEATURE TRACKING #############################################################
def draw_matches(img_i, img_j, kps_i, kps_j, **kwargs):
"""
Draw keypoint matches between images `img_i` and `img_j` with keypoints
`kps_i` and `kps_j`
"""
assert len(kps_i) == len(kps_j)
nb_kps = len(kps_i)
viz = cv2.hconcat([img_i, img_j])
  viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2RGB)
color = (0, 255, 0)
radius = 3
thickness = kwargs.get('thickness', cv2.FILLED)
linetype = kwargs.get('linetype', cv2.LINE_AA)
for n in range(nb_kps):
pt_i = None
pt_j = None
if hasattr(kps_i[n], 'pt'):
pt_i = (int(kps_i[n].pt[0]), int(kps_i[n].pt[1]))
pt_j = (int(kps_j[n].pt[0] + img_i.shape[1]), int(kps_j[n].pt[1]))
else:
pt_i = (int(kps_i[n][0]), int(kps_i[n][1]))
pt_j = (int(kps_j[n][0] + img_i.shape[1]), int(kps_j[n][1]))
cv2.circle(viz, pt_i, radius, color, thickness, lineType=linetype)
cv2.circle(viz, pt_j, radius, color, thickness, lineType=linetype)
cv2.line(viz, pt_i, pt_j, color, 1, linetype)
return viz
def draw_keypoints(img, kps, inliers=None, **kwargs):
"""
Draw points `kps` on image `img`. The `inliers` boolean list is optional
and is expected to be the same size as `kps` denoting whether the point
should be drawn or not.
"""
inliers = [1 for i in range(len(kps))] if inliers is None else inliers
radius = kwargs.get('radius', 2)
color = kwargs.get('color', (0, 255, 0))
thickness = kwargs.get('thickness', cv2.FILLED)
linetype = kwargs.get('linetype', cv2.LINE_AA)
viz = img
if len(img.shape) == 2:
viz = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
for n, kp in enumerate(kps):
if inliers[n]:
p = None
if hasattr(kp, 'pt'):
p = (int(kp.pt[0]), int(kp.pt[1]))
else:
p = (int(kp[0]), int(kp[1]))
cv2.circle(viz, p, radius, color, thickness, lineType=linetype)
return viz
def sort_keypoints(kps):
""" Sort a list of cv2.KeyPoint based on their response """
responses = [kp.response for kp in kps]
indices = range(len(responses))
indices = sorted(indices, key=lambda i: responses[i], reverse=True)
return [kps[i] for i in indices]
def spread_keypoints(img, kps, min_dist, **kwargs):
"""
  Given a set of keypoints `kps`, make sure they are at least `min_dist` pixels
  away from each other; if they are not, remove them.
"""
# Pre-check
if not kps:
return kps
# Setup
debug = kwargs.get('debug', False)
prev_kps = kwargs.get('prev_kps', [])
min_dist = int(min_dist)
img_h, img_w = img.shape
A = np.zeros(img.shape) # Allowable areas are marked 0 else not allowed
# Loop through previous keypoints
for kp in prev_kps:
# Convert from keypoint to tuple
p = (int(kp.pt[0]), int(kp.pt[1]))
# Fill the area of the matrix where the next keypoint cannot be around
rs = int(max(p[1] - min_dist, 0.0))
re = int(min(p[1] + min_dist + 1, img_h))
cs = int(max(p[0] - min_dist, 0.0))
ce = int(min(p[0] + min_dist + 1, img_w))
A[rs:re, cs:ce] = np.ones((re - rs, ce - cs))
# Loop through keypoints
kps_results = []
for kp in sort_keypoints(kps):
# Convert from keypoint to tuple
p = (int(kp.pt[0]), int(kp.pt[1]))
# Check if point is ok to be added to results
if A[p[1], p[0]] > 0.0:
continue
# Fill the area of the matrix where the next keypoint cannot be around
rs = int(max(p[1] - min_dist, 0.0))
re = int(min(p[1] + min_dist + 1, img_h))
cs = int(max(p[0] - min_dist, 0.0))
ce = int(min(p[0] + min_dist + 1, img_w))
A[rs:re, cs:ce] = np.ones((re - rs, ce - cs))
A[p[1], p[0]] = 2
# Add to results
kps_results.append(kp)
# Debug
if debug:
img = draw_keypoints(img, kps_results, radius=3)
plt.figure()
ax = plt.subplot(121)
ax.imshow(A)
ax.set_xlabel('pixel')
ax.set_ylabel('pixel')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax = plt.subplot(122)
ax.imshow(img)
ax.set_xlabel('pixel')
ax.set_ylabel('pixel')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
plt.show()
return kps_results
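# A small usage sketch for spread_keypoints() (not part of the original
# implementation). The image path is a placeholder.
def _example_spread_keypoints(image_path="/tmp/image.png"):
  """ Detect FAST keypoints and keep only those at least 20 pixels apart """
  img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
  detector = cv2.FastFeatureDetector_create(threshold=50)
  kps = detector.detect(img)
  return spread_keypoints(img, kps, 20)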
class FeatureGrid:
"""
FeatureGrid
The idea is to take all the feature positions and put them into grid cells
across the full image space. This is so that one could keep track of how many
  features are being tracked in each individual grid cell and act accordingly.
o-----> x
| ---------------------
| | 0 | 1 | 2 | 3 |
V ---------------------
y | 4 | 5 | 6 | 7 |
---------------------
| 8 | 9 | 10 | 11 |
---------------------
| 12 | 13 | 14 | 15 |
---------------------
grid_x = ceil((max(1, pixel_x) / img_w) * grid_cols) - 1.0
grid_y = ceil((max(1, pixel_y) / img_h) * grid_rows) - 1.0
cell_id = int(grid_x + (grid_y * grid_cols))
"""
def __init__(self, grid_rows, grid_cols, image_shape, keypoints):
assert len(image_shape) == 2
self.grid_rows = grid_rows
self.grid_cols = grid_cols
self.image_shape = image_shape
self.keypoints = keypoints
self.cell = [0 for i in range(self.grid_rows * self.grid_cols)]
for kp in keypoints:
if hasattr(kp, 'pt'):
# cv2.KeyPoint
assert (kp.pt[0] >= 0 and kp.pt[0] <= image_shape[1])
assert (kp.pt[1] >= 0 and kp.pt[1] <= image_shape[0])
self.cell[self.cell_index(kp.pt)] += 1
else:
# Tuple
assert (kp[0] >= 0 and kp[0] <= image_shape[1])
assert (kp[1] >= 0 and kp[1] <= image_shape[0])
self.cell[self.cell_index(kp)] += 1
def cell_index(self, pt):
""" Return cell index based on point `pt` """
pixel_x, pixel_y = pt
img_h, img_w = self.image_shape
grid_x = math.ceil((max(1, pixel_x) / img_w) * self.grid_cols) - 1.0
grid_y = math.ceil((max(1, pixel_y) / img_h) * self.grid_rows) - 1.0
cell_id = int(grid_x + (grid_y * self.grid_cols))
return cell_id
def count(self, cell_idx):
""" Return cell count """
return self.cell[cell_idx]
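# A small worked example of the FeatureGrid cell bookkeeping described in the
# class docstring above (not part of the original implementation). Keypoint
# positions are placeholders.
def _example_feature_grid():
  """ Count keypoints per cell on a 2x2 grid over a 640x480 image """
  image_shape = (480, 640)  # (rows, cols)
  keypoints = [(10, 10), (630, 10), (400, 300)]  # cells 0, 1 and 3
  grid = FeatureGrid(2, 2, image_shape, keypoints)
  return [grid.count(cell_idx) for cell_idx in range(4)]  # [1, 1, 0, 1]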
def grid_detect(detector, image, **kwargs):
"""
Detect features uniformly using a grid system.
"""
optflow_mode = kwargs.get('optflow_mode', False)
max_keypoints = kwargs.get('max_keypoints', 240)
grid_rows = kwargs.get('grid_rows', 3)
grid_cols = kwargs.get('grid_cols', 4)
prev_kps = kwargs.get('prev_kps', [])
if prev_kps is None:
prev_kps = []
# Calculate number of grid cells and max corners per cell
image_height, image_width = image.shape
dx = int(math.ceil(float(image_width) / float(grid_cols)))
dy = int(math.ceil(float(image_height) / float(grid_rows)))
nb_cells = grid_rows * grid_cols
max_per_cell = math.floor(max_keypoints / nb_cells)
# Detect corners in each grid cell
feature_grid = FeatureGrid(grid_rows, grid_cols, image.shape, prev_kps)
des_all = []
kps_all = []
cell_idx = 0
for y in range(0, image_height, dy):
for x in range(0, image_width, dx):
# Make sure roi width and height are not out of bounds
w = image_width - x if (x + dx > image_width) else dx
h = image_height - y if (y + dy > image_height) else dy
# Detect corners in grid cell
cs, ce, rs, re = (x, x + w, y, y + h)
roi_image = image[rs:re, cs:ce]
kps = None
des = None
if optflow_mode:
detector.setNonmaxSuppression(1)
kps = detector.detect(roi_image)
kps = sort_keypoints(kps)
else:
kps = detector.detect(roi_image, None)
kps, des = detector.compute(roi_image, kps)
# Offset keypoints
cell_vacancy = max_per_cell - feature_grid.count(cell_idx)
if cell_vacancy <= 0:
continue
limit = min(len(kps), cell_vacancy)
for i in range(limit):
kp = kps[i]
kp.pt = (kp.pt[0] + x, kp.pt[1] + y)
kps_all.append(kp)
des_all.append(des[i, :] if optflow_mode is False else None)
# Update cell_idx
cell_idx += 1
# Space out the keypoints
if optflow_mode:
kps_all = spread_keypoints(image, kps_all, 20, prev_kps=prev_kps)
# Debug
if kwargs.get('debug', False):
# Setup
viz = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
kps_grid = FeatureGrid(grid_rows, grid_cols, image.shape, kps_all)
# Visualization properties
red = (0, 0, 255)
yellow = (0, 255, 255)
linetype = cv2.LINE_AA
font = cv2.FONT_HERSHEY_SIMPLEX
# -- Draw horizontal lines
for x in range(0, image_width, dx):
cv2.line(viz, (x, 0), (x, image_height), red, 1, linetype)
# -- Draw vertical lines
for y in range(0, image_height, dy):
cv2.line(viz, (0, y), (image_width, y), red, 1, linetype)
# -- Draw bin numbers
cell_idx = 0
for y in range(0, image_height, dy):
for x in range(0, image_width, dx):
text = str(kps_grid.count(cell_idx))
origin = (x + 10, y + 20)
viz = cv2.putText(viz, text, origin, font, 0.5, red, 1, linetype)
# text = str(feature_grid.count(cell_idx))
# origin = (x + 10, y + 20)
# viz = cv2.putText(viz, text, origin, font, 0.5, yellow, 1, linetype)
cell_idx += 1
# -- Draw keypoints
viz = draw_keypoints(viz, kps_all, color=red)
viz = draw_keypoints(viz, prev_kps, color=yellow)
cv2.imshow("viz", viz)
cv2.waitKey(0)
# Return
if optflow_mode:
return kps_all
return kps_all, np.array(des_all)
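# A minimal usage sketch for grid_detect() (not part of the original
# implementation). The image path is a placeholder.
def _example_grid_detect(image_path="/tmp/image.png"):
  """ Detect FAST keypoints uniformly over a 3x4 grid in optical-flow mode """
  img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
  detector = cv2.FastFeatureDetector_create(threshold=50)
  return grid_detect(detector, img, optflow_mode=True, max_keypoints=200)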
def optflow_track(img_i, img_j, pts_i, **kwargs):
"""
Track keypoints `pts_i` from image `img_i` to image `img_j` using optical
  flow. Returns a tuple `(pts_i, pts_j, inliers)`: points in image i, points in
  image j, and a boolean inlier vector.
"""
# Setup
patch_size = kwargs.get('patch_size', 50)
max_iter = kwargs.get('max_iter', 100)
epsilon = kwargs.get('epsilon', 0.001)
crit = (cv2.TermCriteria_COUNT | cv2.TermCriteria_EPS, max_iter, epsilon)
# Optical flow settings
config = {}
config['winSize'] = (patch_size, patch_size)
config['maxLevel'] = 3
config['criteria'] = crit
config['flags'] = cv2.OPTFLOW_USE_INITIAL_FLOW
# Track using optical flow
pts_j = np.array(pts_i)
track_results = cv2.calcOpticalFlowPyrLK(img_i, img_j, pts_i, pts_j, **config)
(pts_j, optflow_inliers, _) = track_results
# Make sure keypoints are within image dimensions
bound_inliers = []
img_h, img_w = img_j.shape
for p in pts_j:
x_ok = p[0] >= 0 and p[0] <= img_w
y_ok = p[1] >= 0 and p[1] <= img_h
if x_ok and y_ok:
bound_inliers.append(True)
else:
bound_inliers.append(False)
# Update or mark feature as lost
assert len(pts_i) == optflow_inliers.shape[0]
assert len(pts_i) == len(bound_inliers)
inliers = []
for i in range(len(pts_i)):
if optflow_inliers[i, 0] and bound_inliers[i]:
inliers.append(True)
else:
inliers.append(False)
if kwargs.get('debug', False):
viz_i = draw_keypoints(img_i, pts_i, inliers)
viz_j = draw_keypoints(img_j, pts_j, inliers)
viz = cv2.hconcat([viz_i, viz_j])
cv2.imshow('viz', viz)
cv2.waitKey(0)
return (pts_i, pts_j, inliers)
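# A minimal usage sketch for optflow_track() (not part of the original
# implementation). Image paths are placeholders.
def _example_optflow_track(img_path_i="/tmp/frame_i.png",
                           img_path_j="/tmp/frame_j.png"):
  """ Track FAST keypoints from frame i to frame j with optical flow """
  img_i = cv2.imread(img_path_i, cv2.IMREAD_GRAYSCALE)
  img_j = cv2.imread(img_path_j, cv2.IMREAD_GRAYSCALE)
  detector = cv2.FastFeatureDetector_create(threshold=50)
  kps_i = grid_detect(detector, img_i, optflow_mode=True)
  pts_i = np.array([kp.pt for kp in kps_i], dtype=np.float32)
  return optflow_track(img_i, img_j, pts_i)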
def filter_outliers(pts_i, pts_j, inliers):
""" Filter outliers """
pts_out_i = []
pts_out_j = []
for n, status in enumerate(inliers):
if status:
pts_out_i.append(pts_i[n])
pts_out_j.append(pts_j[n])
return (pts_out_i, pts_out_j)
def ransac(pts_i, pts_j, cam_i, cam_j):
""" RANSAC """
# Setup
cam_geom_i = cam_i.data
cam_geom_j = cam_j.data
# Undistort points
pts_i_ud = np.array([cam_geom_i.undistort(cam_i.param, p) for p in pts_i])
pts_j_ud = np.array([cam_geom_j.undistort(cam_j.param, p) for p in pts_j])
# Ransac via OpenCV's find fundamental matrix
method = cv2.FM_RANSAC
reproj_thresh = 0.75
confidence = 0.99
args = [pts_i_ud, pts_j_ud, method, reproj_thresh, confidence]
_, inliers = cv2.findFundamentalMat(*args)
return inliers.flatten()
class FeatureTrackerData:
"""
Feature tracking data *per camera*
This data structure keeps track of:
- Image
- Keypoints
- Descriptors
- Feature ids (optional)
"""
def __init__(self, cam_idx, image, keypoints, feature_ids=None):
self.cam_idx = cam_idx
self.image = image
self.keypoints = list(keypoints)
    self.feature_ids = list(feature_ids) if feature_ids is not None else []
def add(self, fid, kp):
""" Add measurement """
assert isinstance(fid, int)
assert hasattr(kp, 'pt')
self.keypoints.append(kp)
self.feature_ids.append(fid)
assert len(self.keypoints) == len(self.feature_ids)
def update(self, image, fids, kps):
""" Extend measurements """
assert len(kps) == len(fids)
self.image = np.array(image)
if kps:
assert hasattr(kps[0], 'pt')
self.feature_ids.extend(fids)
self.keypoints.extend(kps)
assert len(self.keypoints) == len(self.feature_ids)
class FeatureTracker:
""" Feature tracker """
def __init__(self):
# Settings
self.mode = "TRACK_DEFAULT"
# self.mode = "TRACK_OVERLAPS"
# self.mode = "TRACK_INDEPENDENT"
# Settings
self.reproj_threshold = 5.0
# Data
self.prev_ts = None
self.frame_idx = 0
self.detector = cv2.FastFeatureDetector_create(threshold=50)
self.features_detected = 0
self.features_tracking = 0
self.feature_overlaps = {}
self.prev_mcam_imgs = None
self.kp_size = 0
self.cam_idxs = []
self.cam_params = {}
self.cam_exts = {}
self.cam_overlaps = {}
self.cam_data = {}
def add_camera(self, cam_idx, cam_params, cam_exts):
""" Add camera """
self.cam_idxs.append(cam_idx)
self.cam_data[cam_idx] = None
self.cam_params[cam_idx] = cam_params
self.cam_exts[cam_idx] = cam_exts
def add_overlap(self, cam_i_idx, cam_j_idx):
""" Add overlap """
if cam_i_idx not in self.cam_overlaps:
self.cam_overlaps[cam_i_idx] = []
self.cam_overlaps[cam_i_idx].append(cam_j_idx)
def num_tracking(self):
""" Return number of features tracking """
feature_ids = []
for _, cam_data in self.cam_data.items():
if cam_data is not None:
feature_ids.extend(cam_data.feature_ids)
return len(set(feature_ids))
def _get_camera_indices(self):
""" Get camera indices """
    return list(self.cam_params.keys())
def _get_keypoints(self, cam_idx):
""" Get keypoints observed by camera `cam_idx` """
keypoints = None
if self.cam_data[cam_idx] is not None:
keypoints = self.cam_data[cam_idx].keypoints
return keypoints
def _get_feature_ids(self, cam_idx):
""" Get feature ids observed by camera `cam_idx` """
feature_ids = None
if self.cam_data[cam_idx] is not None:
feature_ids = self.cam_data[cam_idx].feature_ids
return feature_ids
def _form_feature_ids(self, nb_kps):
""" Form list of feature ids for new features to be added """
self.features_detected += nb_kps
start_idx = self.features_detected - nb_kps
end_idx = start_idx + nb_kps
return list(range(start_idx, end_idx))
def _triangulate(self, idx_i, idx_j, z_i, z_j):
""" Triangulate feature """
# Setup
cam_i = self.cam_params[idx_i]
cam_j = self.cam_params[idx_j]
cam_geom_i = cam_i.data
cam_geom_j = cam_j.data
cam_exts_i = self.cam_exts[idx_i]
cam_exts_j = self.cam_exts[idx_j]
# Form projection matrices P_i and P_j
T_BCi = pose2tf(cam_exts_i.param)
T_BCj = pose2tf(cam_exts_j.param)
T_CiCj = inv(T_BCi) @ T_BCj
P_i = pinhole_P(cam_geom_i.proj_params(cam_i.param), eye(4))
P_j = pinhole_P(cam_geom_j.proj_params(cam_j.param), T_CiCj)
# Undistort image points z_i and z_j
x_i = cam_geom_i.undistort(cam_i.param, z_i)
x_j = cam_geom_j.undistort(cam_j.param, z_j)
# Linear triangulate
p_Ci = linear_triangulation(P_i, P_j, x_i, x_j)
return p_Ci
def _reproj_filter(self, idx_i, idx_j, pts_i, pts_j):
""" Filter features by triangulating them via a stereo-pair and see if the
reprojection error is reasonable """
assert idx_i != idx_j
assert len(pts_i) == len(pts_j)
# Reject outliers based on reprojection error
reproj_inliers = []
cam_i = self.cam_params[idx_i]
cam_geom_i = cam_i.data
nb_pts = len(pts_i)
for n in range(nb_pts):
# Triangulate
z_i = pts_i[n]
z_j = pts_j[n]
p_Ci = self._triangulate(idx_i, idx_j, z_i, z_j)
if p_Ci[2] < 0.0:
reproj_inliers.append(False)
continue
# Reproject
z_i_hat = cam_geom_i.project(cam_i.param, p_Ci)
if z_i_hat is None:
reproj_inliers.append(False)
else:
reproj_error = norm(z_i - z_i_hat)
if reproj_error > self.reproj_threshold:
reproj_inliers.append(False)
else:
reproj_inliers.append(True)
return reproj_inliers
def _add_features(self, cam_idxs, mcam_imgs, cam_kps, fids):
""" Add features """
# Pre-check
assert cam_idxs
assert all(cam_idx in mcam_imgs for cam_idx in cam_idxs)
assert all(cam_idx in cam_kps for cam_idx in cam_idxs)
# Add camera data
for idx in cam_idxs:
img = mcam_imgs[idx]
kps = cam_kps[idx]
assert len(kps) == len(fids)
if self.cam_data[idx] is None:
self.cam_data[idx] = FeatureTrackerData(idx, img, kps, fids)
else:
self.cam_data[idx].update(img, fids, kps)
# Update overlapping features
if len(cam_idxs) > 1:
for fid in fids:
self.feature_overlaps[fid] = 2
def _update_features(self, cam_idxs, mcam_imgs, cam_kps, fids):
""" Update features """
# Pre-check
assert cam_idxs
assert all(cam_idx in mcam_imgs for cam_idx in cam_idxs)
assert all(cam_idx in cam_kps for cam_idx in cam_idxs)
# Update camera data
for idx in cam_idxs:
img = mcam_imgs[idx]
kps = cam_kps[idx]
self.cam_data[idx] = FeatureTrackerData(idx, img, kps, fids)
# # Update lost features
# fids_out = set(fids)
# fids_lost = [x for x in fids_in if x not in fids_out]
# for fid in fids_lost:
# # feature overlaps
# if fid in self.feature_overlaps:
# self.feature_overlaps[fid] -= 1
# if self.feature_overlaps[fid] == 0:
# del self.feature_overlaps[fid]
def _detect(self, image, prev_kps=None):
""" Detect """
assert image is not None
kwargs = {'prev_kps': prev_kps, 'optflow_mode': True}
kps = grid_detect(self.detector, image, **kwargs)
self.kp_size = kps[0].size if kps else 0
return kps
def _detect_overlaps(self, mcam_imgs):
""" Detect overlapping features """
# Loop through camera overlaps
for idx_i, overlaps in self.cam_overlaps.items():
# Detect keypoints observed from idx_i (primary camera)
cam_i = self.cam_params[idx_i]
img_i = mcam_imgs[idx_i]
prev_kps = self._get_keypoints(idx_i)
kps_i = self._detect(img_i, prev_kps=prev_kps)
pts_i = np.array([kp.pt for kp in kps_i], dtype=np.float32)
fids_new = self._form_feature_ids(len(kps_i))
if not kps_i:
continue
# Track feature from camera idx_i to idx_j (primary to secondary camera)
for idx_j in overlaps:
# Optical flow
img_j = mcam_imgs[idx_j]
(_, pts_j, optflow_inliers) = optflow_track(img_i, img_j, pts_i)
# RANSAC
ransac_inliers = []
if len(kps_i) < 10:
ransac_inliers = np.array([True for _, _ in enumerate(kps_i)])
else:
cam_j = self.cam_params[idx_j]
ransac_inliers = ransac(pts_i, pts_j, cam_i, cam_j)
# Reprojection filter
reproj_inliers = self._reproj_filter(idx_i, idx_j, pts_i, pts_j)
# Filter outliers
inliers = optflow_inliers & ransac_inliers & reproj_inliers
kps_j = [cv2.KeyPoint(p[0], p[1], self.kp_size) for p in pts_j]
fids = []
cam_kps = {idx_i: [], idx_j: []}
for i, inlier in enumerate(inliers):
if inlier:
fids.append(fids_new[i])
cam_kps[idx_i].append(kps_i[i])
cam_kps[idx_j].append(kps_j[i])
# Add features
cam_idxs = [idx_i, idx_j]
cam_imgs = {idx_i: img_i, idx_j: img_j}
self._add_features(cam_idxs, cam_imgs, cam_kps, fids)
def _detect_nonoverlaps(self, mcam_imgs):
""" Detect non-overlapping features """
for idx in self.cam_params:
# Detect keypoints
img = mcam_imgs[idx]
prev_kps = self._get_keypoints(idx)
kps = self._detect(img, prev_kps=prev_kps)
if not kps:
return
# Add features
fids = self._form_feature_ids(len(kps))
self._add_features([idx], {idx: img}, {idx: kps}, fids)
def _detect_new(self, mcam_imgs):
""" Detect new features """
# Detect new features
if self.mode == "TRACK_DEFAULT":
self._detect_overlaps(mcam_imgs)
self._detect_nonoverlaps(mcam_imgs)
elif self.mode == "TRACK_OVERLAPS":
self._detect_overlaps(mcam_imgs)
elif self.mode == "TRACK_INDEPENDENT":
self._detect_nonoverlaps(mcam_imgs)
else:
raise RuntimeError("Invalid FeatureTracker mode [%s]!" % self.mode)
def _track_through_time(self, mcam_imgs, cam_idx):
""" Track features through time """
# Setup images
img_km1 = self.prev_mcam_imgs[cam_idx]
img_k = mcam_imgs[cam_idx]
# Setup keypoints and feature_ids
kps_km1 = self._get_keypoints(cam_idx)
feature_ids = self._get_feature_ids(cam_idx)
pts_km1 = np.array([kp.pt for kp in kps_km1], dtype=np.float32)
# Optical flow
(pts_km1, pts_k, optflow_inliers) = optflow_track(img_km1, img_k, pts_km1)
# RANSAC
ransac_inliers = []
if len(kps_km1) < 10:
ransac_inliers = np.array([True for _, _ in enumerate(kps_km1)])
else:
cam = self.cam_params[cam_idx]
ransac_inliers = ransac(pts_km1, pts_k, cam, cam)
# Form inliers list
optflow_inliers = np.array(optflow_inliers)
ransac_inliers = np.array(ransac_inliers)
inliers = optflow_inliers & ransac_inliers
return (pts_km1, pts_k, feature_ids, inliers)
def _track_stereo(self, mcam_imgs, idx_i, idx_j, pts_i):
""" Track feature through stereo-pair """
# Optical flow
img_i = mcam_imgs[idx_i]
img_j = mcam_imgs[idx_j]
(pts_i, pts_j, optflow_inliers) = optflow_track(img_i, img_j, pts_i)
# RANSAC
cam_i = self.cam_params[idx_i]
cam_j = self.cam_params[idx_j]
ransac_inliers = ransac(pts_i, pts_j, cam_i, cam_j)
# Reject outliers based on reprojection error
reproj_inliers = self._reproj_filter(idx_i, idx_j, pts_i, pts_j)
# Logical AND optflow_inliers and reproj_inliers
ransac_inliers = np.array(ransac_inliers)
optflow_inliers = np.array(optflow_inliers)
reproj_inliers = np.array(reproj_inliers)
inliers = optflow_inliers & ransac_inliers & reproj_inliers
return (pts_i, pts_j, inliers)
def _track_features(self, mcam_imgs):
""" Track features """
# Track features in each camera
for idx in self.cam_idxs:
# Track through time
track_results = self._track_through_time(mcam_imgs, idx)
(_, pts_k, fids_old, inliers) = track_results
fids = []
kps = []
for i, inlier in enumerate(inliers):
if inlier:
pt = pts_k[i]
fids.append(fids_old[i])
kps.append(cv2.KeyPoint(pt[0], pt[1], self.kp_size))
# Update features
cam_idxs = [idx]
cam_imgs = {idx: mcam_imgs[idx]}
cam_kps = {idx: kps}
self._update_features(cam_idxs, cam_imgs, cam_kps, fids)
def update(self, ts, mcam_imgs):
""" Update Feature Tracker """
# Track features
if self.frame_idx == 0:
self._detect_new(mcam_imgs)
self.features_tracking = self.num_tracking()
else:
self._track_features(mcam_imgs)
if (self.num_tracking() / self.features_tracking) < 0.8:
self._detect_new(mcam_imgs)
# Update
self.frame_idx += 1
self.prev_ts = ts
self.prev_mcam_imgs = mcam_imgs
return self.cam_data
def visualize_tracking(ft_data):
""" Visualize feature tracking data """
viz = []
radius = 4
green = (0, 255, 0)
yellow = (0, 255, 255)
thickness = 1
linetype = cv2.LINE_AA
# Find overlaps
fids = {}
feature_overlaps = set()
for _, cam_data in ft_data.items():
for n, _ in enumerate(cam_data.keypoints):
fid = cam_data.feature_ids[n]
fids[fid] = (fids[fid] + 1) if fid in fids else 1
if fids[fid] > 1:
feature_overlaps.add(fid)
# Draw features being tracked in each camera
for _, cam_data in ft_data.items():
img = cam_data.image
cam_viz = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
for n, kp in enumerate(cam_data.keypoints):
fid = cam_data.feature_ids[n]
color = green if fid in feature_overlaps else yellow
p = (int(kp.pt[0]), int(kp.pt[1])) if hasattr(kp, 'pt') else kp
cv2.circle(cam_viz, p, radius, color, thickness, lineType=linetype)
viz.append(cam_viz)
return cv2.hconcat(viz)
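# A minimal wiring sketch for FeatureTracker (not part of the original
# implementation). It assumes `camera_params_setup()` and `extrinsics_setup()`
# exist as defined elsewhere in this module; intrinsics and extrinsics are
# placeholder values and `img0`, `img1` are grayscale stereo images.
def _example_feature_tracker(ts, img0, img1):
  """ Track features across a stereo pair for one timestep """
  params = [460.0, 460.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]
  cam0_params = camera_params_setup(0, [640, 480], "pinhole", "radtan4", params)
  cam1_params = camera_params_setup(1, [640, 480], "pinhole", "radtan4", params)
  ft = FeatureTracker()
  ft.add_camera(0, cam0_params, extrinsics_setup(eye(4)))
  ft.add_camera(1, cam1_params, extrinsics_setup(eye(4)))
  ft.add_overlap(0, 1)
  ft_data = ft.update(ts, {0: img0, 1: img1})
  return visualize_tracking(ft_data)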
# STATE-ESTIMATOR #############################################################
class KeyFrame:
""" Key Frame """
def __init__(self, ts, images, pose, vision_factors):
self.ts = ts
self.images = images
self.pose = pose
self.vision_factors = vision_factors
class Tracker:
""" Tracker """
def __init__(self, feature_tracker):
# Feature tracker
self.feature_tracker = feature_tracker
# Flags
self.imu_started = False
self.cams_started = False
# Data
self.graph = FactorGraph()
self.pose_init = None
self.imu_buf = ImuBuffer()
self.imu_params = None
self.cam_params = {}
self.cam_geoms = {}
self.cam_exts = {}
self.features = {}
self.keyframes = []
# Settings
self.window_size = 10
def nb_cams(self):
""" Return number of cameras """
return len(self.cam_params)
def nb_keyframes(self):
""" Return number of keyframes """
return len(self.keyframes)
def nb_features(self):
""" Return number of keyframes """
return len(self.features)
def add_imu(self, imu_params):
""" Add imu """
self.imu_params = imu_params
def add_camera(self, cam_idx, cam_params, cam_exts):
""" Add camera """
self.cam_params[cam_idx] = cam_params
self.cam_geoms[cam_idx] = cam_params.data
self.cam_exts[cam_idx] = cam_exts
self.graph.add_param(cam_params)
self.graph.add_param(cam_exts)
self.feature_tracker.add_camera(cam_idx, cam_params, cam_exts)
def add_overlap(self, cam_i, cam_j):
""" Add overlap """
self.feature_tracker.add_overlap(cam_i, cam_j)
def set_initial_pose(self, T_WB):
""" Set initial pose """
assert self.pose_init is None
self.pose_init = T_WB
def inertial_callback(self, ts, acc, gyr):
""" Inertial callback """
if self.imu_params is None:
raise RuntimeError("Forgot to add imu to tracker?")
self.imu_buf.add(ts, acc, gyr)
self.imu_started = True
def _triangulate(self, cam_i, cam_j, z_i, z_j, T_WB):
""" Triangulate feature """
# Setup
cam_params_i = self.cam_params[cam_i]
cam_params_j = self.cam_params[cam_j]
cam_geom_i = cam_params_i.data
cam_geom_j = cam_params_j.data
cam_exts_i = self.cam_exts[cam_i]
cam_exts_j = self.cam_exts[cam_j]
# Form projection matrices P_i and P_j
T_BCi = pose2tf(cam_exts_i.param)
T_BCj = pose2tf(cam_exts_j.param)
T_CiCj = inv(T_BCi) @ T_BCj
P_i = pinhole_P(cam_geom_i.proj_params(cam_params_i.param), eye(4))
P_j = pinhole_P(cam_geom_j.proj_params(cam_params_j.param), T_CiCj)
# Undistort image points z_i and z_j
x_i = cam_geom_i.undistort(cam_params_i.param, z_i)
x_j = cam_geom_j.undistort(cam_params_j.param, z_j)
# Linear triangulate
p_Ci = linear_triangulation(P_i, P_j, x_i, x_j)
if p_Ci[2] < 0.0:
return None
# Transform feature from camera frame to world frame
T_BCi = pose2tf(self.cam_exts[cam_i].param)
p_W = tf_point(T_WB @ T_BCi, p_Ci)
return p_W
def _add_pose(self, ts, T_WB):
"""
Add pose
Args:
      ts (int): Timestamp
      T_WB (np.array): Body pose in world frame
"""
pose = pose_setup(ts, T_WB)
self.graph.add_param(pose)
return pose
def _get_last_pose(self):
""" Get last pose """
return pose2tf(self.keyframes[-1].pose.param)
def _add_feature(self, fid, ts, cam_idx, kp):
"""
Add feature
Args:
fid (int): Feature id
ts (int): Timestamp
cam_idx (int): Camera index
kp (cv2.KeyPoint): Key point
"""
assert hasattr(kp, 'pt')
self.features[fid] = feature_setup(zeros((3,)))
self.features[fid].data.update(ts, cam_idx, kp.pt)
feature_pid = self.graph.add_param(self.features[fid])
return feature_pid
def _update_feature(self, fid, ts, cam_idx, kp, T_WB):
"""
Update feature
Args:
fid (int): Feature id
ts (int): Timestamp
cam_idx (int): Camera index
kp (cv2.KeyPoint): Key point
T_WB (np.array): Body pose in world frame
"""
# Update feature
self.features[fid].data.update(ts, cam_idx, kp.pt)
# Initialize overlapping features
has_inited = self.features[fid].data.initialized()
has_overlap = self.features[fid].data.has_overlap(ts)
if has_inited is False and has_overlap is True:
overlaps = self.features[fid].data.get_overlaps(ts)
cam_i, z_i = overlaps[0]
cam_j, z_j = overlaps[1]
p_W = self._triangulate(cam_i, cam_j, z_i, z_j, T_WB)
if p_W is not None:
self.features[fid].param = p_W
self.features[fid].data.set_initialized()
def _process_features(self, ts, ft_data, pose):
""" Process features
Args:
ts (int): Timestamp
ft_data (Dict[int, FeatureTrackerData]): Multi-camera feature tracker data
pose (StateVariable): Body pose in world frame
"""
# Add or update feature
T_WB = pose2tf(pose.param)
for cam_idx, cam_data in ft_data.items():
for fid, kp in zip(cam_data.feature_ids, cam_data.keypoints):
if fid not in self.features:
self._add_feature(fid, ts, cam_idx, kp)
else:
self._update_feature(fid, ts, cam_idx, kp, T_WB)
def _add_keyframe(self, ts, mcam_imgs, ft_data, pose):
"""
Add keyframe
Args:
ts (int): Timestamp
mcam_imgs (Dict[int, np.array]): Multi-camera images
ft_data (Dict[int, FeatureTrackerData]): Multi-camera features
pose (Pose): Body pose in world frame
"""
vision_factors = []
for cam_idx, cam_data in ft_data.items():
# camera params, geometry and extrinsics
cam_params = self.cam_params[cam_idx]
cam_geom = self.cam_geoms[cam_idx]
cam_exts = self.cam_exts[cam_idx]
# Form vision factors
for fid, kp in zip(cam_data.feature_ids, cam_data.keypoints):
feature = self.features[fid]
if feature.data.initialized() is False:
continue
# Form vision factor
param_ids = []
param_ids.append(pose.param_id)
param_ids.append(cam_exts.param_id)
param_ids.append(feature.param_id)
param_ids.append(cam_params.param_id)
factor = VisionFactor(cam_geom, param_ids, kp.pt)
vision_factors.append(factor)
self.graph.add_factor(factor)
# Form keyframe
self.keyframes.append(KeyFrame(ts, mcam_imgs, pose, vision_factors))
def _pop_old_keyframe(self):
""" Pop old keyframe """
# Remove pose parameter and vision factors
kf = self.keyframes[0]
self.graph.remove_param(kf.pose)
for factor in kf.vision_factors:
self.graph.remove_factor(factor)
# Pop the front of the queue
self.keyframes.pop(0)
def _filter_keyframe_factors(self, filter_from=0):
""" Filter keyframe factors """
removed = 0
for kf in self.keyframes[filter_from:]:
# Calculate reprojection error
reproj_errors = []
for factor in list(kf.vision_factors):
        factor_params = [self.graph.params[pid].param for pid in factor.param_ids]
        r, _ = factor.eval(factor_params)
reproj_errors.append(norm(r))
# Filter factors
threshold = 3.0 * np.std(reproj_errors)
filtered_factors = []
for reproj_error, factor in zip(reproj_errors, kf.vision_factors):
if reproj_error >= threshold:
self.graph.remove_factor(factor)
removed += 1
else:
filtered_factors.append(factor)
kf.vision_factors = filtered_factors
def vision_callback(self, ts, mcam_imgs):
"""
Vision callback
Args:
ts (int): Timestamp
mcam_imgs (Dict[int, np.array]): Multi-camera images
"""
assert self.pose_init is not None
# Has IMU?
if self.imu_params is not None and self.imu_started is False:
return
# Perform feature tracking
ft_data = self.feature_tracker.update(ts, mcam_imgs)
# Add pose
pose = None
if self.nb_keyframes() == 0:
pose = self._add_pose(ts, self.pose_init)
else:
T_WB = self._get_last_pose()
pose = self._add_pose(ts, T_WB)
# Process features, add keyframe and solve
self._process_features(ts, ft_data, pose)
self._add_keyframe(ts, mcam_imgs, ft_data, pose)
if self.nb_keyframes() != 1:
self.graph.solve(True)
self._filter_keyframe_factors()
if len(self.keyframes) > self.window_size:
self._pop_old_keyframe()
errors = self.graph.get_reproj_errors()
print(f"reproj_error:", end=" [")
print(f"mean: {np.mean(errors):.2f}", end=", ")
print(f"median: {np.median(errors):.2f}", end=", ")
print(f"rms: {rmse(errors):.2f}", end=", ")
print(f"max: {np.max(errors):.2f}", end="]\n")
print(f"nb_keyframes: {self.nb_keyframes()}")
print()
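# A minimal wiring sketch for Tracker (not part of the original
# implementation). It assumes `camera_params_setup()` and `extrinsics_setup()`
# exist as defined elsewhere in this module; intrinsics, extrinsics and the
# IMU measurement are placeholder values.
def _example_tracker(ts, img0, img1, imu_params):
  """ Set up a stereo-inertial Tracker and process one frame """
  params = [460.0, 460.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]
  cam0_params = camera_params_setup(0, [640, 480], "pinhole", "radtan4", params)
  cam1_params = camera_params_setup(1, [640, 480], "pinhole", "radtan4", params)
  tracker = Tracker(FeatureTracker())
  tracker.add_imu(imu_params)
  tracker.add_camera(0, cam0_params, extrinsics_setup(eye(4)))
  tracker.add_camera(1, cam1_params, extrinsics_setup(eye(4)))
  tracker.add_overlap(0, 1)
  tracker.set_initial_pose(eye(4))
  tracker.inertial_callback(ts, np.array([0.0, 0.0, 9.81]), zeros((3,)))
  tracker.vision_callback(ts, {0: img0, 1: img1})
  return tracker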
###############################################################################
# CALIBRATION
###############################################################################
class AprilGrid:
""" AprilGrid """
def __init__(self, tag_rows=6, tag_cols=6, tag_size=0.088, tag_spacing=0.3):
self.tag_rows = tag_rows
self.tag_cols = tag_cols
self.tag_size = tag_size
self.tag_spacing = tag_spacing
self.nb_tags = self.tag_rows * self.tag_cols
self.ts = None
self.data = {}
@staticmethod
def load(csv_file):
""" Load AprilGrid """
# Load csv file
csv_data = pandas.read_csv(csv_file)
if csv_data.shape[0] == 0:
return None
# AprilGrid properties
ts = csv_data['#ts'][0]
tag_rows = csv_data['tag_rows'][0]
tag_cols = csv_data['tag_cols'][0]
tag_size = csv_data['tag_size'][0]
tag_spacing = csv_data['tag_spacing'][0]
# AprilGrid measurements
tag_indices = csv_data['tag_id']
corner_indices = csv_data['corner_idx']
kps = np.array([csv_data['kp_x'], csv_data['kp_y']]).T
# Form AprilGrid
grid = AprilGrid(tag_rows, tag_cols, tag_size, tag_spacing)
for tag_id, corner_idx, kp in zip(tag_indices, corner_indices, kps):
grid.add_keypoint(ts, tag_id, corner_idx, kp)
return grid
def get_object_point(self, tag_id, corner_idx):
""" Form object point """
# Calculate the AprilGrid index using tag id
[i, j] = self.get_grid_index(tag_id)
# Calculate the x and y of the tag origin (bottom left corner of tag)
# relative to grid origin (bottom left corner of entire grid)
x = j * (self.tag_size + self.tag_size * self.tag_spacing)
y = i * (self.tag_size + self.tag_size * self.tag_spacing)
# Corners from bottom left in counter-clockwise fashion
if corner_idx == 0:
# Bottom left
return np.array([x, y, 0])
elif corner_idx == 1:
# Bottom right
return np.array([x + self.tag_size, y, 0])
elif corner_idx == 2:
# Top right
return np.array([x + self.tag_size, y + self.tag_size, 0])
elif corner_idx == 3:
# Top left
return np.array([x, y + self.tag_size, 0])
raise RuntimeError(f"Invalid tag_id[{tag_id}] corner_idx[{corner_idx}]!")
def get_object_points(self):
""" Form object points """
object_points = []
for tag_id in range(self.nb_tags):
for corner_idx in range(4):
object_points.append(self.get_object_point(tag_id, corner_idx))
return np.array(object_points)
def get_center(self):
""" Calculate center of aprilgrid """
x = (self.tag_cols / 2.0) * self.tag_size
x += ((self.tag_cols / 2.0) - 1) * self.tag_spacing * self.tag_size
x += 0.5 * self.tag_spacing * self.tag_size
y = (self.tag_rows / 2.0) * self.tag_size
y += ((self.tag_rows / 2.0) - 1) * self.tag_spacing * self.tag_size
y += 0.5 * self.tag_spacing * self.tag_size
return np.array([x, y])
def get_grid_index(self, tag_id):
""" Calculate grid index from tag id """
assert tag_id < (self.nb_tags) and tag_id >= 0
i = int(tag_id / self.tag_cols)
j = int(tag_id % self.tag_cols)
return (i, j)
def add_keypoint(self, ts, tag_id, corner_idx, kp):
""" Add keypoint """
self.ts = ts
if tag_id not in self.data:
self.data[tag_id] = {}
self.data[tag_id][corner_idx] = kp
def remove_keypoint(self, tag_id, corner_idx):
""" Remove keypoint """
assert tag_id in self.data
assert corner_idx in self.data[tag_id]
del self.data[tag_id][corner_idx]
def get_measurements(self):
""" Get measurements """
data = []
for tag_id, tag_data in self.data.items():
for corner_idx, kp in tag_data.items():
obj_point = self.get_object_point(tag_id, corner_idx)
data.append((tag_id, corner_idx, obj_point, kp))
return data
def solvepnp(self, cam_params):
""" Estimate relative transform between camera and aprilgrid """
# Check if we actually have data to work with
if not self.data:
return None
# Create object points (counter-clockwise, from bottom left)
cam_geom = cam_params.data
obj_pts = []
img_pts = []
for (_, _, r_FFi, z) in self.get_measurements():
img_pts.append(cam_geom.undistort(cam_params.param, z))
obj_pts.append(r_FFi)
obj_pts = np.array(obj_pts)
img_pts = np.array(img_pts)
# Solve pnp
K = pinhole_K(cam_params.param[0:4])
D = np.array([0.0, 0.0, 0.0, 0.0])
flags = cv2.SOLVEPNP_ITERATIVE
_, rvec, tvec = cv2.solvePnP(obj_pts, img_pts, K, D, False, flags=flags)
# Form relative tag pose as a 4x4 transform matrix
C, _ = cv2.Rodrigues(rvec)
r = tvec.flatten()
T_CF = tf(C, r)
return T_CF
def plot(self, ax, T_WF):
""" Plot """
obj_pts = self.get_object_points()
for row_idx in range(obj_pts.shape[0]):
r_FFi = obj_pts[row_idx, :]
r_WFi = tf_point(T_WF, r_FFi)
ax.plot(r_WFi[0], r_WFi[1], r_WFi[2], 'r.')
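# A minimal usage sketch for AprilGrid (not part of the original
# implementation). The detections csv path is a placeholder.
def _example_aprilgrid(csv_file="/tmp/grid0.csv"):
  """ Load AprilGrid detections and iterate over its measurements """
  grid = AprilGrid.load(csv_file)
  if grid is None:
    return None
  for (tag_id, corner_idx, r_FFi, z) in grid.get_measurements():
    print(f"tag[{tag_id}] corner[{corner_idx}] r_FFi: {r_FFi} z: {z}")
  return grid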
def calib_generate_poses(calib_target, **kwargs):
""" Generate calibration poses infront of the calibration target """
# Pose settings
x_range = kwargs.get('x_range', np.linspace(-0.3, 0.3, 5))
y_range = kwargs.get('y_range', np.linspace(-0.3, 0.3, 5))
z_range = kwargs.get('z_range', np.linspace(0.3, 0.5, 5))
  # Generate camera positions in front of the calib target r_FC
calib_center = np.array([*calib_target.get_center(), 0.0])
cam_pos = []
pos_idx = 0
for x in x_range:
for y in y_range:
for z in z_range:
r_FC = np.array([x, y, z]) + calib_center
cam_pos.append(r_FC)
pos_idx += 1
# For each position create a camera pose that "looks at" the calib
# center in the target frame, T_FC.
return [lookat(r_FC, calib_center) for r_FC in cam_pos]
def calib_generate_random_poses(calib_target, **kwargs):
""" Generate random calibration poses infront of the calibration target """
# Settings
nb_poses = kwargs.get('nb_poses', 30)
att_range = kwargs.get('att_range', [deg2rad(-10.0), deg2rad(10.0)])
x_range = kwargs.get('x_range', [-0.5, 0.5])
y_range = kwargs.get('y_range', [-0.5, 0.5])
z_range = kwargs.get('z_range', [0.5, 0.7])
# For each position create a camera pose that "looks at" the calibration
# center in the target frame, T_FC.
calib_center = np.array([*calib_target.get_center(), 0.0])
poses = []
for _ in range(nb_poses):
# Generate random pose
x = np.random.uniform(x_range[0], x_range[1])
y = np.random.uniform(y_range[0], y_range[1])
z = np.random.uniform(z_range[0], z_range[1])
r_FC = calib_center + np.array([x, y, z])
T_FC = lookat(r_FC, calib_center)
# Perturb the pose a little so it doesn't look at the center directly
yaw = np.random.uniform(*att_range)
pitch = np.random.uniform(*att_range)
roll = np.random.uniform(*att_range)
C_perturb = euler321(yaw, pitch, roll)
r_perturb = zeros((3,))
T_perturb = tf(C_perturb, r_perturb)
poses.append(T_FC @ T_perturb)
return poses
class CalibView:
""" Calibration View """
def __init__(self, pose, cam_params, cam_exts, grid):
self.ts = grid.ts
self.pose = pose
self.cam_idx = cam_params.data.cam_idx
self.cam_params = cam_params
self.cam_geom = cam_params.data
self.cam_exts = cam_exts
self.grid = grid
self.factors = []
pids = [pose.param_id, cam_exts.param_id, cam_params.param_id]
for grid_data in grid.get_measurements():
self.factors.append(CalibVisionFactor(self.cam_geom, pids, grid_data))
def get_reproj_errors(self):
""" Get reprojection errors """
reproj_errors = []
factor_params = [self.pose, self.cam_exts, self.cam_params]
for factor in self.factors:
reproj_error = factor.get_reproj_error(*factor_params)
if reproj_error is not None:
reproj_errors.append(reproj_error)
return reproj_errors
class Calibrator:
""" Calibrator """
def __init__(self):
# Parameters
self.cam_geoms = {}
self.cam_params = {}
self.cam_exts = {}
self.imu_params = None
# Data
self.graph = FactorGraph()
self.poses = {}
self.calib_views = {}
def get_num_cams(self):
""" Return number of cameras """
return len(self.cam_params)
def get_num_views(self):
""" Return number of views """
return len(self.calib_views)
def add_camera(self, cam_idx, cam_res, proj_model, dist_model):
""" Add camera """
fx = focal_length(cam_res[0], 90.0)
fy = focal_length(cam_res[1], 90.0)
cx = cam_res[0] / 2.0
cy = cam_res[1] / 2.0
params = [fx, fy, cx, cy, 0.0, 0.0, 0.0, 0.0]
args = [cam_idx, cam_res, proj_model, dist_model, params]
cam_params = camera_params_setup(*args)
fix_exts = True if cam_idx == 0 else False
self.cam_geoms[cam_idx] = cam_params.data
self.cam_params[cam_idx] = cam_params
self.cam_exts[cam_idx] = extrinsics_setup(eye(4), fix=fix_exts)
self.graph.add_param(self.cam_params[cam_idx])
self.graph.add_param(self.cam_exts[cam_idx])
def add_imu(self, imu_params):
""" Add imu """
self.imu_params = imu_params
def add_camera_view(self, ts, cam_idx, grid):
""" Add camera view """
# Estimate relative pose T_BF
cam_params = self.cam_params[cam_idx]
cam_exts = self.cam_exts[cam_idx]
T_CiF = grid.solvepnp(cam_params)
T_BCi = pose2tf(cam_exts.param)
T_BF = T_BCi @ T_CiF
self.poses[ts] = pose_setup(ts, T_BF)
# CalibView
self.graph.add_param(self.poses[ts])
self.calib_views[ts] = CalibView(self.poses[ts], cam_params, cam_exts, grid)
for factor in self.calib_views[ts].factors:
self.graph.add_factor(factor)
# Solve
if len(self.calib_views) >= 5:
self.graph.solver_max_iter = 10
self.graph.solve(True)
# Calculate reprojection error
reproj_errors = self.graph.get_reproj_errors()
print(f"nb_reproj_errors: {len(reproj_errors)}")
print(f"rms_reproj_errors: {rmse(reproj_errors):.4f} [px]")
print()
# plt.hist(reproj_errors)
# plt.show()
def solve(self):
""" Solve """
self.graph.solver_max_iter = 30
self.graph.solve(True)
reproj_errors = self.graph.get_reproj_errors()
print(f"nb_cams: {self.get_num_cams()}")
print(f"nb_views: {self.get_num_views()}")
print(f"nb_reproj_errors: {len(reproj_errors)}")
print(f"rms_reproj_errors: {rmse(reproj_errors):.4f} [px]")
sys.stdout.flush()
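# A minimal wiring sketch for Calibrator (not part of the original
# implementation). The per-view detection csv paths are placeholders; each csv
# is assumed to hold one camera view in the format read by AprilGrid.load().
def _example_calibrator(cam0_csvs, cam1_csvs):
  """ Calibrate a stereo pair from per-view AprilGrid detections """
  calib = Calibrator()
  calib.add_camera(0, [640, 480], "pinhole", "radtan4")
  calib.add_camera(1, [640, 480], "pinhole", "radtan4")
  for cam_idx, csv_files in enumerate([cam0_csvs, cam1_csvs]):
    for csv_file in csv_files:
      grid = AprilGrid.load(csv_file)
      if grid is not None:
        calib.add_camera_view(grid.ts, cam_idx, grid)
  calib.solve()
  return calib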
###############################################################################
# SIMULATION
###############################################################################
# UTILS #######################################################################
def create_3d_features(x_bounds, y_bounds, z_bounds, nb_features):
""" Create 3D features randomly """
features = zeros((nb_features, 3))
for i in range(nb_features):
features[i, 0] = random.uniform(*x_bounds)
features[i, 1] = random.uniform(*y_bounds)
features[i, 2] = random.uniform(*z_bounds)
return features
def create_3d_features_perimeter(origin, dim, nb_features):
""" Create 3D features in a square """
assert len(origin) == 3
assert len(dim) == 3
assert nb_features > 0
  # Dimensions of the perimeter
w, l, h = dim
# Features per side
nb_fps = int(nb_features / 4.0)
# Features in the east side
x_bounds = [origin[0] - w, origin[0] + w]
y_bounds = [origin[1] + l, origin[1] + l]
z_bounds = [origin[2] - h, origin[2] + h]
east = create_3d_features(x_bounds, y_bounds, z_bounds, nb_fps)
# Features in the north side
x_bounds = [origin[0] + w, origin[0] + w]
y_bounds = [origin[1] - l, origin[1] + l]
z_bounds = [origin[2] - h, origin[2] + h]
north = create_3d_features(x_bounds, y_bounds, z_bounds, nb_fps)
# Features in the west side
x_bounds = [origin[0] - w, origin[0] + w]
y_bounds = [origin[1] - l, origin[1] - l]
z_bounds = [origin[2] - h, origin[2] + h]
west = create_3d_features(x_bounds, y_bounds, z_bounds, nb_fps)
# Features in the south side
x_bounds = [origin[0] - w, origin[0] - w]
y_bounds = [origin[1] - l, origin[1] + l]
z_bounds = [origin[2] - h, origin[2] + h]
south = create_3d_features(x_bounds, y_bounds, z_bounds, nb_fps)
# Stack features and return
return np.block([[east], [north], [west], [south]])
# SIMULATION ##################################################################
class SimCameraFrame:
""" Sim camera frame """
def __init__(self, ts, cam_idx, camera, T_WCi, features):
assert T_WCi.shape == (4, 4)
assert features.shape[0] > 0
assert features.shape[1] == 3
self.ts = ts
self.cam_idx = cam_idx
self.T_WCi = T_WCi
self.cam_geom = camera.data
self.cam_params = camera.param
self.feature_ids = []
self.measurements = []
# Simulate camera frame
nb_points = features.shape[0]
T_CiW = tf_inv(self.T_WCi)
for i in range(nb_points):
# Project point from world frame to camera frame
p_W = features[i, :]
p_C = tf_point(T_CiW, p_W)
z = self.cam_geom.project(self.cam_params, p_C)
if z is not None:
self.measurements.append(z)
self.feature_ids.append(i)
def num_measurements(self):
""" Return number of measurements """
return len(self.measurements)
def draw_measurements(self):
""" Returns camera measurements in an image """
kps = [kp for kp in self.measurements]
img_w, img_h = self.cam_geom.resolution
img = np.zeros((img_h, img_w), dtype=np.uint8)
return draw_keypoints(img, kps)
class SimCameraData:
""" Sim camera data """
def __init__(self, cam_idx, camera, features):
self.cam_idx = cam_idx
self.camera = camera
self.features = features
self.timestamps = []
self.poses = {}
self.frames = {}
class SimImuData:
""" Sim imu data """
def __init__(self, imu_idx):
self.imu_idx = imu_idx
self.timestamps = []
self.poses = {}
self.vel = {}
self.acc = {}
self.gyr = {}
def form_imu_buffer(self, start_idx, end_idx):
""" Form ImuBuffer """
imu_ts = self.timestamps[start_idx:end_idx]
imu_acc = []
imu_gyr = []
for ts in self.timestamps:
imu_acc.append(self.acc[ts])
imu_gyr.append(self.gyr[ts])
return ImuBuffer(imu_ts, imu_acc, imu_gyr)
class SimData:
""" Sim data """
def __init__(self, circle_r, circle_v, **kwargs):
# Settings
self.circle_r = circle_r
self.circle_v = circle_v
self.cam_rate = 10.0
self.imu_rate = 200.0
self.nb_features = 200
# Trajectory data
self.g = np.array([0.0, 0.0, 9.81])
self.circle_dist = 2.0 * pi * circle_r
self.time_taken = self.circle_dist / self.circle_v
self.w = -2.0 * pi * (1.0 / self.time_taken)
self.theta_init = pi
self.yaw_init = pi / 2.0
self.features = self._setup_features()
# Simulate IMU
self.imu0_data = None
if kwargs.get("sim_imu", True):
self.imu0_data = self._sim_imu(0)
# Simulate camera
self.mcam_data = {}
self.cam_exts = {}
if kwargs.get("sim_cams", True):
# -- cam0
self.cam0_params = self._setup_camera(0)
C_BC0 = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_BC0 = np.array([0.0, 0.0, 0.0])
self.T_BC0 = tf(C_BC0, r_BC0)
self.mcam_data[0] = self._sim_cam(0, self.cam0_params, self.T_BC0)
self.cam_exts[0] = extrinsics_setup(self.T_BC0)
# -- cam1
self.cam1_params = self._setup_camera(1)
C_BC1 = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_BC1 = np.array([0.0, 0.0, 0.0])
self.T_BC1 = tf(C_BC1, r_BC1)
# -- Multicam data
self.mcam_data[1] = self._sim_cam(1, self.cam1_params, self.T_BC1)
self.cam_exts[1] = extrinsics_setup(self.T_BC1)
# Timeline
self.timeline = self._form_timeline()
def get_camera_data(self, cam_idx):
""" Get camera data """
return self.mcam_data[cam_idx]
def get_camera_params(self, cam_idx):
""" Get camera parameters """
return self.mcam_data[cam_idx].camera
def get_camera_geometry(self, cam_idx):
""" Get camera geometry """
return self.mcam_data[cam_idx].camera.data
def get_camera_extrinsics(self, cam_idx):
""" Get camera extrinsics """
return self.cam_exts[cam_idx]
def plot_scene(self):
""" Plot 3D Scene """
# Setup
plt.figure()
ax = plt.axes(projection='3d')
# Plot features
features = self.features
ax.scatter3D(features[:, 0], features[:, 1], features[:, 2])
# Plot camera frames
idx = 0
for _, T_WB in self.imu0_data.poses.items():
if idx % 100 == 0:
T_BC0 = pose2tf(self.cam_exts[0].param)
T_BC1 = pose2tf(self.cam_exts[1].param)
plot_tf(ax, T_WB @ T_BC0)
plot_tf(ax, T_WB @ T_BC1)
if idx > 3000:
break
idx += 1
# Show
plt.show()
@staticmethod
def create_or_load(circle_r, circle_v, pickle_path):
""" Create or load SimData """
sim_data = None
if os.path.exists(pickle_path):
with open(pickle_path, 'rb') as f:
sim_data = pickle.load(f)
else:
sim_data = SimData(circle_r, circle_v)
with open(pickle_path, 'wb') as f:
pickle.dump(sim_data, f)
f.flush()
return sim_data
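# Illustrative usage sketch (the pickle path is an assumption, not part of the API):
#
#   sim_data = SimData.create_or_load(5.0, 1.0, '/tmp/sim_data.pickle')
#   cam0_data = sim_data.get_camera_data(0)
#   cam0_geom = sim_data.get_camera_geometry(0)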
@staticmethod
def _setup_camera(cam_idx):
""" Setup camera """
res = [640, 480]
fov = 120.0
fx = focal_length(res[0], fov)
fy = focal_length(res[0], fov)
cx = res[0] / 2.0
cy = res[1] / 2.0
proj_model = "pinhole"
dist_model = "radtan4"
proj_params = [fx, fy, cx, cy]
dist_params = [0.0, 0.0, 0.0, 0.0]
params = np.block([*proj_params, *dist_params])
return camera_params_setup(cam_idx, res, proj_model, dist_model, params)
def _setup_features(self):
""" Setup features """
origin = [0, 0, 0]
dim = [self.circle_r * 2.0, self.circle_r * 2.0, self.circle_r * 1.5]
return create_3d_features_perimeter(origin, dim, self.nb_features)
def _sim_imu(self, imu_idx):
""" Simulate IMU """
sim_data = SimImuData(imu_idx)
time = 0.0
dt = 1.0 / self.imu_rate
theta = self.theta_init
yaw = self.yaw_init
while time <= self.time_taken:
# Timestamp
ts = sec2ts(time)
# IMU pose
rx = self.circle_r * cos(theta)
ry = self.circle_r * sin(theta)
rz = 0.0
r_WS = np.array([rx, ry, rz])
C_WS = euler321(yaw, 0.0, 0.0)
T_WS = tf(C_WS, r_WS)
# IMU velocity
vx = -self.circle_r * self.w * sin(theta)
vy = self.circle_r * self.w * cos(theta)
vz = 0.0
v_WS = np.array([vx, vy, vz])
# IMU acceleration
ax = -self.circle_r * self.w**2 * cos(theta)
ay = -self.circle_r * self.w**2 * sin(theta)
az = 0.0
a_WS = np.array([ax, ay, az])
# IMU angular velocity
wx = 0.0
wy = 0.0
wz = self.w
w_WS = np.array([wx, wy, wz])
# IMU measurements
acc = C_WS.T @ (a_WS + self.g)
gyr = C_WS.T @ w_WS
# Update
sim_data.timestamps.append(ts)
sim_data.poses[ts] = T_WS
sim_data.vel[ts] = v_WS
sim_data.acc[ts] = acc
sim_data.gyr[ts] = gyr
theta += self.w * dt
yaw += self.w * dt
time += dt
return sim_data
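# Note: the simulated measurements above are the ideal body-frame values
# acc = C_WS^T (a_WS + g) and gyr = C_WS^T w_WS of the circular trajectory;
# no measurement noise or biases are added.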
def _sim_cam(self, cam_idx, cam_params, T_BCi):
""" Simulate camera """
sim_data = SimCameraData(cam_idx, cam_params, self.features)
time = 0.0
dt = 1.0 / self.cam_rate
theta = self.theta_init
yaw = self.yaw_init
while time <= self.time_taken:
# Timestamp
ts = sec2ts(time)
# Body pose
rx = self.circle_r * cos(theta)
ry = self.circle_r * sin(theta)
rz = 0.0
r_WB = [rx, ry, rz]
C_WB = euler321(yaw, 0.0, 0.0)
T_WB = tf(C_WB, r_WB)
# Simulate camera pose and camera frame
T_WCi = T_WB @ T_BCi
cam_frame = SimCameraFrame(ts, cam_idx, cam_params, T_WCi, self.features)
sim_data.timestamps.append(ts)
sim_data.poses[ts] = T_WCi
sim_data.frames[ts] = cam_frame
# Update
theta += self.w * dt
yaw += self.w * dt
time += dt
return sim_data
def _form_timeline(self):
""" Form timeline """
# Form timeline
timeline = Timeline()
# -- Add imu events
imu_idx = self.imu0_data.imu_idx
for ts in self.imu0_data.timestamps:
acc = self.imu0_data.acc[ts]
gyr = self.imu0_data.gyr[ts]
imu_event = ImuEvent(ts, imu_idx, acc, gyr)
timeline.add_event(ts, imu_event)
# -- Add camera events
for cam_idx, cam_data in self.mcam_data.items():
for ts in cam_data.timestamps:
frame = cam_data.frames[ts]
fids = frame.feature_ids
kps = frame.measurements
sim_img = []
for i, fid in enumerate(fids):
sim_img.append([fid, kps[i]])
cam_event = CameraEvent(ts, cam_idx, sim_img)
timeline.add_event(ts, cam_event)
return timeline
class SimFeatureTracker(FeatureTracker):
""" Sim Feature Tracker """
def __init__(self):
FeatureTracker.__init__(self)
def update(self, ts, mcam_imgs):
""" Update Sim Feature Tracker """
for cam_idx, cam_data in mcam_imgs.items():
kps = [data[1] for data in cam_data]
fids = [data[0] for data in cam_data]
ft_data = FeatureTrackerData(cam_idx, None, kps, fids)
self.cam_data[cam_idx] = ft_data
# Update
self.frame_idx += 1
self.prev_ts = ts
self.prev_mcam_imgs = mcam_imgs
return self.cam_data
def visualize(self):
""" Visualize """
# Image size
# cam_res = cam0_params.data.resolution
# img_w, img_h = cam_res
# img0 = np.zeros((img_h, img_w), dtype=np.uint8)
# kps = [kp for kp in ft_data[0].keypoints]
# viz = draw_keypoints(img0, kps)
# cv2.imshow('viz', viz)
# cv2.waitKey(0)
pass
###############################################################################
# CONTROL
###############################################################################
class PID:
""" PID controller """
def __init__(self, k_p, k_i, k_d):
self.k_p = k_p
self.k_i = k_i
self.k_d = k_d
self.error_p = 0.0
self.error_i = 0.0
self.error_d = 0.0
self.error_prev = 0.0
self.error_sum = 0.0
def update(self, setpoint, actual, dt):
""" Update """
# Calculate errors
error = setpoint - actual
self.error_sum += error * dt
# Calculate output
self.error_p = self.k_p * error
self.error_i = self.k_i * self.error_sum
self.error_d = self.k_d * (error - self.error_prev) / dt
output = self.error_p + self.error_i + self.error_d
# Keep track of error
self.error_prev = error
return output
def reset(self):
""" Reset """
self.error_p = 0.0
self.error_i = 0.0
self.error_d = 0.0
self.error_prev = 0.0
self.error_sum = 0.0
class CarrotController:
""" Carrot Controller """
def __init__(self):
self.waypoints = []
self.wp_start = None
self.wp_end = None
self.wp_index = None
self.look_ahead_dist = 0.0
def _calculate_closest_point(self, pos):
""" Calculate closest point """
v1 = pos - self.wp_start
v2 = self.wp_end - self.wp_start
t = (v1 @ v2) / (v2 @ v2)
pt = self.wp_start + t * v2
return (t, pt)
def _calculate_carrot_point(self, pos):
""" Calculate carrot point """
assert len(pos) == 3
t, closest_pt = self._calculate_closest_point(pos)
carrot_pt = None
if t < 0.0:
# Closest point is before wp_start
carrot_pt = self.wp_start
elif t > 1.0:
# Closest point is after wp_end
carrot_pt = self.wp_end
else:
# Closest point is between wp_start and wp_end
u = self.wp_end - self.wp_start
v = u / norm(u)
carrot_pt = closest_pt + self.look_ahead_dist * v
return (t, carrot_pt)
def update(self, pos):
""" Update """
assert len(pos) == 3
# Calculate new carrot point
status, carrot_pt = self._calculate_carrot_point(pos)
# Check if there are more waypoints
if (self.wp_index + 1) == len(self.waypoints):
return None
# Update waypoints
if status >= 1.0:
self.wp_index += 1
self.wp_start = self.wp_end
self.wp_end = self.waypoints[self.wp_index]
return carrot_pt
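# Minimal usage sketch (waypoints and look-ahead distance are illustrative
# assumptions; the class exposes no setup helper, so fields are set directly):
#
#   ctrl = CarrotController()
#   ctrl.waypoints = [np.array([0.0, 0.0, 0.0]),
#                     np.array([5.0, 0.0, 0.0]),
#                     np.array([5.0, 5.0, 0.0])]
#   ctrl.wp_index = 1
#   ctrl.wp_start = ctrl.waypoints[0]
#   ctrl.wp_end = ctrl.waypoints[1]
#   ctrl.look_ahead_dist = 0.5
#   carrot = ctrl.update(np.array([1.0, 0.1, 0.0]))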
###############################################################################
# Visualizer
###############################################################################
import websockets
import asyncio
class DevServer:
""" Dev server """
def __init__(self, loop_fn):
self.host = "127.0.0.1"
self.port = 5000
self.loop_fn = loop_fn
def run(self):
""" Run server """
kwargs = {"ping_timeout": 1, "close_timeout": 1}
server = websockets.serve(self.loop_fn, self.host, self.port, **kwargs)
loop = asyncio.get_event_loop()
loop.run_until_complete(server)
loop.run_forever()
@staticmethod
def stop():
""" Stop server """
asyncio.get_event_loop().stop()
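# Minimal handler sketch (hypothetical handler; the exact coroutine signature
# depends on the installed websockets version):
#
#   async def loop_fn(ws, path):
#     multi_plot = MultiPlot()
#     await ws.send(multi_plot.get_plots())
#     while True:
#       await multi_plot.emit_data(ws)
#       await asyncio.sleep(0.05)
#
#   DevServer(loop_fn).run()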
class MultiPlot:
""" MultiPlot """
def __init__(self, has_gnd=False):
self.plots = []
self.add_pos_xy_plot(has_gnd=has_gnd)
self.add_pos_z_plot(has_gnd=has_gnd)
self.add_roll_plot(has_gnd=has_gnd)
self.add_pitch_plot(has_gnd=has_gnd)
self.add_yaw_plot(has_gnd=has_gnd)
self.add_pos_error_plot()
self.add_att_error_plot()
self.add_reproj_error_plot()
self.plot_data = {}
self.emit_rate = 8.0 # Hz
self.last_updated = datetime.now()
def _add_plot(self, title, xlabel, ylabel, trace_names, **kwargs):
conf = {}
conf["title"] = title
conf["width"] = kwargs.get("width", 300)
conf["height"] = kwargs.get("height", 280)
conf["buf_size"] = kwargs.get("buf_size", 100)
conf["trace_names"] = trace_names
conf["xlabel"] = xlabel
conf["ylabel"] = ylabel
conf["show_legend"] = True if len(trace_names) > 1 else False
self.plots.append(conf)
def add_pos_xy_plot(self, **kwargs):
""" Add Position X-Y Data """
title = "Position X-Y"
xlabel = "x [m]"
ylabel = "y [m]"
trace_names = ["Estimate"]
if kwargs.get("has_gnd"):
trace_names.append("Ground-Truth")
self._add_plot(title, xlabel, ylabel, trace_names)
def add_pos_z_plot(self, **kwargs):
""" Add Position Z Data """
xlabel = "Time [s]"
ylabel = "y [m]"
trace_names = ["Estimate"]
if kwargs.get("has_gnd"):
trace_names.append("Ground-Truth")
self._add_plot("Position Z", xlabel, ylabel, trace_names)
def add_roll_plot(self, **kwargs):
""" Add Roll Data """
xlabel = "Time [s]"
ylabel = "Attitude [deg]"
trace_names = ["Estimate"]
if kwargs.get("has_gnd"):
trace_names.append("Ground-Truth")
self._add_plot("Roll", xlabel, ylabel, trace_names)
def add_pitch_plot(self, **kwargs):
""" Add Roll Data """
xlabel = "Time [s]"
ylabel = "Attitude [deg]"
trace_names = ["Estimate"]
if kwargs.get("has_gnd"):
trace_names.append("Ground-Truth")
self._add_plot("Pitch", xlabel, ylabel, trace_names)
def add_yaw_plot(self, **kwargs):
""" Add Yaw Data """
xlabel = "Time [s]"
ylabel = "Attitude [deg]"
trace_names = ["Estimate"]
if kwargs.get("has_gnd"):
trace_names.append("Ground-Truth")
self._add_plot("Yaw", xlabel, ylabel, trace_names)
def add_pos_error_plot(self):
""" Add Position Error Data """
title = "Position Error"
xlabel = "Time [s]"
ylabel = "Position Error [m]"
trace_names = ["Error"]
self._add_plot(title, xlabel, ylabel, trace_names)
def add_att_error_plot(self):
""" Add Attitude Error Data """
title = "Attitude Error"
xlabel = "Time [s]"
ylabel = "Position Error [m]"
trace_names = ["Error"]
self._add_plot(title, xlabel, ylabel, trace_names)
def add_reproj_error_plot(self):
""" Add Reprojection Error Data """
title = "Reprojection Error"
xlabel = "Time [s]"
ylabel = "Reprojection Error [px]"
trace_names = ["Mean", "RMSE"]
self._add_plot(title, xlabel, ylabel, trace_names)
def _form_plot_data(self, plot_title, time_s, **kwargs):
gnd = kwargs.get("gnd")
est = kwargs.get("est")
err = kwargs.get("err")
conf = {plot_title: {}}
if gnd is not None:
conf[plot_title]["Ground-Truth"] = {"x": time_s, "y": gnd}
if est is not None:
conf[plot_title]["Estimate"] = {"x": time_s, "y": est}
if err is not None:
conf[plot_title]["Error"] = {"x": time_s, "y": err}
self.plot_data.update(conf)
def add_pos_xy_data(self, **kwargs):
""" Add Position X-Y Data """
plot_title = "Position X-Y"
conf = {plot_title: {}}
if "gnd" in kwargs:
gnd = kwargs["gnd"]
conf[plot_title]["Ground-Truth"] = {"x": gnd[0], "y": gnd[1]}
if "est" in kwargs:
est = kwargs["est"]
conf[plot_title]["Estimate"] = {"x": est[0], "y": est[1]}
self.plot_data.update(conf)
def add_pos_z_data(self, time_s, **kwargs):
""" Add Position Z Data """
self._form_plot_data("Position Z", time_s, **kwargs)
def add_roll_data(self, time_s, **kwargs):
""" Add Roll Data """
self._form_plot_data("Roll", time_s, **kwargs)
def add_pitch_data(self, time_s, **kwargs):
""" Add Roll Data """
self._form_plot_data("Pitch", time_s, **kwargs)
def add_yaw_data(self, time_s, **kwargs):
""" Add Yaw Data """
self._form_plot_data("Yaw", time_s, **kwargs)
def add_pos_error_data(self, time_s, error):
""" Add Position Error Data """
self._form_plot_data("Position Error", time_s, err=error)
def add_att_error_data(self, time_s, error):
""" Add Attitude Error Data """
self._form_plot_data("Attitude Error", time_s, err=error)
def add_reproj_error_data(self, time_s, reproj_rmse, reproj_mean):
""" Add Reprojection Error Data """
plot_title = "Reprojection Error"
conf = {plot_title: {}}
conf[plot_title]["Mean"] = {"x": time_s, "y": reproj_rmse}
conf[plot_title]["RMSE"] = {"x": time_s, "y": reproj_mean}
self.plot_data.update(conf)
def get_plots(self):
""" Get plots """
return json.dumps(self.plots)
def get_plot_data(self):
""" Get plot data """
return json.dumps(self.plot_data)
async def emit_data(self, ws):
""" Emit data """
time_now = datetime.now()
time_diff = (time_now - self.last_updated).total_seconds()
if time_diff > (1.0 / self.emit_rate):
await ws.send(self.get_plot_data())
self.last_updated = time_now
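# Illustrative data-push sketch (all values are assumptions for the example only):
#
#   mp = MultiPlot(has_gnd=True)
#   mp.add_pos_xy_data(gnd=(0.0, 0.0), est=(0.01, -0.02))
#   mp.add_pos_z_data(0.1, gnd=1.0, est=0.98)
#   mp.add_reproj_error_data(0.1, reproj_rmse=0.8, reproj_mean=0.5)
#   payload = mp.get_plot_data()  # JSON string consumed by the web front-end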
###############################################################################
# UNITTESTS
###############################################################################
import unittest
euroc_data_path = '/data/euroc/raw/V1_01'
# LINEAR ALGEBRA ##############################################################
class TestLinearAlgebra(unittest.TestCase):
""" Test Linear Algebra """
def test_normalize(self):
""" Test normalize() """
x = np.array([1.0, 2.0, 3.0])
x_prime = normalize(x)
self.assertTrue(isclose(norm(x_prime), 1.0))
def test_skew(self):
""" Test skew() """
x = np.array([1.0, 2.0, 3.0])
S = np.array([[0.0, -3.0, 2.0], [3.0, 0.0, -1.0], [-2.0, 1.0, 0.0]])
self.assertTrue(matrix_equal(S, skew(x)))
def test_skew_inv(self):
""" Test skew_inv() """
x = np.array([1.0, 2.0, 3.0])
S = np.array([[0.0, -3.0, 2.0], [3.0, 0.0, -1.0], [-2.0, 1.0, 0.0]])
self.assertTrue(matrix_equal(x, skew_inv(S)))
def test_matrix_equal(self):
""" Test matrix_equal() """
A = ones((3, 3))
B = ones((3, 3))
self.assertTrue(matrix_equal(A, B))
C = 2.0 * ones((3, 3))
self.assertFalse(matrix_equal(A, C))
# def test_check_jacobian(self):
# step_size = 1e-6
# threshold = 1e-5
#
# x = 2
# y0 = x**2
# y1 = (x + step_size)**2
# jac = 2 * x
# fdiff = y1 - y0
#
# jac_name = "jac"
# fdiff = (y1 - y0) / step_size
# self.assertTrue(check_jacobian(jac_name, fdiff, jac, threshold))
class TestLie(unittest.TestCase):
""" Test Lie algebra functions """
def test_Exp_Log(self):
""" Test Exp() and Log() """
pass
# TRANSFORM ###################################################################
class TestTransform(unittest.TestCase):
""" Test transform functions """
def test_homogeneous(self):
""" Test homogeneous() """
p = np.array([1.0, 2.0, 3.0])
hp = homogeneous(p)
self.assertTrue(hp[0] == 1.0)
self.assertTrue(hp[1] == 2.0)
self.assertTrue(hp[2] == 3.0)
self.assertTrue(len(hp) == 4)
def test_dehomogeneous(self):
""" Test dehomogeneous() """
p = np.array([1.0, 2.0, 3.0])
hp = np.array([1.0, 2.0, 3.0, 1.0])
p = dehomogeneous(hp)
self.assertTrue(p[0] == 1.0)
self.assertTrue(p[1] == 2.0)
self.assertTrue(p[2] == 3.0)
self.assertTrue(len(p) == 3)
def test_rotx(self):
""" Test rotx() """
x = np.array([0.0, 1.0, 0.0])
C = rotx(deg2rad(90.0))
x_prime = C @ x
self.assertTrue(np.allclose(x_prime, [0.0, 0.0, 1.0]))
def test_roty(self):
""" Test roty() """
x = np.array([1.0, 0.0, 0.0])
C = roty(deg2rad(90.0))
x_prime = C @ x
self.assertTrue(np.allclose(x_prime, [0.0, 0.0, -1.0]))
def test_rotz(self):
""" Test rotz() """
x = np.array([1.0, 0.0, 0.0])
C = rotz(deg2rad(90.0))
x_prime = C @ x
self.assertTrue(np.allclose(x_prime, [0.0, 1.0, 0.0]))
def test_aa2quat(self):
""" Test aa2quat() """
pass
def test_rvec2rot(self):
""" Test rvec2quat() """
pass
def test_vecs2axisangle(self):
""" Test vecs2axisangle() """
pass
def test_euler321(self):
""" Test euler321() """
C = euler321(0.0, 0.0, 0.0)
self.assertTrue(np.array_equal(C, eye(3)))
def test_euler2quat_and_quat2euler(self):
""" Test euler2quat() and quat2euler() """
y_in = deg2rad(3.0)
p_in = deg2rad(2.0)
r_in = deg2rad(1.0)
q = euler2quat(y_in, p_in, r_in)
ypr_out = quat2euler(q)
self.assertTrue(len(q) == 4)
self.assertTrue(abs(y_in - ypr_out[0]) < 1e-5)
self.assertTrue(abs(p_in - ypr_out[1]) < 1e-5)
self.assertTrue(abs(r_in - ypr_out[2]) < 1e-5)
def test_quat2rot(self):
""" Test quat2rot() """
ypr = np.array([0.1, 0.2, 0.3])
C_i = euler321(*ypr)
C_j = quat2rot(euler2quat(*ypr))
self.assertTrue(np.allclose(C_i, C_j))
def test_rot2euler(self):
""" Test rot2euler() """
ypr = np.array([0.1, 0.2, 0.3])
C = euler321(*ypr)
euler = rot2euler(C)
self.assertTrue(np.allclose(ypr, euler))
def test_rot2quat(self):
""" Test rot2quat() """
ypr = np.array([0.1, 0.2, 0.3])
C = euler321(*ypr)
q = rot2quat(C)
self.assertTrue(np.allclose(quat2euler(q), ypr))
def test_quat_norm(self):
""" Test quat_norm() """
q = np.array([1.0, 0.0, 0.0, 0.0])
self.assertTrue(isclose(quat_norm(q), 1.0))
def test_quat_normalize(self):
""" Test quat_normalize() """
q = np.array([1.0, 0.1, 0.2, 0.3])
q = quat_normalize(q)
self.assertTrue(isclose(quat_norm(q), 1.0))
def test_quat_conj(self):
""" Test quat_conj() """
ypr = np.array([0.1, 0.0, 0.0])
q = rot2quat(euler321(*ypr))
q_conj = quat_conj(q)
self.assertTrue(np.allclose(quat2euler(q_conj), -1.0 * ypr))
def test_quat_inv(self):
""" Test quat_inv() """
ypr = np.array([0.1, 0.0, 0.0])
q = rot2quat(euler321(*ypr))
q_inv = quat_inv(q)
self.assertTrue(np.allclose(quat2euler(q_inv), -1.0 * ypr))
def test_quat_mul(self):
""" Test quat_mul() """
p = euler2quat(deg2rad(3.0), deg2rad(2.0), deg2rad(1.0))
q = euler2quat(deg2rad(1.0), deg2rad(2.0), deg2rad(3.0))
r = quat_mul(p, q)
self.assertTrue(r is not None)
def test_quat_omega(self):
""" Test quat_omega() """
pass
def test_quat_slerp(self):
""" Test quat_slerp() """
q_i = rot2quat(euler321(0.1, 0.0, 0.0))
q_j = rot2quat(euler321(0.2, 0.0, 0.0))
q_k = quat_slerp(q_i, q_j, 0.5)
self.assertTrue(np.allclose(quat2euler(q_k), [0.15, 0.0, 0.0]))
q_i = rot2quat(euler321(0.0, 0.1, 0.0))
q_j = rot2quat(euler321(0.0, 0.2, 0.0))
q_k = quat_slerp(q_i, q_j, 0.5)
self.assertTrue(np.allclose(quat2euler(q_k), [0.0, 0.15, 0.0]))
q_i = rot2quat(euler321(0.0, 0.0, 0.1))
q_j = rot2quat(euler321(0.0, 0.0, 0.2))
q_k = quat_slerp(q_i, q_j, 0.5)
self.assertTrue(np.allclose(quat2euler(q_k), [0.0, 0.0, 0.15]))
def test_tf(self):
""" Test tf() """
r = np.array([1.0, 2.0, 3.0])
q = np.array([0.0, 0.0, 0.0, 1.0])
T = tf(q, r)
self.assertTrue(np.allclose(T[0:3, 0:3], quat2rot(q)))
self.assertTrue(np.allclose(T[0:3, 3], r))
# CV ##########################################################################
class TestCV(unittest.TestCase):
""" Test computer vision functions """
def setUp(self):
# Camera
img_w = 640
img_h = 480
fx = focal_length(img_w, 90.0)
fy = focal_length(img_w, 90.0)
cx = img_w / 2.0
cy = img_h / 2.0
self.proj_params = [fx, fy, cx, cy]
# Camera pose in world frame
C_WC = euler321(-pi / 2, 0.0, -pi / 2)
r_WC = np.array([0.0, 0.0, 0.0])
self.T_WC = tf(C_WC, r_WC)
# 3D World point
self.p_W = np.array([10.0, 0.0, 0.0])
# Point w.r.t camera
self.p_C = tf_point(inv(self.T_WC), self.p_W)
self.x = np.array([self.p_C[0] / self.p_C[2], self.p_C[1] / self.p_C[2]])
def test_linear_triangulation(self):
""" Test linear_triangulation() """
# Camera i - Camera j extrinsics
C_CiCj = eye(3)
r_CiCj = np.array([0.05, 0.0, 0.0])
T_CiCj = tf(C_CiCj, r_CiCj)
# Camera 0 pose in world frame
C_WCi = euler321(-pi / 2, 0.0, -pi / 2)
r_WCi = np.array([0.0, 0.0, 0.0])
T_WCi = tf(C_WCi, r_WCi)
# Camera 1 pose in world frame
T_WCj = T_WCi @ T_CiCj
# Projection matrices P_i and P_j
P_i = pinhole_P(self.proj_params, eye(4))
P_j = pinhole_P(self.proj_params, T_CiCj)
# Test multiple times
nb_tests = 100
for _ in range(nb_tests):
# Project feature point p_W to image plane
x = np.random.uniform(-0.05, 0.05)
y = np.random.uniform(-0.05, 0.05)
p_W = np.array([10.0, x, y])
p_Ci_gnd = tf_point(inv(T_WCi), p_W)
p_Cj_gnd = tf_point(inv(T_WCj), p_W)
z_i = pinhole_project(self.proj_params, p_Ci_gnd)
z_j = pinhole_project(self.proj_params, p_Cj_gnd)
# Triangulate
p_Ci_est = linear_triangulation(P_i, P_j, z_i, z_j)
self.assertTrue(np.allclose(p_Ci_est, p_Ci_gnd))
def test_pinhole_K(self):
""" Test pinhole_K() """
fx = 1.0
fy = 2.0
cx = 3.0
cy = 4.0
proj_params = [fx, fy, cx, cy]
K = pinhole_K(proj_params)
expected = np.array([[1.0, 0.0, 3.0], [0.0, 2.0, 4.0], [0.0, 0.0, 1.0]])
self.assertTrue(np.array_equal(K, expected))
def test_pinhole_project(self):
""" Test pinhole_project() """
z = pinhole_project(self.proj_params, self.p_C)
self.assertTrue(isclose(z[0], 320.0))
self.assertTrue(isclose(z[1], 240.0))
def test_pinhole_params_jacobian(self):
""" Test pinhole_params_jacobian() """
# Pinhole params jacobian
fx, fy, cx, cy = self.proj_params
z = np.array([fx * self.x[0] + cx, fy * self.x[1] + cy])
J = pinhole_params_jacobian(self.x)
# Perform numerical diff to obtain finite difference
step_size = 1e-6
tol = 1e-4
finite_diff = zeros((2, 4))
for i in range(4):
params_diff = list(self.proj_params)
params_diff[i] += step_size
fx, fy, cx, cy = params_diff
z_diff = np.array([fx * self.x[0] + cx, fy * self.x[1] + cy])
finite_diff[0:2, i] = (z_diff - z) / step_size
self.assertTrue(matrix_equal(finite_diff, J, tol, True))
def test_pinhole_point_jacobian(self):
""" Test pinhole_point_jacobian() """
# Pinhole params jacobian
fx, fy, cx, cy = self.proj_params
z = np.array([fx * self.x[0] + cx, fy * self.x[1] + cy])
J = pinhole_point_jacobian(self.proj_params)
# Perform numerical diff to obtain finite difference
step_size = 1e-6
tol = 1e-4
finite_diff = zeros((2, 2))
for i in range(2):
x_diff = list(self.x)
x_diff[i] += step_size
z_diff = np.array([fx * x_diff[0] + cx, fy * x_diff[1] + cy])
finite_diff[0:2, i] = (z_diff - z) / step_size
self.assertTrue(matrix_equal(finite_diff, J, tol, True))
# DATASET ####################################################################
class TestEuroc(unittest.TestCase):
""" Test Euroc dataset loader """
def test_load(self):
""" Test load """
dataset = EurocDataset(euroc_data_path)
self.assertTrue(dataset is not None)
class TestKitti(unittest.TestCase):
""" Test KITTI dataset loader """
@unittest.skip("")
def test_load(self):
""" Test load """
data_dir = '/data/kitti'
date = "2011_09_26"
seq = "93"
dataset = KittiRawDataset(data_dir, date, seq, True)
# dataset.plot_frames()
for i in range(dataset.nb_camera_images()):
cam0_img = dataset.get_camera_image(0, index=i)
cam1_img = dataset.get_camera_image(1, index=i)
cam2_img = dataset.get_camera_image(2, index=i)
cam3_img = dataset.get_camera_image(3, index=i)
img_size = cam0_img.shape
img_new_size = (int(img_size[1] / 2.0), int(img_size[0] / 2.0))
cam0_img = cv2.resize(cam0_img, img_new_size)
cam1_img = cv2.resize(cam1_img, img_new_size)
cam2_img = cv2.resize(cam2_img, img_new_size)
cam3_img = cv2.resize(cam3_img, img_new_size)
cv2.imshow("viz", cv2.vconcat([cam0_img, cam1_img, cam2_img, cam3_img]))
cv2.waitKey(0)
self.assertTrue(dataset is not None)
# STATE ESTIMATION ############################################################
class TestFactors(unittest.TestCase):
""" Test factors """
def test_pose_factor(self):
""" Test pose factor """
# Setup camera pose T_WC
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([0.1, 0.2, 0.3])
T_WC = tf(rot, trans)
rot = euler2quat(-pi / 2.0 + 0.01, 0.0 + 0.01, -pi / 2.0 + 0.01)
trans = np.array([0.1 + 0.01, 0.2 + 0.01, 0.3 + 0.01])
T_WC_diff = tf(rot, trans)
pose_est = pose_setup(0, T_WC_diff)
# Create factor
param_ids = [0]
covar = eye(6)
factor = PoseFactor(param_ids, T_WC, covar)
# Test jacobians
fvars = [pose_est]
self.assertTrue(check_factor_jacobian(factor, fvars, 0, "J_pose"))
def test_ba_factor(self):
""" Test ba factor """
# Setup camera pose T_WC
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([0.1, 0.2, 0.3])
T_WC = tf(rot, trans)
cam_pose = pose_setup(0, T_WC)
# Setup cam0
cam_idx = 0
img_w = 640
img_h = 480
res = [img_w, img_h]
fov = 60.0
fx = focal_length(img_w, fov)
fy = focal_length(img_h, fov)
cx = img_w / 2.0
cy = img_h / 2.0
params = [fx, fy, cx, cy, -0.01, 0.01, 1e-4, 1e-4]
cam_params = camera_params_setup(cam_idx, res, "pinhole", "radtan4", params)
cam_geom = camera_geometry_setup(cam_idx, res, "pinhole", "radtan4")
# Setup feature
p_W = np.array([10, random.uniform(0.0, 1.0), random.uniform(0.0, 1.0)])
# -- Feature XYZ parameterization
feature = feature_setup(p_W)
# # -- Feature inverse depth parameterization
# param = idp_param(camera, T_WC, z)
# feature = feature_init(0, param)
# -- Calculate image point
p_C = tf_point(inv(T_WC), p_W)
z = cam_geom.project(cam_params.param, p_C)
# Setup factor
param_ids = [0, 1, 2]
factor = BAFactor(cam_geom, param_ids, z)
# Test jacobians
fvars = [cam_pose, feature, cam_params]
self.assertTrue(check_factor_jacobian(factor, fvars, 0, "J_cam_pose"))
self.assertTrue(check_factor_jacobian(factor, fvars, 1, "J_feature"))
self.assertTrue(check_factor_jacobian(factor, fvars, 2, "J_cam_params"))
def test_vision_factor(self):
""" Test vision factor """
# Setup camera pose T_WB
rot = euler2quat(0.01, 0.01, 0.03)
trans = np.array([0.001, 0.002, 0.003])
T_WB = tf(rot, trans)
pose = pose_setup(0, T_WB)
# Setup camera extrinsics T_BCi
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([0.1, 0.2, 0.3])
T_BCi = tf(rot, trans)
cam_exts = extrinsics_setup(T_BCi)
# Setup cam0
cam_idx = 0
img_w = 640
img_h = 480
res = [img_w, img_h]
fov = 60.0
fx = focal_length(img_w, fov)
fy = focal_length(img_h, fov)
cx = img_w / 2.0
cy = img_h / 2.0
params = [fx, fy, cx, cy, -0.01, 0.01, 1e-4, 1e-4]
cam_params = camera_params_setup(cam_idx, res, "pinhole", "radtan4", params)
cam_geom = camera_geometry_setup(cam_idx, res, "pinhole", "radtan4")
# Setup feature
p_W = np.array([10, random.uniform(0.0, 1.0), random.uniform(0.0, 1.0)])
# -- Feature XYZ parameterization
feature = feature_setup(p_W)
# # -- Feature inverse depth parameterization
# param = idp_param(camera, T_WC, z)
# feature = feature_init(0, param)
# -- Calculate image point
T_WCi = T_WB @ T_BCi
p_C = tf_point(inv(T_WCi), p_W)
z = cam_geom.project(cam_params.param, p_C)
# Setup factor
param_ids = [0, 1, 2, 3]
factor = VisionFactor(cam_geom, param_ids, z)
# Test jacobians
fvars = [pose, cam_exts, feature, cam_params]
self.assertTrue(check_factor_jacobian(factor, fvars, 0, "J_pose"))
self.assertTrue(check_factor_jacobian(factor, fvars, 1, "J_cam_exts"))
self.assertTrue(check_factor_jacobian(factor, fvars, 2, "J_feature"))
self.assertTrue(check_factor_jacobian(factor, fvars, 3, "J_cam_params"))
def test_calib_vision_factor(self):
""" Test CalibVisionFactor """
# Calibration target pose T_WF
C_WF = euler321(-pi / 2.0, 0.0, deg2rad(80.0))
r_WF = np.array([0.001, 0.001, 0.001])
T_WF = tf(C_WF, r_WF)
# Body pose T_WB
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([-10.0, 0.0, 0.0])
T_WB = tf(rot, trans)
# Relative pose T_BF
T_BF = inv(T_WB) @ T_WF
# Camera extrinsics T_BCi
rot = eye(3)
trans = np.array([0.001, 0.002, 0.003])
T_BCi = tf(rot, trans)
# Camera 0
cam_idx = 0
img_w = 640
img_h = 480
res = [img_w, img_h]
fov = 90.0
fx = focal_length(img_w, fov)
fy = focal_length(img_h, fov)
cx = img_w / 2.0
cy = img_h / 2.0
params = [fx, fy, cx, cy, -0.01, 0.01, 1e-4, 1e-4]
cam_params = camera_params_setup(cam_idx, res, "pinhole", "radtan4", params)
cam_geom = camera_geometry_setup(cam_idx, res, "pinhole", "radtan4")
# Test factor
grid = AprilGrid()
tag_id = 1
corner_idx = 2
r_FFi = grid.get_object_point(tag_id, corner_idx)
T_CiF = inv(T_BCi) @ T_BF
r_CiFi = tf_point(T_CiF, r_FFi)
z = cam_geom.project(cam_params.param, r_CiFi)
pids = [0, 1, 2]
grid_data = (tag_id, corner_idx, r_FFi, z)
factor = CalibVisionFactor(cam_geom, pids, grid_data)
# Test jacobians
rel_pose = pose_setup(0, T_BF)
cam_exts = extrinsics_setup(T_BCi)
fvars = [rel_pose, cam_exts, cam_params]
self.assertTrue(check_factor_jacobian(factor, fvars, 0, "J_rel_pose"))
self.assertTrue(check_factor_jacobian(factor, fvars, 1, "J_cam_exts"))
self.assertTrue(check_factor_jacobian(factor, fvars, 2, "J_cam_params"))
def test_imu_factor_propagate(self):
""" Test IMU factor propagate """
# Sim imu data
circle_r = 0.5
circle_v = 1.0
sim_data = SimData(circle_r, circle_v, sim_cams=False)
imu_data = sim_data.imu0_data
# Setup imu parameters
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random walk noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random walk noise stddev.
imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup imu buffer
start_idx = 0
end_idx = 10
# end_idx = len(imu_data.timestamps) - 1
imu_buf = imu_data.form_imu_buffer(start_idx, end_idx)
# Pose i
ts_i = imu_buf.ts[start_idx]
T_WS_i = imu_data.poses[ts_i]
# Speed and bias i
ts_i = imu_buf.ts[start_idx]
vel_i = imu_data.vel[ts_i]
ba_i = np.array([0.0, 0.0, 0.0])
bg_i = np.array([0.0, 0.0, 0.0])
sb_i = speed_biases_setup(ts_i, vel_i, bg_i, ba_i)
# Propagate imu measurements
data = ImuFactor.propagate(imu_buf, imu_params, sb_i)
# Check propagation
ts_j = imu_data.timestamps[end_idx - 1]
T_WS_j_est = T_WS_i @ tf(data.dC, data.dr)
C_WS_j_est = tf_rot(T_WS_j_est)
T_WS_j_gnd = imu_data.poses[ts_j]
C_WS_j_gnd = tf_rot(T_WS_j_gnd)
# -- Position
trans_diff = norm(tf_trans(T_WS_j_gnd) - tf_trans(T_WS_j_est))
self.assertTrue(trans_diff < 0.05)
# -- Rotation
dC = C_WS_j_gnd.T @ C_WS_j_est
dq = quat_normalize(rot2quat(dC))
dC = quat2rot(dq)
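# The rotation error below is the geodesic angle between the ground-truth and
# estimated rotations: angle = arccos((trace(dC) - 1) / 2), reported in degrees.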
rpy_diff = rad2deg(acos((trace(dC) - 1.0) / 2.0))
self.assertTrue(rpy_diff < 1.0)
def test_imu_factor(self):
""" Test IMU factor """
# Simulate imu data
circle_r = 0.5
circle_v = 1.0
sim_data = SimData(circle_r, circle_v, sim_cams=False)
imu_data = sim_data.imu0_data
# Setup imu parameters
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random walk noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random walk noise stddev.
imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup imu buffer
start_idx = 0
end_idx = 10
imu_buf = imu_data.form_imu_buffer(start_idx, end_idx)
# Pose i
ts_i = imu_buf.ts[start_idx]
T_WS_i = imu_data.poses[ts_i]
pose_i = pose_setup(ts_i, T_WS_i)
# Pose j
ts_j = imu_buf.ts[end_idx - 1]
T_WS_j = imu_data.poses[ts_j]
pose_j = pose_setup(ts_j, T_WS_j)
# Speed and bias i
vel_i = imu_data.vel[ts_i]
ba_i = np.array([0.0, 0.0, 0.0])
bg_i = np.array([0.0, 0.0, 0.0])
sb_i = speed_biases_setup(ts_i, vel_i, bg_i, ba_i)
# Speed and bias j
vel_j = imu_data.vel[ts_j]
ba_j = np.array([0.0, 0.0, 0.0])
bg_j = np.array([0.0, 0.0, 0.0])
sb_j = speed_biases_setup(ts_j, vel_j, bg_j, ba_j)
# Setup IMU factor
param_ids = [0, 1, 2, 3]
factor = ImuFactor(param_ids, imu_params, imu_buf, sb_i)
# Test jacobians
fvars = [pose_i, sb_i, pose_j, sb_j]
self.assertTrue(factor)
# self.assertTrue(check_factor_jacobian(factor, fvars, 0, "J_pose_i"))
# self.assertTrue(check_factor_jacobian(factor, fvars, 1, "J_sb_i", verbose=True))
# self.assertTrue(check_factor_jacobian(factor, fvars, 2, "J_pose_j", verbose=True))
self.assertTrue(
check_factor_jacobian(factor, fvars, 3, "J_sb_j", verbose=True))
class TestFactorGraph(unittest.TestCase):
""" Test Factor Graph """
@classmethod
def setUpClass(cls):
super(TestFactorGraph, cls).setUpClass()
circle_r = 5.0
circle_v = 1.0
pickle_path = '/tmp/sim_data.pickle'
cls.sim_data = SimData.create_or_load(circle_r, circle_v, pickle_path)
def setUp(self):
self.sim_data = TestFactorGraph.sim_data
def test_factor_graph_add_param(self):
""" Test FactorGrpah.add_param() """
# Setup camera pose T_WC
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([0.1, 0.2, 0.3])
T_WC = tf(rot, trans)
pose0 = pose_setup(0, T_WC)
pose1 = pose_setup(1, T_WC)
# Add params
graph = FactorGraph()
pose0_id = graph.add_param(pose0)
pose1_id = graph.add_param(pose1)
# Assert
self.assertEqual(pose0_id, 0)
self.assertEqual(pose1_id, 1)
self.assertNotEqual(pose0, pose1)
self.assertEqual(graph.params[pose0_id], pose0)
self.assertEqual(graph.params[pose1_id], pose1)
def test_factor_graph_add_factor(self):
""" Test FactorGrpah.add_factor() """
# Setup factor graph
graph = FactorGraph()
# Setup camera pose T_WC
rot = euler2quat(-pi / 2.0, 0.0, -pi / 2.0)
trans = np.array([0.1, 0.2, 0.3])
T_WC = tf(rot, trans)
pose = pose_setup(0, T_WC)
pose_id = graph.add_param(pose)
# Create factor
param_ids = [pose_id]
covar = eye(6)
pose_factor = PoseFactor(param_ids, T_WC, covar)
pose_factor_id = graph.add_factor(pose_factor)
# Assert
self.assertEqual(len(graph.params), 1)
self.assertEqual(len(graph.factors), 1)
self.assertEqual(graph.factors[pose_factor_id], pose_factor)
def test_factor_graph_solve_vo(self):
""" Test solving a visual odometry problem """
# Sim data
cam0_data = self.sim_data.get_camera_data(0)
cam0_params = self.sim_data.get_camera_params(0)
cam0_geom = self.sim_data.get_camera_geometry(0)
# Setup factor graph
poses_gnd = []
poses_init = []
poses_est = []
graph = FactorGraph()
# -- Add features
features = self.sim_data.features
feature_ids = []
for i in range(features.shape[0]):
p_W = features[i, :]
# p_W += np.random.rand(3) * 0.1 # perturb feature
feature = feature_setup(p_W, fix=True)
feature_ids.append(graph.add_param(feature))
# -- Add cam0
cam0_id = graph.add_param(cam0_params)
# -- Build bundle adjustment problem
nb_poses = 0
for ts in cam0_data.timestamps:
# Camera frame at ts
cam_frame = cam0_data.frames[ts]
# Add camera pose T_WC0
T_WC0_gnd = cam0_data.poses[ts]
# -- Perturb camera pose
trans_rand = np.random.rand(3)
rvec_rand = np.random.rand(3) * 0.1
T_WC0_init = tf_update(T_WC0_gnd, np.block([*trans_rand, *rvec_rand]))
# -- Add to graph
pose = pose_setup(ts, T_WC0_init)
pose_id = graph.add_param(pose)
poses_gnd.append(T_WC0_gnd)
poses_init.append(T_WC0_init)
poses_est.append(pose_id)
nb_poses += 1
# Add ba factors
for i, idx in enumerate(cam_frame.feature_ids):
z = cam_frame.measurements[i]
param_ids = [pose_id, feature_ids[idx], cam0_id]
graph.add_factor(BAFactor(cam0_geom, param_ids, z))
# Solve
# debug = True
debug = False
# prof = profile_start()
graph.solve(debug)
# profile_stop(prof)
# Visualize
if debug:
pos_gnd = np.array([tf_trans(T) for T in poses_gnd])
pos_init = np.array([tf_trans(T) for T in poses_init])
pos_est = []
for pose_pid in poses_est:
pose = graph.params[pose_pid]
pos_est.append(tf_trans(pose2tf(pose.param)))
pos_est = np.array(pos_est)
plt.figure()
plt.plot(pos_gnd[:, 0], pos_gnd[:, 1], 'g-', label="Ground Truth")
plt.plot(pos_init[:, 0], pos_init[:, 1], 'r-', label="Initial")
plt.plot(pos_est[:, 0], pos_est[:, 1], 'b-', label="Estimated")
plt.xlabel("Displacement [m]")
plt.ylabel("Displacement [m]")
plt.legend(loc=0)
plt.show()
# Asserts
errors = graph.get_reproj_errors()
self.assertTrue(rmse(errors) < 0.1)
def test_factor_graph_solve_io(self):
""" Test solving a pure inertial odometry problem """
# Imu params
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random walk noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random walk noise stddev.
imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup factor graph
imu0_data = self.sim_data.imu0_data
window_size = 5
start_idx = 0
# end_idx = 200
# end_idx = 2000
end_idx = int((len(imu0_data.timestamps) - 1) / 2.0)
poses_init = []
poses_est = []
sb_est = []
graph = FactorGraph()
graph.solver_lambda = 1e4
# -- Pose i
ts_i = imu0_data.timestamps[start_idx]
T_WS_i = imu0_data.poses[ts_i]
pose_i = pose_setup(ts_i, T_WS_i)
pose_i_id = graph.add_param(pose_i)
poses_init.append(T_WS_i)
poses_est.append(pose_i_id)
# -- Speed and biases i
vel_i = imu0_data.vel[ts_i]
ba_i = np.array([0.0, 0.0, 0.0])
bg_i = np.array([0.0, 0.0, 0.0])
sb_i = speed_biases_setup(ts_i, vel_i, ba_i, bg_i)
sb_i_id = graph.add_param(sb_i)
sb_est.append(sb_i_id)
for ts_idx in range(start_idx + window_size, end_idx, window_size):
# -- Pose j
ts_j = imu0_data.timestamps[ts_idx]
T_WS_j = imu0_data.poses[ts_j]
# ---- Perturb pose j
trans_rand = np.random.rand(3)
rvec_rand = np.random.rand(3) * 0.01
T_WS_j = tf_update(T_WS_j, np.block([*trans_rand, *rvec_rand]))
# ---- Add to factor graph
pose_j = pose_setup(ts_j, T_WS_j)
pose_j_id = graph.add_param(pose_j)
# -- Speed and biases j
vel_j = imu0_data.vel[ts_j]
ba_j = np.array([0.0, 0.0, 0.0])
bg_j = np.array([0.0, 0.0, 0.0])
sb_j = speed_biases_setup(ts_j, vel_j, ba_j, bg_j)
sb_j_id = graph.add_param(sb_j)
# ---- Keep track of initial and estimate pose
poses_init.append(T_WS_j)
poses_est.append(pose_j_id)
sb_est.append(sb_j_id)
# -- Imu Factor
param_ids = [pose_i_id, sb_i_id, pose_j_id, sb_j_id]
imu_buf = imu0_data.form_imu_buffer(ts_idx - window_size, ts_idx)
factor = ImuFactor(param_ids, imu_params, imu_buf, sb_i)
graph.add_factor(factor)
# -- Update
pose_i_id = pose_j_id
pose_i = pose_j
sb_i_id = sb_j_id
sb_i = sb_j
# Solve
debug = False
# debug = True
# prof = profile_start()
graph.solve(debug)
# profile_stop(prof)
if debug:
pos_init = np.array([tf_trans(T) for T in poses_init])
pos_est = []
for pose_pid in poses_est:
pose = graph.params[pose_pid]
pos_est.append(tf_trans(pose2tf(pose.param)))
pos_est = np.array(pos_est)
sb_est = [graph.params[pid] for pid in sb_est]
sb_ts0 = sb_est[0].ts
sb_time = np.array([ts2sec(sb.ts - sb_ts0) for sb in sb_est])
vel_est = np.array([sb.param[0:3] for sb in sb_est])
ba_est = np.array([sb.param[3:6] for sb in sb_est])
bg_est = np.array([sb.param[6:9] for sb in sb_est])
plt.figure()
plt.subplot(411)
plt.plot(pos_init[:, 0], pos_init[:, 1], 'r-')
plt.plot(pos_est[:, 0], pos_est[:, 1], 'b-')
plt.xlabel("Displacement [m]")
plt.ylabel("Displacement [m]")
plt.subplot(412)
plt.plot(sb_time, vel_est[:, 0], 'r-')
plt.plot(sb_time, vel_est[:, 1], 'g-')
plt.plot(sb_time, vel_est[:, 2], 'b-')
plt.subplot(413)
plt.plot(sb_time, ba_est[:, 0], 'r-')
plt.plot(sb_time, ba_est[:, 1], 'g-')
plt.plot(sb_time, ba_est[:, 2], 'b-')
plt.subplot(414)
plt.plot(sb_time, bg_est[:, 0], 'r-')
plt.plot(sb_time, bg_est[:, 1], 'g-')
plt.plot(sb_time, bg_est[:, 2], 'b-')
plt.show()
@unittest.skip("")
def test_factor_graph_solve_vio(self):
""" Test solving a visual inertial odometry problem """
# Imu params
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random walk noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random walk noise stddev.
imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup factor graph
feature_tracker = SimFeatureTracker()
tracker = Tracker(feature_tracker)
# -- Set initial pose
ts0 = self.sim_data.imu0_data.timestamps[0]
T_WB = self.sim_data.imu0_data.poses[ts0]
tracker.set_initial_pose(T_WB)
# -- Add imu
tracker.add_imu(imu_params)
# -- Add cam0
cam0_idx = 0
cam0_data = self.sim_data.mcam_data[cam0_idx]
cam0_params = cam0_data.camera
cam0_exts = extrinsics_setup(self.sim_data.T_BC0)
tracker.add_camera(cam0_idx, cam0_params, cam0_exts)
# -- Add cam1
cam1_idx = 1
cam1_data = self.sim_data.mcam_data[cam1_idx]
cam1_params = cam1_data.camera
cam1_exts = extrinsics_setup(self.sim_data.T_BC1)
tracker.add_camera(cam1_idx, cam1_params, cam1_exts)
# -- Add camera overlap
tracker.add_overlap(cam0_idx, cam1_idx)
# -- Loop through simulation data
mcam_buf = MultiCameraBuffer(2)
for ts in self.sim_data.timeline.get_timestamps():
for event in self.sim_data.timeline.get_events(ts):
if isinstance(event, ImuEvent):
tracker.inertial_callback(event.ts, event.acc, event.gyr)
elif isinstance(event, CameraEvent):
mcam_buf.add(ts, event.cam_idx, event.image)
if mcam_buf.ready():
tracker.vision_callback(ts, mcam_buf.get_data())
mcam_buf.reset()
class TestFeatureTracking(unittest.TestCase):
""" Test feature tracking functions """
@classmethod
def setUpClass(cls):
super(TestFeatureTracking, cls).setUpClass()
cls.dataset = EurocDataset(euroc_data_path)
def setUp(self):
# Setup test images
self.dataset = TestFeatureTracking.dataset
ts = self.dataset.cam0_data.timestamps[800]
img0_path = self.dataset.cam0_data.image_paths[ts]
img1_path = self.dataset.cam1_data.image_paths[ts]
self.img0 = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
self.img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
def test_spread_keypoints(self):
""" Test spread_keypoints() """
# img = np.zeros((140, 160))
# kps = []
# kps.append(cv2.KeyPoint(10, 10, 0, 0.0, 0.0, 0))
# kps.append(cv2.KeyPoint(150, 130, 0, 0.0, 0.0, 1))
# kps = spread_keypoints(img, kps, 5, debug=True)
detector = cv2.FastFeatureDetector_create(threshold=50)
kwargs = {'optflow_mode': True, 'debug': False}
kps = grid_detect(detector, self.img0, **kwargs)
kps = spread_keypoints(self.img0, kps, 20, debug=False)
self.assertTrue(len(kps))
def test_feature_grid_cell_index(self):
""" Test FeatureGrid.grid_cell_index() """
grid_rows = 4
grid_cols = 4
image_shape = (280, 320)
keypoints = [[0, 0], [320, 0], [0, 280], [320, 280]]
grid = FeatureGrid(grid_rows, grid_cols, image_shape, keypoints)
self.assertEqual(grid.cell[0], 1)
self.assertEqual(grid.cell[3], 1)
self.assertEqual(grid.cell[12], 1)
self.assertEqual(grid.cell[15], 1)
def test_feature_grid_count(self):
""" Test FeatureGrid.count() """
grid_rows = 4
grid_cols = 4
image_shape = (280, 320)
pts = [[0, 0], [320, 0], [0, 280], [320, 280]]
grid = FeatureGrid(grid_rows, grid_cols, image_shape, pts)
self.assertEqual(grid.count(0), 1)
self.assertEqual(grid.count(3), 1)
self.assertEqual(grid.count(12), 1)
self.assertEqual(grid.count(15), 1)
def test_grid_detect(self):
""" Test grid_detect() """
debug = False
# detector = cv2.ORB_create(nfeatures=500)
# kps, des = grid_detect(detector, self.img0, **kwargs)
# self.assertTrue(len(kps) > 0)
# self.assertEqual(des.shape[0], len(kps))
detector = cv2.FastFeatureDetector_create(threshold=50)
kwargs = {'optflow_mode': True, 'debug': debug}
kps = grid_detect(detector, self.img0, **kwargs)
self.assertTrue(len(kps) > 0)
def test_optflow_track(self):
""" Test optflow_track() """
debug = False
# Detect
feature = cv2.ORB_create(nfeatures=100)
kps, des = grid_detect(feature, self.img0)
self.assertTrue(len(kps) == len(des))
# Track
pts_i = np.array([kp.pt for kp in kps], dtype=np.float32)
track_results = optflow_track(self.img0, self.img1, pts_i, debug=debug)
(pts_i, pts_j, inliers) = track_results
self.assertTrue(len(pts_i) == len(pts_j))
self.assertTrue(len(pts_i) == len(inliers))
class TestFeatureTracker(unittest.TestCase):
""" Test FeatureTracker """
@classmethod
def setUpClass(cls):
super(TestFeatureTracker, cls).setUpClass()
cls.dataset = EurocDataset(euroc_data_path)
def setUp(self):
# Setup test images
self.dataset = TestFeatureTracker.dataset
ts = self.dataset.cam0_data.timestamps[0]
img0_path = self.dataset.cam0_data.image_paths[ts]
img1_path = self.dataset.cam1_data.image_paths[ts]
self.img0 = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
self.img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
# Setup cameras
# -- cam0
res = self.dataset.cam0_data.config.resolution
proj_params = self.dataset.cam0_data.config.intrinsics
dist_params = self.dataset.cam0_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cam0 = camera_params_setup(0, res, proj_model, dist_model, params)
# -- cam1
res = self.dataset.cam1_data.config.resolution
proj_params = self.dataset.cam1_data.config.intrinsics
dist_params = self.dataset.cam1_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cam1 = camera_params_setup(1, res, proj_model, dist_model, params)
# Setup camera extrinsics
# -- cam0
T_BC0 = self.dataset.cam0_data.config.T_BS
cam0_exts = extrinsics_setup(T_BC0)
# -- cam1
T_BC1 = self.dataset.cam1_data.config.T_BS
cam1_exts = extrinsics_setup(T_BC1)
# Setup feature tracker
self.feature_tracker = FeatureTracker()
self.feature_tracker.add_camera(0, cam0, cam0_exts)
self.feature_tracker.add_camera(1, cam1, cam1_exts)
self.feature_tracker.add_overlap(0, 1)
def test_detect(self):
""" Test FeatureTracker._detect() """
# Load and detect features from single image
kps = self.feature_tracker._detect(self.img0)
self.assertTrue(len(kps) > 0)
def test_detect_overlaps(self):
""" Test FeatureTracker._detect_overlaps() """
debug = False
# debug = True
# Feed camera images to feature tracker
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_overlaps(mcam_imgs)
# Assert
data_i = self.feature_tracker.cam_data[0]
data_j = self.feature_tracker.cam_data[1]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
overlapping_ids = self.feature_tracker.feature_overlaps
self.assertTrue(len(kps_i) == len(kps_j))
self.assertTrue(len(kps_i) == len(overlapping_ids))
# Visualize
for cam_i, overlaps in self.feature_tracker.cam_overlaps.items():
cam_j = overlaps[0]
img_i = mcam_imgs[cam_i]
img_j = mcam_imgs[cam_j]
data_i = self.feature_tracker.cam_data[cam_i]
data_j = self.feature_tracker.cam_data[cam_j]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
# viz = draw_matches(img_i, img_j, kps_i, kps_j)
matches = []
for i in range(len(kps_i)):
matches.append(cv2.DMatch(i, i, 0))
viz = cv2.drawMatches(img_i, kps_i, img_j, kps_j, matches, None)
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_detect_nonoverlaps(self):
""" Test FeatureTracker._detect_nonoverlaps() """
# Feed camera images to feature tracker
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_nonoverlaps(mcam_imgs)
# Visualize
for cam_i, overlaps in self.feature_tracker.cam_overlaps.items():
cam_j = overlaps[0]
img_i = mcam_imgs[cam_i]
img_j = mcam_imgs[cam_j]
data_i = self.feature_tracker.cam_data[cam_i]
data_j = self.feature_tracker.cam_data[cam_j]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
viz_i = cv2.drawKeypoints(img_i, kps_i, None)
viz_j = cv2.drawKeypoints(img_j, kps_j, None)
viz = cv2.hconcat([viz_i, viz_j])
debug = False
# debug = True
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_detect_new(self):
""" Test FeatureTracker.detect_new() """
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_new(mcam_imgs)
ft_data = self.feature_tracker.cam_data
viz = visualize_tracking(ft_data)
debug = False
# debug = True
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_update(self):
""" Test FeatureTracker.update() """
for ts in self.dataset.cam0_data.timestamps[1000:1200]:
# for ts in self.dataset.cam0_data.timestamps:
# Load images
img0_path = self.dataset.cam0_data.image_paths[ts]
img1_path = self.dataset.cam1_data.image_paths[ts]
img0 = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
# Feed camera images to feature tracker
mcam_imgs = {0: img0, 1: img1}
ft_data = self.feature_tracker.update(ts, mcam_imgs)
# Visualize
debug = False
# debug = True
if debug:
sys.stdout.flush()
viz = visualize_tracking(ft_data)
cv2.imshow('viz', viz)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
class TestTracker(unittest.TestCase):
""" Test Tracker """
@classmethod
def setUpClass(cls):
super(TestTracker, cls).setUpClass()
# Load dataset
cls.dataset = EurocDataset(euroc_data_path)
ts0 = cls.dataset.cam0_data.timestamps[0]
cls.img0 = cls.dataset.get_camera_image(0, ts0)
cls.img1 = cls.dataset.get_camera_image(1, ts0)
# Imu params
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random walk noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random walk noise stddev.
cls.imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup cameras
# -- cam0
res = cls.dataset.cam0_data.config.resolution
proj_params = cls.dataset.cam0_data.config.intrinsics
dist_params = cls.dataset.cam0_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cls.cam0 = camera_params_setup(0, res, proj_model, dist_model, params)
cls.cam0.fix = True
# -- cam1
res = cls.dataset.cam1_data.config.resolution
proj_params = cls.dataset.cam1_data.config.intrinsics
dist_params = cls.dataset.cam1_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cls.cam1 = camera_params_setup(1, res, proj_model, dist_model, params)
cls.cam1.fix = True
# Setup camera extrinsics
# -- cam0
T_BC0 = cls.dataset.cam0_data.config.T_BS
cls.cam0_exts = extrinsics_setup(T_BC0)
cls.cam0_exts.fix = True
# -- cam1
T_BC1 = cls.dataset.cam1_data.config.T_BS
cls.cam1_exts = extrinsics_setup(T_BC1)
cls.cam1_exts.fix = True
def setUp(self):
# Setup test dataset
self.dataset = TestTracker.dataset
self.imu_params = TestTracker.imu_params
self.cam0 = TestTracker.cam0
self.cam1 = TestTracker.cam1
self.cam0_exts = TestTracker.cam0_exts
self.cam1_exts = TestTracker.cam1_exts
# Setup tracker
ts0 = self.dataset.ground_truth.timestamps[0]
T_WB = self.dataset.ground_truth.T_WB[ts0]
feature_tracker = FeatureTracker()
self.tracker = Tracker(feature_tracker)
self.tracker.add_imu(self.imu_params)
self.tracker.add_camera(0, self.cam0, self.cam0_exts)
self.tracker.add_camera(1, self.cam1, self.cam1_exts)
self.tracker.add_overlap(0, 1)
self.tracker.set_initial_pose(T_WB)
def test_tracker_add_camera(self):
""" Test Tracker.add_camera() """
self.assertTrue(len(self.tracker.cam_params), 2)
self.assertTrue(len(self.tracker.cam_geoms), 2)
self.assertTrue(len(self.tracker.cam_exts), 2)
def test_tracker_set_initial_pose(self):
""" Test Tracker.set_initial_pose() """
self.assertTrue(self.tracker.pose_init is not None)
def test_tracker_inertial_callback(self):
""" Test Tracker.inertial_callback() """
ts = 0
acc = np.array([0.0, 0.0, 10.0])
gyr = np.array([0.0, 0.0, 0.0])
self.tracker.inertial_callback(ts, acc, gyr)
self.assertEqual(self.tracker.imu_buf.length(), 1)
self.assertTrue(self.tracker.imu_started)
def test_tracker_triangulate(self):
""" Test Tracker._triangulate() """
# Feature in world frame
p_W = np.array([1.0, 0.01, 0.02])
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Camera parameters and geometry
cam_i = 0
cam_j = 1
cam_params_i = self.tracker.cam_params[cam_i]
cam_params_j = self.tracker.cam_params[cam_j]
cam_geom_i = self.tracker.cam_geoms[cam_i]
cam_geom_j = self.tracker.cam_geoms[cam_j]
# Camera extrinsics
T_BCi = pose2tf(self.tracker.cam_exts[cam_i].param)
T_BCj = pose2tf(self.tracker.cam_exts[cam_j].param)
# Point relative to cam_i and cam_j
p_Ci = tf_point(inv(T_WB @ T_BCi), p_W)
p_Cj = tf_point(inv(T_WB @ T_BCj), p_W)
# Image point z_i and z_j
z_i = cam_geom_i.project(cam_params_i.param, p_Ci)
z_j = cam_geom_j.project(cam_params_j.param, p_Cj)
# Triangulate
p_W_est = self.tracker._triangulate(cam_i, cam_j, z_i, z_j, T_WB)
# Assert
self.assertTrue(np.allclose(p_W_est, p_W))
def test_tracker_add_pose(self):
""" Test Tracker._add_pose() """
# Timestamp
ts = 0
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Add pose
pose = self.tracker._add_pose(ts, T_WB)
self.assertTrue(pose is not None)
def test_tracker_add_feature(self):
""" Test Tracker._add_feature() """
# Feature in world frame
p_W = np.array([1.0, 0.01, 0.02])
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Project world point to image plane
cam_idx = 0
cam_params = self.tracker.cam_params[cam_idx]
cam_geom = self.tracker.cam_geoms[cam_idx]
T_BC = pose2tf(self.tracker.cam_exts[cam_idx].param)
p_C = tf_point(inv(T_WB @ T_BC), p_W)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 17:29:25 2020
@author: <NAME>
"""
import numpy as np
import AlgoritmiAlgebraLineare as al
# -------- Test of the backward substitution method ------
print('\n TESTING BACKWARD SUBSTITUTION')
print(' -------------------------------------')
print(' Dimension: 5x5')
matrix = np.array([[1, 2, 3, 5, 8],
[0, 1, 5, 1, 7],
[0, 0, 2, 5, 2],
[0, 0, 0, 5, 2],
[0, 0, 0, 0, 2]])
# Fix the solutions of the system to one
xsol = np.ones(5)
# Compute the right-hand-side vector
b = np.dot(matrix,xsol)
# Apply backwardSubstition to matrix and b, expecting
# to recover xsol
findSol = al.backwardSubstition(matrix, b)
print(' Solution of linear system:\n ', findSol)
print('\n TESTING BACKWARD SUBSTITUTION')
print(' -------------------------------------')
print(' Dimension: 50x50')
# Matrix dimension
n = 50
M = 10
# Create a 50x50 matrix with values between 0 and 20
matrix = np.random.random((n, n))*2*M
# Convert the coefficient type to float
matrix = matrix.astype(float)
# Transform the matrix into an upper-triangular matrix
matrix = np.triu(matrix)
# Fix the solution to 1
xs = np.ones(n)
# Compute the right-hand-side vector
b = np.dot(matrix, xs)
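# The remaining steps would mirror the 5x5 test above (illustrative sketch,
# not from the source):
#
#   findSol = al.backwardSubstition(matrix, b)
#   print(' Solution of linear system:\n ', findSol)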
import math
import gym
from frozen_lake import *
import numpy as np
import time
from utils import *
from tqdm import *
import matplotlib.pyplot as plt
def learn_Q_QLearning(env, num_episodes=10000, gamma = 0.99, lr = 0.1, e = 0.2, max_step=6):
"""Learn state-action values using the Q-learning algorithm with epsilon-greedy exploration strategy(no decay)
Feel free to reuse your assignment1's code
Parameters
----------
env: gym.core.Environment
Environment to compute Q function for. Must have nS, nA, and P as attributes.
num_episodes: int
Number of episodes of training.
gamma: float
Discount factor. Number in range [0, 1)
lr: float
Learning rate. Number in range [0, 1)
e: float
Epsilon value used in the epsilon-greedy method.
max_step: Int
max number of steps in each episode
Returns
-------
np.array
An array of shape [env.nS x env.nA] representing state-action values
"""
Q = np.zeros((env.nS, env.nA))
########################################################
# YOUR CODE HERE #
########################################################
total_score = 0
average_score = np.zeros(num_episodes)
for i in range(num_episodes):
done = False
state = env.reset()
for _ in range(max_step):
if done:
break
if np.random.rand() > e:
action = np.argmax(Q[state])
else:
action = np.random.randint(env.nA)
nextstate, reward, done, _ = env.step(action)
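# Tabular Q-learning (TD) update: Q(s,a) <- (1 - lr)*Q(s,a) + lr*(r + gamma * max_a' Q(s',a'))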
Q[state][action] = (1-lr)*Q[state][action]+lr*(reward+gamma*np.max(Q[nextstate]))
state = nextstate
total_score += reward
average_score[i] = total_score / (i+1)
########################################################
# END YOUR CODE #
########################################################
return (Q, average_score)
def main():
env = FrozenLakeEnv(is_slippery=False)
for e in tqdm(np.linspace(0,1,11)):
(Q, average_score) = learn_Q_QLearning(env, num_episodes = 10000, gamma = 0.99, lr = 0.1, e = e)
render_single_Q(env, Q)
plt.plot(np.arange(10000), average_score)
import numpy as np
from bayeso.gp import gp
import utils
parser, args = utils.get_parser()
str_fun = args.function
print(str_fun)
if str_fun == 'few':
import fun_1d_1 as unc
str_exp = 'unc_1d_few_gp'
elif str_fun == 'many':
import fun_1d_2 as unc
str_exp = 'unc_1d_many_gp'
elif str_fun == 'cubic':
import fun_1d_3 as unc
str_exp = 'unc_1d_cubic_gp'
else:
raise ValueError('not allowed str_fun')
print(str_exp)
if __name__ == '__main__':
mean, std, Sigma = gp.predict_with_optimized_hyps(unc.X_train, unc.Y_train[..., np.newaxis], unc.X_test, str_cov='matern52', fix_noise=False, debug=True, str_optimizer_method='Nelder-Mead')
mean = np.squeeze(mean, axis=1)
std = np.squeeze(std, axis=1)
mean_gp, std_gp, _ = gp.predict_with_optimized_hyps(unc.X_train, unc.Y_train[..., np.newaxis], unc.X_test, str_cov='matern52', fix_noise=False, debug=True, str_optimizer_method='Nelder-Mead')
mean_gp = np.squeeze(mean_gp, axis=1)
std_gp = np.squeeze(std_gp, axis=1)
nll = utils.compute_nll(mean, std, np.squeeze(unc.X_test, axis=1), unc.Y_test, np.squeeze(unc.X_train, axis=1))
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
time_start = time.time()
test_is_np_array()
test_is_single_integer()
test_is_single_float()
test_is_single_number()
test_is_iterable()
test_is_string()
test_is_single_bool()
test_is_integer_array()
test_is_float_array()
test_is_callable()
test_caller_name()
test_seed()
test_current_random_state()
test_new_random_state()
test_dummy_random_state()
test_copy_random_state()
test_derive_random_state()
test_derive_random_states()
test_forward_random_state()
# test_quokka()
# test_quokka_square()
# test_angle_between_vectors()
# test_draw_text()
test_imresize_many_images()
test_imresize_single_image()
test_pad()
test_compute_paddings_for_aspect_ratio()
test_pad_to_aspect_ratio()
test_pool()
test_avg_pool()
test_max_pool()
test_draw_grid()
# test_show_grid()
# test_do_assert()
# test_HooksImages_is_activated()
# test_HooksImages_is_propagating()
# test_HooksImages_preprocess()
# test_HooksImages_postprocess()
test_Keypoint()
test_KeypointsOnImage()
test_BoundingBox()
test_BoundingBoxesOnImage()
# test_HeatmapsOnImage_get_arr()
# test_HeatmapsOnImage_find_global_maxima()
test_HeatmapsOnImage_draw()
test_HeatmapsOnImage_draw_on_image()
test_HeatmapsOnImage_invert()
test_HeatmapsOnImage_pad()
# test_HeatmapsOnImage_pad_to_aspect_ratio()
test_HeatmapsOnImage_avg_pool()
test_HeatmapsOnImage_max_pool()
test_HeatmapsOnImage_scale()
# test_HeatmapsOnImage_to_uint8()
# test_HeatmapsOnImage_from_uint8()
# test_HeatmapsOnImage_from_0to1()
# test_HeatmapsOnImage_change_normalization()
# test_HeatmapsOnImage_copy()
# test_HeatmapsOnImage_deepcopy()
test_SegmentationMapOnImage_bool()
test_SegmentationMapOnImage_get_arr_int()
# test_SegmentationMapOnImage_get_arr_bool()
test_SegmentationMapOnImage_draw()
test_SegmentationMapOnImage_draw_on_image()
test_SegmentationMapOnImage_pad()
test_SegmentationMapOnImage_pad_to_aspect_ratio()
test_SegmentationMapOnImage_scale()
test_SegmentationMapOnImage_to_heatmaps()
test_SegmentationMapOnImage_from_heatmaps()
test_SegmentationMapOnImage_copy()
test_SegmentationMapOnImage_deepcopy()
test_Polygon___init__()
test_Polygon_xx()
test_Polygon_yy()
test_Polygon_xx_int()
test_Polygon_yy_int()
test_Polygon_is_valid()
test_Polygon_area()
test_Polygon_project()
test_Polygon__compute_inside_image_point_mask()
test_Polygon_is_fully_within_image()
test_Polygon_is_partly_within_image()
test_Polygon_is_out_of_image()
test_Polygon_cut_out_of_image()
test_Polygon_clip_out_of_image()
test_Polygon_shift()
test_Polygon_draw_on_image()
test_Polygon_extract_from_image()
test_Polygon_to_shapely_polygon()
test_Polygon_to_bounding_box()
test_Polygon_from_shapely()
test_Polygon_copy()
test_Polygon_deepcopy()
test_Polygon___repr__()
test_Polygon___str__()
# test_Batch()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
# test_BackgroundAugmenter._augment_images_worker()
# test_BackgroundAugmenter.terminate()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((64, 64, 3), dtype=np.uint8),
np.zeros((1, 2), dtype=np.float32),
np.zeros((100,), dtype=np.float64)
]
values_false = [
"A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4
]
for value in values_true:
assert ia.is_np_array(value) is True
for value in values_false:
assert ia.is_np_array(value) is False
def test_is_single_integer():
assert ia.is_single_integer("A") is False
assert ia.is_single_integer(None) is False
assert ia.is_single_integer(1.2) is False
assert ia.is_single_integer(1.0) is False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) is False
assert ia.is_single_integer(1) is True
assert ia.is_single_integer(1234) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) is True
def test_is_single_float():
assert ia.is_single_float("A") is False
assert ia.is_single_float(None) is False
assert ia.is_single_float(1.2) is True
assert ia.is_single_float(1.0) is True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) is True
assert ia.is_single_float(1) is False
assert ia.is_single_float(1234) is False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) is False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) is False
def test_caller_name():
assert ia.caller_name() == 'test_caller_name'
def test_is_single_number():
class _Dummy(object):
pass
values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_single_number(value) is True
for value in values_false:
assert ia.is_single_number(value) is False
def test_is_iterable():
class _Dummy(object):
pass
values_true = [
[0, 1, 2],
["A", "X"],
[[123], [456, 789]],
[],
(1, 2, 3),
(1,),
tuple(),
"A",
"ABC",
"",
np.zeros((100,), dtype=np.uint8)
]
values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
for value in values_true:
assert ia.is_iterable(value) is True, value
for value in values_false:
assert ia.is_iterable(value) is False
def test_is_string():
class _Dummy(object):
pass
values_true = ["A", "BC", "1", ""]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0],
_Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_string(value) is True
for value in values_false:
assert ia.is_string(value) is False
def test_is_single_bool():
class _Dummy(object):
pass
values_true = [False, True]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)]
for value in values_true:
assert ia.is_single_bool(value) is True
for value in values_false:
assert ia.is_single_bool(value) is False
def test_is_integer_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_integer_array(value) is True
for value in values_false:
assert ia.is_integer_array(value) is False
def test_is_float_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_float_array(value) is True
for value in values_false:
assert ia.is_float_array(value) is False
def test_is_callable():
def _dummy_func():
pass
_dummy_func2 = lambda x: x
class _Dummy1(object):
pass
class _Dummy2(object):
def __call__(self):
pass
values_true = [_dummy_func, _dummy_func2, _Dummy2()]
values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
    for value in values_true:
        assert ia.is_callable(value) is True
    for value in values_false:
        assert ia.is_callable(value) is False
def test_seed():
ia.seed(10017)
rs = np.random.RandomState(10017)
assert ia.CURRENT_RANDOM_STATE.randint(0, 1000*1000) == rs.randint(0, 1000*1000)
reseed()
def test_current_random_state():
assert ia.current_random_state() == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
seed = 1000
ia.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=False)
rs_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=False)
rs_observed2 = ia.new_random_state(seed=None, fully_random=False)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
ia.seed(seed)
np.random.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=True)
rs_not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) != rs_not_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=True)
rs_observed2 = ia.new_random_state(seed=None, fully_random=True)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=1234)
rs_observed2 = ia.new_random_state(seed=1234)
rs_expected = np.random.RandomState(1234)
assert rs_observed1.randint(0, 10**6) == rs_observed2.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_dummy_random_state():
assert ia.dummy_random_state().randint(0, 10**6) == np.random.RandomState(1).randint(0, 10**6)
def test_copy_random_state():
rs = np.random.RandomState(1017)
rs_copy = ia.copy_random_state(rs)
assert rs != rs_copy
assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6)
assert ia.copy_random_state(np.random) == np.random
assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
rs = np.random.RandomState(1017)
rs_observed = ia.derive_random_state(np.random.RandomState(1017))
rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_derive_random_states():
rs_observed1, rs_observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2)
seed = np.random.RandomState(1017).randint(0, 10**6)
rs_expected1 = np.random.RandomState(seed+0)
rs_expected2 = np.random.RandomState(seed+1)
assert rs_observed1.randint(0, 10**6) == rs_expected1.randint(0, 10**6)
assert rs_observed2.randint(0, 10**6) == rs_expected2.randint(0, 10**6)
def test_forward_random_state():
rs1 = np.random.RandomState(1017)
rs2 = np.random.RandomState(1017)
ia.forward_random_state(rs1)
rs2.uniform()
assert rs1.randint(0, 10**6) == rs2.randint(0, 10**6)
def test_imresize_many_images():
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for c in [1, 3]:
image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, c), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, c), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, c), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, c), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, c), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, c), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
for images_this_iter in [images, list(images)]: # test for ndarray and list(ndarray) input
for interpolation in interpolations:
images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation)
for image_expected, image_observed in zip(images_this_iter, images_same_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation)
for image_expected, image_observed in zip(images_small, images_small_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation)
for image_expected, image_observed in zip(images_large, images_large_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
# test size given as single int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 8)
assert observed.shape == (1, 8, 8, 3)
# test size given as single float
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 2.0)
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 0.5)
assert observed.shape == (1, 2, 2, 3)
# test size given as (float, float)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 2.0))
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 0.5))
assert observed.shape == (1, 2, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 0.5))
assert observed.shape == (1, 8, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 2.0))
assert observed.shape == (1, 2, 8, 3)
# test size given as int+float or float+int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (11, 2.0))
assert observed.shape == (1, 11, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 11))
assert observed.shape == (1, 8, 11, 3)
# test no channels
images = np.zeros((1, 4, 4), dtype=np.uint8)
images_rs = ia.imresize_many_images(images, (2, 2))
assert images_rs.shape == (1, 2, 2)
images = [np.zeros((4, 4), dtype=np.uint8)]
images_rs = ia.imresize_many_images(images, (2, 2))
assert isinstance(images_rs, list)
assert images_rs[0].shape == (2, 2)
# test len 0 input
observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4))
assert ia.is_np_array(observed)
assert observed.dtype.type == np.uint8
assert len(observed) == 0
observed = ia.imresize_many_images([], (4, 4))
assert isinstance(observed, list)
assert len(observed) == 0
# test images with zero height/width
images = [np.zeros((0, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((4, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((0, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
# test invalid sizes
sizes_all = [(-1, 2), (0, 2)]
sizes_all = sizes_all\
+ [(float(a), b) for a, b in sizes_all]\
+ [(a, float(b)) for a, b in sizes_all]\
+ [(float(a), float(b)) for a, b in sizes_all]\
+ [(-a, -b) for a, b in sizes_all]\
+ [(-float(a), -b) for a, b in sizes_all]\
+ [(-a, -float(b)) for a, b in sizes_all]\
+ [(-float(a), -float(b)) for a, b in sizes_all]
sizes_all = sizes_all\
+ [(b, a) for a, b in sizes_all]
sizes_all = sizes_all\
+ [-1.0, 0.0, -1, 0]
for sizes in sizes_all:
images = [np.zeros((4, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=sizes)
except Exception as exc:
assert "value is zero or lower than zero." in str(exc)
got_exception = True
assert got_exception
# test list input but all with same shape
images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)]
observed = ia.imresize_many_images(images, (4, 4))
assert isinstance(observed, list)
assert all([image.shape == (4, 4, 3) for image in observed])
assert all([image.dtype.type == np.uint8 for image in observed])
def test_imresize_single_image():
for c in [-1, 1, 3]:
image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
if c == -1:
            images = images[:, :, :, 0]
            images_small = images_small[:, :, :, 0]
            images_large = images_large[:, :, :, 0]
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for interpolation in interpolations:
for image in images:
image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
for image, image_expected in zip(images, images_small):
image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
for image, image_expected in zip(images, images_large):
image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
def test_pad():
# -------
# uint8, int32
# -------
for dtype in [np.uint8, np.int32]:
arr = np.zeros((3, 3), dtype=dtype) + 255
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.array_equal(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, -1] == 0)
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[-1, :] == 0)
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, 0] == 0)
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
assert np.all(arr_pad[:, -2:] == 0)
assert np.all(arr_pad[-3:, :] == 0)
assert np.all(arr_pad[:, :4] == 0)
arr_pad = ia.pad(arr, top=1, cval=10)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 10)
arr = np.zeros((3, 3, 3), dtype=dtype) + 128
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :, 0] == 0)
assert np.all(arr_pad[0, :, 1] == 0)
assert np.all(arr_pad[0, :, 2] == 0)
arr = np.zeros((3, 3), dtype=dtype) + 128
arr[1, 1] = 200
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 128
assert arr_pad[0, 1] == 200
assert arr_pad[0, 2] == 128
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=123)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 123
assert arr_pad[0, 1] == 123
assert arr_pad[0, 2] == 123
assert arr_pad[1, 0] == 0
arr = np.zeros((1, 1), dtype=dtype) + 100
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 200
assert arr_pad[1, 0] == 175
assert arr_pad[2, 0] == 150
assert arr_pad[3, 0] == 125
assert arr_pad[4, 0] == 100
# -------
# float32, float64
# -------
for dtype in [np.float32, np.float64]:
arr = np.zeros((3, 3), dtype=dtype) + 1.0
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
arr_pad = ia.pad(arr, top=1, cval=0.2)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
arr = np.zeros((3, 3), dtype=dtype) + 0.5
arr[1, 1] = 0.75
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6
assert 0.0 - 1e-6 < arr_pad[1, 0] < 0.0 + 1e-6
arr = np.zeros((1, 1), dtype=dtype) + 0.6
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6
assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6
assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6
assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6
assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6
def test_compute_paddings_for_aspect_ratio():
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 0
assert bottom == 0
assert left == 0
arr = np.zeros((1, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 2
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 1), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 1
arr = np.zeros((2, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 1
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 2), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 1
assert bottom == 0
assert left == 1
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 0.5)
assert top == 2
assert right == 0
assert bottom == 2
assert left == 0
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 2.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 2
def test_pad_to_aspect_ratio():
for dtype in [np.uint8, np.int32, np.float32]:
# aspect_ratio = 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((1, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 1), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((2, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 2), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
# aspect_ratio != 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 0.5)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 8
assert arr_pad.shape[1] == 4
# 3d arr
arr = np.zeros((4, 2, 3), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
assert arr_pad.shape[2] == 3
# cval
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 0
assert np.max(arr_pad[:, -2:]) == 0
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 10
assert np.max(arr_pad[:, -2:]) == 10
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6
assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6
assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
# mode
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr[1:3, 1:3] = 200
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum")
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[0:1, 0:2]) == 128
assert np.max(arr_pad[1:3, 0:2]) == 200
assert np.max(arr_pad[3:, 0:2]) == 128
assert np.max(arr_pad[0:1, -2:]) == 128
assert np.max(arr_pad[1:3, -2:]) == 200
assert np.max(arr_pad[3:, -2:]) == 128
# TODO add tests for return_pad_values=True
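    # A possible sketch for the TODO above, assuming return_pad_values=True makes
    # pad_to_aspect_ratio() additionally return the applied (top, right, bottom, left)
    # paddings (kept commented out until the exact signature is confirmed):
    #
    #     arr = np.zeros((4, 4), dtype=np.uint8)
    #     arr_pad, pad_values = ia.pad_to_aspect_ratio(arr, 2.0, return_pad_values=True)
    #     assert arr_pad.shape == (4, 8)
    #     assert pad_values == (0, 2, 0, 2)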
def test_pool():
# basic functionality with uint8, int32, float32
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.int32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# preserve_dtype off
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == np.float64
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# maximum function
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.max)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
# 3d array
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2, 3)
assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
arr_pooled = arr_pooled[..., 0]
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
# block_size per axis
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, (2, 1), np.average)
assert arr_pooled.shape == (2, 4)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
# cval
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
arr = np.uint8([
[0, 1],
[4, 5]
])
arr_pooled = ia.pool(arr, (4, 1), np.average)
assert arr_pooled.shape == (1, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average, cval=22)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
# very basic test, as avg_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.avg_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
def test_max_pool():
    # very basic test, as max_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.max_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
def test_draw_grid():
image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, 0] = 64
image[0, 1] = 128
image[1, 0] = 192
    image[1, 1] = 255
grid = ia.draw_grid([image], rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image], rows=1, cols=2)
expected = np.hstack([image, image])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
def test_Keypoint():
eps = 1e-8
# x/y/x_int/y_int
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# project
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# shift
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
    # __repr__ / __str__
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# on()
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# draw_on_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
# shift
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
kpi2 = kpi.shift(y=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
kpi2 = kpi.shift(x=1, y=2)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
# get_coords_array
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
observed = kpi.get_coords_array()
expected = np.float32([
[1, 2],
[3, 4]
])
assert np.allclose(observed, expected)
# from_coords_array
arr = np.float32([
[1, 2],
[3, 4]
])
kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
assert 1 - eps < kpi.keypoints[0].x < 1 + eps
assert 2 - eps < kpi.keypoints[0].y < 2 + eps
assert 3 - eps < kpi.keypoints[1].x < 3 + eps
assert 4 - eps < kpi.keypoints[1].y < 4 + eps
# to_keypoint_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = kpi.to_keypoint_image(size=1)
image_size3 = kpi.to_keypoint_image(size=3)
kps_mask = np.zeros((5, 5, 2), dtype=np.bool)
kps_mask[2, 1, 0] = 1
kps_mask[4, 3, 1] = 1
kps_mask_size3 = np.zeros_like(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
assert np.all(image[kps_mask] == 255)
assert np.all(image[~kps_mask] == 0)
assert np.all(image_size3[kps_mask] == 255)
assert np.all(image_size3[kps_mask_size3] >= 128)
assert np.all(image_size3[~kps_mask_size3] == 0)
# from_keypoint_image()
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 255
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == 4
assert kpi2.keypoints[1].x == 3
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
got_exception = False
try:
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
_ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
nb_channels=3)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# copy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.copy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 100
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# deepcopy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.deepcopy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# repr/str
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
+ "shape=(5, 5, 3))"
assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
eps = 1e-8
# properties with ints
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
assert bb.width == 40 - 20
assert bb.height == 30 - 10
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# wrong order of y1/y2, x1/x2
bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
# properties with floats
bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
assert bb.width == 40.9 - 20.1
assert bb.height == 30.9 - 10.1
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter is False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 10 * 10 + 10 * 10 - 5 * 5
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) is True
assert bb.is_fully_within_image((20, 100, 3)) is False
assert bb.is_fully_within_image((100, 30, 3)) is False
assert bb.is_fully_within_image((1, 1, 3)) is False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) is True
assert bb.is_partly_within_image((20, 100, 3)) is True
assert bb.is_partly_within_image((100, 30, 3)) is True
assert bb.is_partly_within_image((1, 1, 3)) is False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
eps = np.finfo(np.float32).eps
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert 20 - 2*eps < bb_cut.y2 < 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert 30 - 2*eps < bb_cut.x2 < 30
# shift
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
    assert bb_mix.y2 == 30+1-2
    assert bb_mix.x2 == 40+3-4
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
# BoundingBox_repr()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
# test_BoundingBox_str()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
reseed()
# test height/width
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.height == 40
assert bbsoi.width == 50
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
assert bbsoi.height == 40
assert bbsoi.width == 50
# on()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
bbsoi_projected = bbsoi.on((40, 50))
assert bbsoi_projected.bounding_boxes[0].y1 == 10
assert bbsoi_projected.bounding_boxes[0].x1 == 20
assert bbsoi_projected.bounding_boxes[0].y2 == 30
assert bbsoi_projected.bounding_boxes[0].x2 == 40
assert bbsoi_projected.bounding_boxes[1].y1 == 15
assert bbsoi_projected.bounding_boxes[1].x1 == 25
assert bbsoi_projected.bounding_boxes[1].y2 == 35
assert bbsoi_projected.bounding_boxes[1].x2 == 45
bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
# draw_on_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
assert np.all(image[35+1, 45+1, :] == [0, 0, 0])
# remove_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
assert len(bbsoi_slim.bounding_boxes) == 1
assert bbsoi_slim.bounding_boxes[0] == bb1
# cut_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
eps = np.finfo(np.float32).eps
bbsoi_cut = bbsoi.cut_out_of_image()
assert len(bbsoi_cut.bounding_boxes) == 2
assert bbsoi_cut.bounding_boxes[0].y1 == 10
assert bbsoi_cut.bounding_boxes[0].x1 == 20
assert bbsoi_cut.bounding_boxes[0].y2 == 30
assert bbsoi_cut.bounding_boxes[0].x2 == 40
assert bbsoi_cut.bounding_boxes[1].y1 == 15
assert bbsoi_cut.bounding_boxes[1].x1 == 25
assert bbsoi_cut.bounding_boxes[1].y2 == 35
assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50
# shift()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift(right=1)
    assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10
assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
assert bbsoi_shifted.bounding_boxes[0].y2 == 30
assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
assert bbsoi_shifted.bounding_boxes[1].y1 == 15
assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
assert bbsoi_shifted.bounding_boxes[1].y2 == 35
assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
# copy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 0
# deepcopy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 10
# repr() / str()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v3)
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in range(4):
for x in range(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in range(4):
for x in range(2, 4):
            assert np.allclose(heatmaps_drawn[y, x], v2)
# -*- coding: utf-8 -*-
import numpy as np
import cv2
def ZNCC(img, part):
out = img.copy()
H, W, _ = img.shape
h, w, _ = part.shape
    _img = img - np.mean(img, axis=(0, 1))
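    # --- Hedged completion: the rest of ZNCC() is cut off in this snippet. ---
    # The lines below sketch a standard zero-mean normalized cross-correlation
    # template search; the variable names are illustrative, not from the
    # original source.
    _part = part - np.mean(part, axis=(0, 1))
    best_score, best_x, best_y = -1.0, 0, 0
    for y in range(H - h + 1):
        for x in range(W - w + 1):
            patch = _img[y:y + h, x:x + w]
            denom = np.sqrt(np.sum(patch ** 2)) * np.sqrt(np.sum(_part ** 2))
            score = np.sum(patch * _part) / (denom + 1e-10)
            if score > best_score:
                best_score, best_x, best_y = score, x, y
    # Draw the best-matching window on the output image (BGR red, 1 px line).
    cv2.rectangle(out, (best_x, best_y), (best_x + w, best_y + h), (0, 0, 255), 1)
    return out.astype(np.uint8)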
# python 2.7
from __future__ import absolute_import, division, print_function
import os.path
from os.path import join, basename, splitext
from glob import glob
import cv2
import numpy as np
import tensorflow as tf
def filename_key(x):
res = int(splitext(basename(x))[0])
return res
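# Example: filename_key("/data/frames/12.png") -> 12, so using it as a sort key
# orders files numerically (2.png before 10.png) instead of lexicographically.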
def get_data(data_folder, label_folder, image_h, image_w, norm=False):
background_color = np.array([255, 255, 255]) # white
lane_color = np.array([0, 0, 255]) # red
image_paths = glob(join(data_folder, '*.png'))
# label_paths = glob(join(label_folder, '*.png'))
# make sure the label and image are matched
image_paths.sort()
# label_paths.sort()
images = [] # data
gt_images = [] # labels
for image_file_id in range(0, len(image_paths)):
image_file = image_paths[image_file_id]
image = cv2.imread(image_file, 3)
if (norm):
image = normalize(image)
images.append(image)
# for each image in the training set, find the related label
img_name = basename(image_file)
# gt_image_file = label_paths[image_file_id]
gt_image_file = join(label_folder, img_name)
gt_image = cv2.imread(gt_image_file, 3)
gt_bg = np.all(gt_image == background_color, axis=2).reshape(image_h, image_w, 1)
gt_l = np.all(gt_image == lane_color, axis=2).reshape(image_h, image_w, 1)
gt_image = np.concatenate((gt_bg, gt_l), axis=2)
gt_images.append(gt_image)
    return np.array(images), np.array(gt_images)
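# Hedged sketch: normalize() is called inside get_data() above but is not
# defined in this snippet. A minimal placeholder, assuming plain scaling of
# 8-bit pixel values to [0, 1], could look like this (the real project's
# version may differ, e.g. per-channel standardization):
def normalize(image):
    # Scale uint8 BGR pixels to float32 in [0, 1].
    return image.astype(np.float32) / 255.0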
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import inspect
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS,
FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, IGNORED_FUNCTIONS)
from astropy.utils.compat import (
NUMPY_LT_1_14, NUMPY_LT_1_15, NUMPY_LT_1_16, NUMPY_LT_1_18)
NO_ARRAY_FUNCTION = not ARRAY_FUNCTION_ENABLED
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
all_wrapped_functions = {name: f for name, f in np.__dict__.items()
if callable(f) and hasattr(f, '__wrapped__') and
(NUMPY_LT_1_15 or f is not np.printoptions)}
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
    # alen is deprecated in Numpy 1.18
if NUMPY_LT_1_18:
def test_alen(self):
assert np.alen(self.q) == 3
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="expand_dims used asarray in numpy <1.16")
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
@pytest.mark.xfail(NUMPY_LT_1_15,
reason="flip needs axis argument in numpy <1.15")
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# TODO: should we change the default for subok?
self.check(np.broadcast_to, (3, 3, 3), subok=True)
def test_broadcast_arrays(self):
# TODO: should we change the default for subok?
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
    @pytest.mark.skipif(NUMPY_LT_1_15,
                        reason="take_along_axis added in numpy 1.15")
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
    @pytest.mark.skipif(NUMPY_LT_1_15,
                        reason="put_along_axis added in numpy 1.15")
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
o = func(q_list, *args, **kwargs)
unit = q_list[0].unit
v_list = [q.to_value(unit) for q in q_list]
expected = func(v_list, *args, **kwargs) * unit
assert o.shape == expected.shape
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_stack(self):
self.check(np.stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_column_stack(self):
self.check(np.column_stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_hstack(self):
self.check(np.hstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vstack(self):
self.check(np.vstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dstack(self):
self.check(np.dstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_block(self):
self.check(np.block)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_insert(self):
# Unit of inserted values is ignored.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) * u.m
assert np.all(out == expected)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(NotImplementedError):
np.any(self.q)
def test_all(self):
with pytest.raises(NotImplementedError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(NotImplementedError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(NotImplementedError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="angle used asarray in numpy <1.16")
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tril(self):
self.check(np.tril)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_triu(self):
self.check(np.triu)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1., 2.]*u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.*u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q, nan=1.*u.km, posinf=2.*u.km, neginf=-2*u.km)
expected = [-2000., 2000., 1000., 3., 4.] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1. + 1j]*u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1. + 1j]*u.m)
def test_isclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 102., 199.]) * u.cm
atol = 1.5 * u.cm
rtol = 1. * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=atol.to_value(q1.unit))
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
@pytest.mark.xfail
def test_isclose_failure(self):
q_cm = self.q.to(u.cm)
# atol does not have units; TODO: should this work by default?
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
        expected = np.average(q1.value, weights=q2.value)
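        # Hedged completion: the rest of this test is cut off in the snippet.
        # The weights' unit cancels in a weighted average, so the result
        # should carry q1's unit.
        expected = expected * q1.unit
        assert np.all(o == expected)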
# -*- coding: utf-8 -*-
'''
By <NAME>(<EMAIL>) and <NAME>(https://github.com/ozmig77)
https://www.github.com/kyubyong/g2p
'''
import nltk
import numpy as np
import codecs
import os
import re
from builtins import str as unicode
import hazm
from PersianG2p.expand import normalize_numbers
from PersianG2p.hparams import hp
dirname = os.path.dirname(__file__)
def construct_homograph_dictionary():
f = os.path.join(dirname,'homographs.en')
homograph2features = dict()
for line in codecs.open(f, 'r', 'utf8').read().splitlines():
if line.startswith("#"): continue # comment
headword, pron1, pron2, pos1 = line.strip().split("|")
homograph2features[headword.lower()] = (pron1.split(), pron2.split(), pos1)
return homograph2features
def load_vocab():
g2idx = {g: idx for idx, g in enumerate(hp.graphemes)}
idx2g = {idx: g for idx, g in enumerate(hp.graphemes)}
p2idx = {p: idx for idx, p in enumerate(hp.phonemes)}
idx2p = {idx: p for idx, p in enumerate(hp.phonemes)}
return g2idx, idx2g, p2idx, idx2p # note that g and p mean grapheme and phoneme, respectively.
# def segment(text):
# '''
# Splits text into `tokens`.
# :param text: A string.
# :return: A list of tokens (string).
# '''
# print(text)
# text = re.sub('([.,?!]( |$))', r' \1', text)
# print(text)
# return text.split()
class Persian_g2p_converter(object):
def __init__(self, checkpoint=os.path.join(dirname,'data/checkpoint.npy')):
super().__init__()
# self.graphemes = ["<pad>", "<unk>", "</s>"] + list("آئابتثجحخدذرزسشصضطظعغفقلمنهوپچژکگی")
self.graphemes = hp.graphemes
self.phonemes = hp.phonemes
self.g2idx, self.idx2g, self.p2idx, self.idx2p = load_vocab()
self.checkpoint = checkpoint
# load Tihu dictionary as the Persian lexicon
tihu = {}
#with open("tihudict.dict") as f:
with codecs.open(os.path.join(dirname,"data/tihudict.dict"), encoding='utf-8', mode='r') as f:
for line in f:
(key, val) = line.strip('\n').split('\t')
tihu[key] = val
self.tihu = tihu
self.load_variables()
# self.homograph2features = construct_homograph_dictionary()
def load_variables(self):
self.variables = np.load(os.path.join(dirname, self.checkpoint), allow_pickle=True)
self.enc_emb = self.variables.item().get("encoder.emb.weight") # (29, 64). (len(graphemes), emb)
self.enc_w_ih = self.variables.item().get("encoder.rnn.weight_ih_l0") # (3*128, 64)
self.enc_w_hh = self.variables.item().get("encoder.rnn.weight_hh_l0") # (3*128, 128)
self.enc_b_ih = self.variables.item().get("encoder.rnn.bias_ih_l0") # (3*128,)
self.enc_b_hh = self.variables.item().get("encoder.rnn.bias_hh_l0") # (3*128,)
self.dec_emb = self.variables.item().get("decoder.emb.weight") # (74, 64). (len(phonemes), emb)
self.dec_w_ih = self.variables.item().get("decoder.rnn.weight_ih_l0") # (3*128, 64)
self.dec_w_hh = self.variables.item().get("decoder.rnn.weight_hh_l0") # (3*128, 128)
self.dec_b_ih = self.variables.item().get("decoder.rnn.bias_ih_l0") # (3*128,)
self.dec_b_hh = self.variables.item().get("decoder.rnn.bias_hh_l0") # (3*128,)
self.fc_w = self.variables.item().get("decoder.fc.weight") # (74, 128)
self.fc_b = self.variables.item().get("decoder.fc.bias") # (74,)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def grucell(self, x, h, w_ih, w_hh, b_ih, b_hh):
rzn_ih = np.matmul(x, w_ih.T) + b_ih
rzn_hh = np.matmul(h, w_hh.T) + b_hh
rz_ih, n_ih = rzn_ih[:, :rzn_ih.shape[-1] * 2 // 3], rzn_ih[:, rzn_ih.shape[-1] * 2 // 3:]
rz_hh, n_hh = rzn_hh[:, :rzn_hh.shape[-1] * 2 // 3], rzn_hh[:, rzn_hh.shape[-1] * 2 // 3:]
rz = self.sigmoid(rz_ih + rz_hh)
r, z = np.split(rz, 2, -1)
n = np.tanh(n_ih + r * n_hh)
h = (1 - z) * n + z * h
return h
def gru(self, x, steps, w_ih, w_hh, b_ih, b_hh, h0=None):
if h0 is None:
h0 = np.zeros((x.shape[0], w_hh.shape[1]), np.float32)
h = h0 # initial hidden state
outputs = np.zeros((x.shape[0], steps, w_hh.shape[1]), np.float32)
for t in range(steps):
h = self.grucell(x[:, t, :], h, w_ih, w_hh, b_ih, b_hh) # (b, h)
outputs[:, t, ::] = h
return outputs
def encode(self, word):
chars = list(word) + ["</s>"]
x = [self.g2idx.get(char, self.g2idx["<unk>"]) for char in chars]
x = np.take(self.enc_emb, np.expand_dims(x, 0), axis=0)
return x
def predict(self, word):
# encoder
enc = self.encode(word)
enc = self.gru(enc, len(word) + 1, self.enc_w_ih, self.enc_w_hh,
self.enc_b_ih, self.enc_b_hh, h0=np.zeros((1, self.enc_w_hh.shape[-1]), np.float32))
last_hidden = enc[:, -1, :]
# decoder
        dec = np.take(self.dec_emb, [2], axis=0)
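        # --- Hedged completion: the decoding loop is cut off in this snippet. ---
        # A typical greedy GRU decoder for this kind of seq2seq g2p model is
        # sketched below; the end-of-sequence index (3) and the length cap (20)
        # are assumptions, not taken from the original source.
        h = last_hidden
        preds = []
        for _ in range(20):
            h = self.grucell(dec, h, self.dec_w_ih, self.dec_w_hh,
                             self.dec_b_ih, self.dec_b_hh)
            logits = np.matmul(h, self.fc_w.T) + self.fc_b
            pred = int(logits.argmax())
            if pred == 3:  # assumed index of "</s>"
                break
            preds.append(pred)
            dec = np.take(self.dec_emb, [pred], axis=0)
        return [self.idx2p.get(idx, "<unk>") for idx in preds]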
"""
A FEED-FORWARD DEEP NEURAL NETWORK
"""
import pickle
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import numpy.linalg as la
import seaborn as sns
np.set_printoptions(formatter={'float': '{: 0.1f}'.format})
# Batch Normalization
def batch_norm_ff(modes, v, gamma_bn, beta_bn, i, bnorm):
if bnorm:
eps = 1.0e-1
momenti = 0.9
global running_mean, running_variance
gamma = gamma_bn + 0
beta = beta_bn + 0
v_in = v + 0
m_dim, n_dim = np.shape(v_in)
if modes == 'train':
means = np.mean(v_in, axis=0)
variances = np.var(v_in, axis=0)
va = v_in - means
vx = np.sqrt((variances) + eps) + eps
v_norm = (v_in - means) / (np.sqrt(variances + eps) + eps)
v_out_bn = (gamma * v_norm) + beta
# estimate running averages for test and validation
running_mean[i] = (momenti * running_mean[i]) + (1 - momenti) * means
running_variance[i] = (momenti * running_variance[i]) + (1 - momenti) * variances
cache = [v_norm, v_in, means, variances, m_dim, gamma, beta]
return [v_out_bn, cache]
if modes == 'test' or modes == 'validate':
v_norm = (v_in - running_mean[i]) / (np.sqrt(running_variance[i]) + eps)
v_out_bn = (gamma_bn * v_norm) + beta_bn
return v_out_bn
if not bnorm and modes == 'test':
return v
return [v, 0]
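# Hedged usage sketch for batch_norm_ff (shapes and names are assumptions):
# the running statistics are module-level containers keyed by layer index i.
#
#   running_mean = {0: np.zeros(4, dtype=np.float32)}
#   running_variance = {0: np.ones(4, dtype=np.float32)}
#   v = np.random.randn(8, 4).astype(np.float32)   # (batch, features)
#   v_bn, cache = batch_norm_ff('train', v, np.ones(4), np.zeros(4), 0, True)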
def batch_norm_bp(delta, store, bnorm):
if bnorm:
v_norm, v_in, means, variance, m_dim, gamma, beta = store
eps = 1.0e-8
delta_in = delta + 0
dgamma = np.sum((delta_in * v_norm), axis=0)
dbeta = np.sum(delta_in, axis=0)
inv_std = 1. / (np.sqrt(variance) + eps)
dv_norm = delta_in * gamma
dvar = -0.5 * (inv_std ** 3) * np.sum(dv_norm *(v_in - means), axis = 0)
dmean = -1 * inv_std * np.sum(dv_norm, axis=0) + dvar * -2.0 * np.mean((v_in - means), axis=0)
ddelta = (inv_std * dv_norm) + (2.0 / m_dim * (v_in - means) * dvar) + (dmean / m_dim)
# dx1 = gamma * t / m_dim
# dx2 = (m_dim * delta_in) - np.sum(delta_in, axis=0)
# dx3 = np.square(t) * (v_in - means)
# dx4 = np.sum(delta_in * (v_in - means), axis=0)
#
# ddelta = dx1 * (dx2 - (dx3 * dx4))
return ddelta, dgamma, dbeta
return [delta, 0, 0]
def bn_term_update(g, b, dg, db, momentsg, momentsb):
eps = 1.0e-8
dwg = alpha * dg
dwb = alpha * db
beta = 0.9
momentsg = (beta * momentsg) + ((1 - beta) * np.square(dg))
momentsb = (beta * momentsb) + ((1 - beta) * np.square(db))
rms_momentg= np.sqrt(momentsg) + eps
rms_momentb = np.sqrt(momentsb) + eps
g += dwg / rms_momentg
b += dwb / rms_momentb
return g, b
# Weighted sum of input nodes and weights
def weight_sum(x_data, weights):
v = x_data.dot(weights)
return v
# Activation functions
def activation(v, mode):
y_io = 0
if mode == 'reLU':
y_io = v + 0
np.putmask(y_io, y_io < 0, [0])
        # y = y * (y > 0)  or  np.maximum(y, 0, y)
if mode == 'leaky_reLU':
y_io = v + 0
np.putmask(y_io, y_io < 0, y_io * 0.01)
if mode == 'sigmoid':
y_io = 1 / (1 + np.exp(-v))
if mode == 'softmax':
        ex = np.exp(v)
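        # Hedged completion: the softmax branch and the return are cut off in
        # this snippet. Assuming v has shape (batch, n_nodes), normalize the
        # exponentials row-wise to obtain probabilities.
        y_io = ex / np.sum(ex, axis=1, keepdims=True)
    return y_io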
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of psychological embedding models.
Classes:
Proxy: Proxy class for embedding model.
PsychologicalEmbedding: Abstract base class for a psychological
embedding model.
Functions:
load_model: Load a hdf5 file, that was saved with the `save`
class method, as a PsychologicalEmbedding object.
"""
import copy
import json
import os
from pathlib import Path
import warnings
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.eager import backprop
import tensorflow_probability as tfp
import psiz.keras.layers
import psiz.trials
class Proxy(object):
"""Convenient proxy class for a psychological embedding model.
The embedding procedure jointly infers three components. First, the
embedding algorithm infers a stimulus representation denoted by the
variable z. Second, the embedding algorithm infers the variables
governing the similarity kernel, denoted theta. Third, the
embedding algorithm infers a set of attention weights if there is
more than one group.
Methods:
compile: Assign a optimizer, loss and regularization function
for the optimization procedure.
fit: Fit the embedding model using the provided observations.
evaluate: Evaluate the embedding model using the provided
observations.
similarity: Return the similarity between provided points.
distance: Return the (weighted) minkowski distance between
provided points.
save: Save the embedding model as an hdf5 file.
Attributes: TODO
n_stimuli: The number of unique stimuli in the embedding.
n_dim: The dimensionality of the embedding.
n_group: The number of distinct groups in the embedding.
z: A dictionary with the keys 'value', 'trainable'. The key
'value' contains the actual embedding points. The key
'trainable' is a boolean flag that determines whether
the embedding points are inferred during inference.
theta: Dictionary containing data about the parameter values
governing the similarity kernel. The dictionary contains
the variable names as keys at the first level. For each
variable, there is an additional dictionary containing
the keys 'value', 'trainable', and 'bounds'. The key
'value' indicates the actual value of the parameter. The
key 'trainable' is a boolean flag indicating whether the
variable is trainable during inferene. The key 'bounds'
indicates the bounds of the parameter during inference. The
bounds are specified using a list of two items where the
first item indicates the lower bound and the second item
indicates the upper bound. Use None to indicate no bound.
phi: Dictionary containing data about the group-specific
parameter values. These parameters are only trainable if
there is more than one group. The dictionary contains the
parameter names as keys at the first level. For each
parameter name, there is an additional dictionary
containing the keys 'value' and 'trainable'. The key
'value' indicates the actual value of the parameter. The
key 'trainable' is a boolean flag indicating whether the
variable is trainable during inference. The free parameter
`w` governs dimension-wide weights.
log_freq: The number of epochs to wait between log entries.
"""
def __init__(self, model):
"""Initialize.
Arguments:
model: A TensorFlow model.
"""
super().__init__()
self.model = model
# Unsaved attributes.
self.log_freq = 10
@property
def n_stimuli(self):
"""Getter method for n_stimuli."""
return self.model.n_stimuli
@property
def n_dim(self):
"""Getter method for n_dim."""
return self.model.n_dim
@property
def n_group(self):
"""Getter method for n_group."""
return self.model.n_group
@property
def z(self):
"""Getter method for `z`."""
z = self.model.stimuli.embeddings
if isinstance(z, tfp.distributions.Distribution):
z = z.mode() # NOTE: This will not work for all distributions.
z = z.numpy()
if self.model.stimuli.mask_zero:
if len(z.shape) == 2:
z = z[1:]
else:
z = z[:, 1:]
return z
@property
def w(self):
"""Getter method for `w`."""
if hasattr(self.model.kernel, 'attention'):
w = self.model.kernel.attention.embeddings
if isinstance(w, tfp.distributions.Distribution):
if isinstance(w.distribution, tfp.distributions.LogitNormal):
# For logit-normal distribution, use median instead of
# mode.
# `median = logistic(loc)`.
w = tf.math.sigmoid(w.distribution.loc)
else:
w = w.mode() # NOTE: The mode may be undefined.
w = w.numpy()
if self.model.kernel.attention.mask_zero:
w = w[1:]
else:
            w = np.ones([1, self.n_dim])
        return w
import numpy as np
#from scipy import interpolate
import logging
from psdtoolsx.terminology import Enum, Key, Type, Klass
from psdtoolsx.constants import Tag
from psdtoolsx.api.numpy_io import get_pattern, EXPECTED_CHANNELS
logger = logging.getLogger(__name__)
_COLOR_FUNC = {
Klass.RGBColor: lambda x: x / 255.,
Klass.Grayscale: lambda x: (100. - x) / 100.,
Klass.CMYKColor: lambda x: (100 - x) / 100.,
Klass.LabColor: lambda x: x / 255.,
}
def draw_vector_mask(layer):
return _draw_path(layer, brush={'color': 255})
def draw_stroke(layer):
desc = layer.stroke._data
# _CAP = {
# 'strokeStyleButtCap': 0,
# 'strokeStyleSquareCap': 1,
# 'strokeStyleRoundCap': 2,
# }
# _JOIN = {
# 'strokeStyleMiterJoin': 0,
# 'strokeStyleRoundJoin': 2,
# 'strokeStyleBevelJoin': 3,
# }
width = float(desc.get('strokeStyleLineWidth', 1.))
# linejoin = desc.get('strokeStyleLineJoinType', None)
# linejoin = linejoin.enum if linejoin else 'strokeStyleMiterJoin'
# linecap = desc.get('strokeStyleLineCapType', None)
# linecap = linecap.enum if linecap else 'strokeStyleButtCap'
# miterlimit = desc.get('strokeStyleMiterLimit', 100.0) / 100.
# aggdraw >= 1.3.12 will support additional params.
return _draw_path(
layer,
pen={
'color': 255,
'width': width,
# 'linejoin': _JOIN.get(linejoin, 0),
# 'linecap': _CAP.get(linecap, 0),
# 'miterlimit': miterlimit,
}
)
def _draw_path(layer, brush=None, pen=None):
height, width = layer._psd.height, layer._psd.width
color = 0
if layer.vector_mask.initial_fill_rule and \
len(layer.vector_mask.paths) == 0:
color = 1
mask = np.full((height, width, 1), color, dtype=np.float32)
# Group merged path components.
paths = []
for subpath in layer.vector_mask.paths:
if subpath.operation == -1:
paths[-1].append(subpath)
else:
paths.append([subpath])
# Apply shape operation.
first = True
for subpath_list in paths:
plane = _draw_subpath(subpath_list, width, height, brush, pen)
assert mask.shape == (height, width, 1)
assert plane.shape == mask.shape
op = subpath_list[0].operation
if op == 0: # Exclude = Union - Intersect.
mask = mask + plane - 2 * mask * plane
elif op == 1: # Union (Combine).
mask = mask + plane - mask * plane
elif op == 2: # Subtract.
if first and brush:
mask = 1 - mask
mask = np.maximum(0, mask - plane)
elif op == 3: # Intersect.
if first and brush:
mask = 1 - mask
mask = mask * plane
first = False
return np.minimum(1, np.maximum(0, mask))
def _draw_subpath(subpath_list, width, height, brush, pen):
"""
Rasterize Bezier curves.
TODO: Replace aggdraw implementation with skimage.draw.
"""
from PIL import Image
import aggdraw
mask = Image.new('L', (width, height), 0)
draw = aggdraw.Draw(mask)
pen = aggdraw.Pen(**pen) if pen else None
brush = aggdraw.Brush(**brush) if brush else None
for subpath in subpath_list:
if len(subpath) <= 1:
logger.warning('not enough knots: %d' % len(subpath))
continue
path = ' '.join(map(str, _generate_symbol(subpath, width, height)))
symbol = aggdraw.Symbol(path)
draw.symbol((0, 0), symbol, pen, brush)
draw.flush()
del draw
return np.expand_dims(np.array(mask).astype(np.float32) / 255., 2)
def _generate_symbol(path, width, height, command='C'):
"""Sequence generator for SVG path."""
if len(path) == 0:
return
# Initial point.
yield 'M'
yield path[0].anchor[1] * width
yield path[0].anchor[0] * height
yield command
# Closed path or open path
points = (
zip(path, path[1:] +
path[0:1]) if path.is_closed() else zip(path, path[1:])
)
# Rest of the points.
for p1, p2 in points:
yield p1.leaving[1] * width
yield p1.leaving[0] * height
yield p2.preceding[1] * width
yield p2.preceding[0] * height
yield p2.anchor[1] * width
yield p2.anchor[0] * height
if path.is_closed():
yield 'Z'
def create_fill_desc(layer, desc, viewport):
"""Create a fill image."""
if desc.classID == b'solidColorLayer':
return draw_solid_color_fill(viewport, desc)
if desc.classID == b'patternLayer':
return draw_pattern_fill(viewport, layer._psd, desc)
if desc.classID == b'gradientLayer':
return draw_gradient_fill(viewport, desc)
return None, None
def create_fill(layer, viewport):
"""Create a fill image."""
if Tag.SOLID_COLOR_SHEET_SETTING in layer.tagged_blocks:
desc = layer.tagged_blocks.get_data(Tag.SOLID_COLOR_SHEET_SETTING)
return draw_solid_color_fill(viewport, desc)
if Tag.PATTERN_FILL_SETTING in layer.tagged_blocks:
desc = layer.tagged_blocks.get_data(Tag.PATTERN_FILL_SETTING)
return draw_pattern_fill(viewport, layer._psd, desc)
if Tag.GRADIENT_FILL_SETTING in layer.tagged_blocks:
desc = layer.tagged_blocks.get_data(Tag.GRADIENT_FILL_SETTING)
return draw_gradient_fill(viewport, desc)
if Tag.VECTOR_STROKE_CONTENT_DATA in layer.tagged_blocks:
stroke = layer.tagged_blocks.get_data(Tag.VECTOR_STROKE_DATA)
if not stroke or stroke.get('fillEnabled').value is True:
desc = layer.tagged_blocks.get_data(Tag.VECTOR_STROKE_CONTENT_DATA)
if Key.Color in desc:
return draw_solid_color_fill(viewport, desc)
elif Key.Pattern in desc:
return draw_pattern_fill(viewport, layer._psd, desc)
elif Key.Gradient in desc:
return draw_gradient_fill(viewport, desc)
return None, None
def draw_solid_color_fill(viewport, desc):
"""
Create a solid color fill.
"""
color_desc = desc.get(Key.Color)
color_fn = _COLOR_FUNC.get(color_desc.classID, 1.0)
fill = [color_fn(x) for x in color_desc.values()]
height, width = viewport[3] - viewport[1], viewport[2] - viewport[0]
color = np.full((height, width, len(fill)), fill, dtype=np.float32)
return color, None
def draw_pattern_fill(viewport, psd, desc):
"""
Create a pattern fill.
"""
pattern_id = desc[Enum.Pattern][Key.ID].value.rstrip('\x00')
pattern = psd._get_pattern(pattern_id)
if not pattern:
logger.error('Pattern not found: %s' % (pattern_id))
return None, None
panel = get_pattern(pattern)
assert panel.shape[0] > 0
scale = float(desc.get(Key.Scale, 100.)) / 100.
if scale != 1.:
from skimage.transform import resize
new_shape = (
max(1, int(panel.shape[0] * scale)),
max(1, int(panel.shape[1] * scale))
)
panel = resize(panel, new_shape)
height, width = viewport[3] - viewport[1], viewport[2] - viewport[0]
reps = (
int(np.ceil(float(height) / panel.shape[0])),
int(np.ceil(float(width) / panel.shape[1])),
1,
)
channels = EXPECTED_CHANNELS.get(pattern.image_mode)
pixels = np.tile(panel, reps)[:height, :width, :]
if pixels.shape[2] > channels:
return pixels[:, :, :channels], pixels[:, :, -1:]
return pixels, None
def draw_gradient_fill(viewport, desc):
"""
Create a gradient fill image.
"""
height, width = viewport[3] - viewport[1], viewport[2] - viewport[0]
angle = float(desc.get(Key.Angle, 0))
scale = float(desc.get(Key.Scale, 100.)) / 100.
ratio = (angle % 90)
scale *= (90. - ratio) / 90. * width + (ratio / 90.) * height
X, Y = np.meshgrid(
np.linspace(-width / scale, width / scale, width, dtype=np.float32),
np.linspace(-height / scale, height / scale, height, dtype=np.float32),
)
gradient_kind = desc.get(Key.Type).enum
if gradient_kind == Enum.Linear:
Z = _make_linear_gradient(X, Y, angle)
elif gradient_kind == Enum.Radial:
Z = _make_radial_gradient(X, Y)
elif gradient_kind == Enum.Angle:
Z = _make_angle_gradient(X, Y, angle)
elif gradient_kind == Enum.Reflected:
Z = _make_reflected_gradient(X, Y, angle)
elif gradient_kind == Enum.Diamond:
Z = _make_diamond_gradient(X, Y, angle)
else:
# Unsupported: b'shapeburst', only avail in stroke effect
logger.warning('Unknown gradient style: %s.' % (gradient_kind))
        Z = np.full((height, width), 0.5, dtype=np.float32)
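# Hedged sketch: the `_make_*_gradient` helpers called above are not included
# in this snippet. The placeholder below only illustrates the idea of a linear
# gradient over the meshgrid; it is not the library's actual implementation.
def _make_linear_gradient_sketch(X, Y, angle):
    theta = np.radians(angle)
    # Project each pixel onto the gradient direction, then rescale to [0, 1].
    Z = np.cos(theta) * X - np.sin(theta) * Y
    Z = (Z - Z.min()) / (Z.max() - Z.min() + 1e-12)
    return Z.astype(np.float32)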