import matplotlib.pyplot as plt
import tifffile
import numpy as np
from polaris.micro import multi
from polaris import spang
def abc2theta(abc, theta):
return abc[0] + abc[1]*np.cos(2*theta) + abc[2]*np.sin(2*theta)
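# Illustrative check (added, not from the original snippet): at theta = 0 the
# model reduces to abc[0] + abc[1], since cos(0) = 1 and sin(0) = 0, e.g.
# >>> abc2theta(np.array([1.0, 0.5, 0.2]), 0.0)
# 1.5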
import pytest
from unittest.mock import patch, MagicMock
import open_cp.predictors as testmod
import open_cp.data
import numpy as np
def test_DataTrainer():
test = testmod.DataTrainer()
with pytest.raises(TypeError):
test.data = "string"
test.data = testmod.data.TimedPoints([],[[],[]])
def test_GridPrediction_grid_coord():
g = testmod.GridPrediction(10, 20, xoffset=5, yoffset=15)
assert g.grid_coord(5, 15) == (0, 0)
assert g.grid_coord(14, 34) == (0, 0)
assert g.grid_coord(15, 34) == (1, 0)
assert g.grid_coord(14, 35) == (0, 1)
assert g.grid_coord(15, 35) == (1, 1)
def test_GridPrediction_bounding_box_of_cell():
g = testmod.GridPrediction(10, 20, xoffset=5, yoffset=15)
bb = g.bounding_box_of_cell(0,0)
assert bb.min == (5, 15)
assert bb.max == (15, 35)
def test_GridPrediction():
class Test(testmod.GridPrediction):
def __init__(self, xsize, ysize, xoffset = 0, yoffset = 0):
super().__init__(xsize, ysize, xoffset, yoffset)
def grid_risk(self, gx, gy):
assert self.want == (gx, gy)
test = Test(10, 10)
test.want = (0, 0)
test.risk(5, 6)
test.want = (1, 2)
test.risk(12, 21)
def test_grid_risk():
mat = np.random.random((10,10))
pred = testmod.GridPredictionArray(10,15,mat,-20,-45)
assert pred.grid_risk(0, 0) == mat[0, 0]
assert pred.grid_risk(2, 3) == mat[3, 2]
np.testing.assert_allclose(pred.grid_risk([2,3], [4,6]), [mat[4,2], mat[6,3]])
assert pred.risk(-20, -45) == mat[0, 0]
assert pred.risk(0, 0) == mat[3, 2]
np.testing.assert_allclose(pred.risk([0,-20], [0,-45]), [mat[3,2], mat[0,0]])
def test_GridPrediction_with_offset():
class Test(testmod.GridPrediction):
def __init__(self, xsize, ysize, xoffset = 0, yoffset = 0):
super().__init__(xsize, ysize, xoffset, yoffset)
def grid_risk(self, gx, gy):
assert self.want == (gx, gy)
test = Test(10, 10, 25, 30)
test.want = (0, 0)
test.risk(25, 30)
test.want = (0, 0)
test.risk(34, 39)
test.want = (1, 2)
test.risk(25 + 15, 30 + 20 + 8)
def a_valid_grid_prediction_array():
matrix = np.array([[1,2,3], [4,5,6]])
return testmod.GridPredictionArray(10, 10, matrix)
def test_GridPredictionArray():
gpa = a_valid_grid_prediction_array()
assert gpa.grid_risk(-1, 0) == 0
assert gpa.grid_risk(0, -1) == 0
assert gpa.grid_risk(0, 0) == 1
assert gpa.grid_risk(2, 1) == 6
assert gpa.grid_risk(2, 0) == 3
assert gpa.grid_risk(3, 0) == 0
assert gpa.grid_risk(0, 2) == 0
def test_GridPredictionArray_intensity_matrix_property():
gpa = a_valid_grid_prediction_array()
np.testing.assert_allclose( gpa.intensity_matrix, [[1,2,3], [4,5,6]] )
def test_GridPredictionArray_mesh_data():
gpa = a_valid_grid_prediction_array()
xcs, ycs = gpa.mesh_data()
np.testing.assert_allclose( xcs, [0, 10, 20, 30] )
np.testing.assert_allclose( ycs, [0, 10, 20] )
def test_GridPredictionArray_clone():
matrix = np.array([[1,2,3], [4,5,6]])
gpa = testmod.GridPredictionArray(5, 10, matrix, 1, 2)
cl = gpa.clone()
assert (gpa.xoffset, gpa.yoffset) == (cl.xoffset, cl.yoffset)
assert (gpa.xextent, gpa.yextent) == (cl.xextent, cl.yextent)
assert (gpa.xsize, gpa.ysize) == (cl.xsize, cl.ysize)
np.testing.assert_allclose(gpa.intensity_matrix, cl.intensity_matrix)
cl.intensity_matrix[0] = [7,8,9]
np.testing.assert_allclose(gpa.intensity_matrix, [[1,2,3],[4,5,6]])
np.testing.assert_allclose(cl.intensity_matrix, [[7,8,9],[4,5,6]])
def test_GridPredictionArray_masked_clone():
mask = np.array([[True, True, False], [False, False, True]])
matrix = np.ma.masked_array([[1,2,3], [4,5,6]], mask=mask)
gpa = testmod.GridPredictionArray(5, 10, matrix, 1, 2)
cl = gpa.clone()
assert (gpa.xoffset, gpa.yoffset) == (cl.xoffset, cl.yoffset)
assert (gpa.xextent, gpa.yextent) == (cl.xextent, cl.yextent)
assert (gpa.xsize, gpa.ysize) == (cl.xsize, cl.ysize)
np.testing.assert_allclose(gpa.intensity_matrix, cl.intensity_matrix)
cl.intensity_matrix[0] = [7,8,9]
cl.intensity_matrix.mask[0] = [True, True, False]
cl.intensity_matrix.mask[1,1] = True
np.testing.assert_allclose(gpa.intensity_matrix, [[1,2,3],[4,5,6]])
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the CV parameter-shift CVParamShiftTape"""
import pytest
import numpy as np
import pennylane as qml
from pennylane.tape import CVParamShiftTape
hbar = 2
class TestGradMethod:
"""Tests for parameter gradient methods"""
def test_non_differentiable(self):
"""Test that a non-differentiable parameter is
correctly marked"""
with CVParamShiftTape() as tape:
qml.FockState(1, wires=0)
qml.Displacement(0.543, 0, wires=[1])
qml.Beamsplitter(0, 0, wires=[0, 1])
qml.expval(qml.X(wires=[0]))
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "A"
assert tape._grad_method(2) == "A"
assert tape._grad_method(3) == "A"
assert tape._grad_method(4) == "A"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] is None
assert tape._par_info[1]["grad_method"] == "A"
assert tape._par_info[2]["grad_method"] == "A"
assert tape._par_info[3]["grad_method"] == "A"
assert tape._par_info[4]["grad_method"] == "A"
def test_no_graph_exception(self):
"""Test that an exception is raised for analytically differentiable
operations if use_graph=False"""
with CVParamShiftTape() as tape:
qml.Rotation(0.543, wires=[0])
qml.expval(qml.P(0))
with pytest.raises(ValueError, match="must always use the graph"):
tape._grad_method(0, use_graph=False)
def test_independent(self):
"""Test that an independent variable is properly marked
as having a zero gradient"""
with CVParamShiftTape() as tape:
qml.Rotation(0.543, wires=[0])
qml.Rotation(-0.654, wires=[1])
qml.expval(qml.P(0))
assert tape._grad_method(0) == "A"
assert tape._grad_method(1) == "0"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] == "A"
assert tape._par_info[1]["grad_method"] == "0"
def test_finite_diff(self, monkeypatch):
"""If an op has grad_method=F, this should be respected
by the CVParamShiftTape"""
monkeypatch.setattr(qml.Rotation, "grad_method", "F")
with CVParamShiftTape() as tape:
qml.Rotation(0.543, wires=[0])
qml.Squeezing(0.543, 0, wires=[0])
qml.expval(qml.P(0))
assert tape._grad_method(0) == "F"
assert tape._grad_method(1) == "A"
assert tape._grad_method(2) == "A"
def test_non_gaussian_operation(self):
"""Test that a non-Gaussian operation succeeding
a differentiable Gaussian operation results in
numeric differentiation."""
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.Rotation(1.0, wires=[1])
# Non-Gaussian
qml.Kerr(1.0, wires=[1])
qml.expval(qml.P(0))
qml.expval(qml.X(1))
# First rotation gate has no succeeding non-Gaussian operation
assert tape._grad_method(0) == "A"
# Second rotation gate has a succeeding non-Gaussian operation
assert tape._grad_method(1) == "F"
# Kerr gate does not support the parameter-shift rule
assert tape._grad_method(2) == "F"
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.Rotation(1.0, wires=[1])
# entangle the modes
qml.Beamsplitter(1.0, 0.0, wires=[0, 1])
# Non-Gaussian
qml.Kerr(1.0, wires=[1])
qml.expval(qml.P(0))
qml.expval(qml.X(1))
# After entangling the modes, the Kerr gate now succeeds
# both initial rotations
assert tape._grad_method(0) == "F"
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "F"
def test_probability(self):
"""Probability is the expectation value of a
higher order observable, and thus only supports numerical
differentiation"""
with CVParamShiftTape() as tape:
qml.Rotation(0.543, wires=[0])
qml.Squeezing(0.543, 0, wires=[0])
qml.probs(wires=0)
assert tape._grad_method(0) == "F"
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "F"
def test_variance(self):
"""If the variance of the observable is first order, then
parameter-shift is supported. If the observable is second order,
however, only finite-differences is supported."""
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.var(qml.P(0)) # first order
assert tape._grad_method(0) == "A"
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.var(qml.NumberOperator(0)) # second order
assert tape._grad_method(0) == "F"
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.Rotation(1.0, wires=[1])
qml.Beamsplitter(0.5, 0.0, wires=[0, 1])
qml.var(qml.NumberOperator(0)) # second order
qml.expval(qml.NumberOperator(1))
assert tape._grad_method(0) == "F"
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "F"
assert tape._grad_method(3) == "F"
def test_second_order_expectation(self):
"""Test that the expectation of a second-order observable forces
the gradient method to use the second-order parameter-shift rule"""
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.expval(qml.NumberOperator(0)) # second order
assert tape._grad_method(0) == "A2"
def test_unknown_op_grad_method(self, monkeypatch):
"""Test that an exception is raised if an operator has a
grad method defined that the CV parameter-shift tape
doesn't recognize"""
monkeypatch.setattr(qml.Rotation, "grad_method", "B")
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=0)
qml.expval(qml.X(0))
with pytest.raises(ValueError, match="unknown gradient method"):
tape._grad_method(0)
class TestTransformObservable:
"""Tests for the _transform_observable method"""
def test_incorrect_heisenberg_size(self, monkeypatch):
"""The number of dimensions of a CV observable Heisenberg representation does
not match the ev_order attribute."""
monkeypatch.setattr(qml.P, "ev_order", 2)
with pytest.raises(ValueError, match="Mismatch between the polynomial order"):
CVParamShiftTape._transform_observable(qml.P(0), np.identity(3), device_wires=[0])
def test_higher_order_observable(self, monkeypatch):
"""An exception should be raised if the observable is higher than 2nd order."""
monkeypatch.setattr(qml.P, "ev_order", 3)
with pytest.raises(NotImplementedError, match="order > 2 not implemented"):
CVParamShiftTape._transform_observable(qml.P(0), np.identity(3), device_wires=[0])
def test_first_order_transform(self, tol):
"""Test that a first order observable is transformed correctly"""
# create a symmetric transformation
Z = np.arange(3**2).reshape(3, 3)
Z = Z.T + Z
obs = qml.X(0)
res = CVParamShiftTape._transform_observable(obs, Z, device_wires=[0])
# The Heisenberg representation of the X
# operator is simply... X
expected = np.array([0, 1, 0]) @ Z
assert isinstance(res, qml.PolyXP)
assert res.wires.labels == (0,)
assert np.allclose(res.data[0], expected, atol=tol, rtol=0)
def test_second_order_transform(self, tol):
"""Test that a second order observable is transformed correctly"""
# create a symmetric transformation
Z = np.arange(3**2).reshape(3, 3)
Z = Z.T + Z
obs = qml.NumberOperator(0)
res = CVParamShiftTape._transform_observable(obs, Z, device_wires=[0])
# The Heisenberg representation of the number operator
# is (X^2 + P^2) / (2*hbar) - 1/2
A = np.array([[-0.5, 0, 0], [0, 0.25, 0], [0, 0, 0.25]])
expected = A @ Z + Z @ A
assert isinstance(res, qml.PolyXP)
assert res.wires.labels == (0,)
assert np.allclose(res.data[0], expected, atol=tol, rtol=0)
def test_device_wire_expansion(self, tol):
"""Test that the transformation works correctly
for the case where the transformation applies to more wires
than the observable."""
# create a 3-mode symmetric transformation
wires = qml.wires.Wires([0, "a", 2])
ndim = 1 + 2 * len(wires)
Z = np.arange(ndim**2).reshape(ndim, ndim)
Z = Z.T + Z
obs = qml.NumberOperator(0)
res = CVParamShiftTape._transform_observable(obs, Z, device_wires=wires)
# The Heisenberg representation of the number operator
# is (X^2 + P^2) / (2*hbar) - 1/2. We use the ordering
# I, X0, Xa, X2, P0, Pa, P2.
A = np.diag([-0.5, 0.25, 0.25, 0, 0, 0, 0])
expected = A @ Z + Z @ A
assert isinstance(res, qml.PolyXP)
assert res.wires == wires
assert np.allclose(res.data[0], expected, atol=tol, rtol=0)
class TestParameterShiftLogic:
"""Test for the dispatching logic of the parameter shift method"""
def test_force_order2(self, mocker):
"""Test that if the force_order2 keyword argument is provided,
the second order parameter shift rule is forced"""
dev = qml.device("default.gaussian", wires=1)
with CVParamShiftTape() as tape:
qml.Displacement(1.0, 0.0, wires=[0])
qml.Rotation(2.0, wires=[0])
qml.expval(qml.X(0))
tape.trainable_params = {0, 1, 2}
spy1 = mocker.spy(tape, "parameter_shift_first_order")
spy2 = mocker.spy(tape, "parameter_shift_second_order")
tape.jacobian(dev, method="analytic", force_order2=False)
spy1.assert_called()
spy2.assert_not_called()
tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
def test_no_poly_xp_support(self, mocker, monkeypatch, caplog):
"""Test that if a device does not support PolyXP
and the second-order parameter-shift rule is required,
we fall back to finite differences."""
dev = qml.device("default.gaussian", wires=1)
monkeypatch.delitem(dev._observable_map, "PolyXP")
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.expval(qml.NumberOperator(0))
tape.trainable_params = {0}
assert tape.analytic_pd == tape.parameter_shift
spy_analytic = mocker.spy(tape, "analytic_pd")
spy_first_order_shift = mocker.spy(tape, "parameter_shift_first_order")
spy_second_order_shift = mocker.spy(tape, "parameter_shift_second_order")
spy_transform = mocker.spy(qml.operation.CVOperation, "heisenberg_tr")
spy_numeric = mocker.spy(tape, "numeric_pd")
with pytest.warns(UserWarning, match="does not support the PolyXP observable"):
tape.jacobian(dev, method="analytic")
spy_analytic.assert_called()
spy_first_order_shift.assert_not_called()
spy_second_order_shift.assert_not_called()
spy_transform.assert_not_called()
spy_numeric.assert_called()
def test_no_poly_xp_support_variance(self, mocker, monkeypatch, caplog):
"""Test that if a device does not support PolyXP
and the variance parameter-shift rule is required,
we fall back to finite differences."""
dev = qml.device("default.gaussian", wires=1)
monkeypatch.delitem(dev._observable_map, "PolyXP")
with CVParamShiftTape() as tape:
qml.Rotation(1.0, wires=[0])
qml.var(qml.X(0))
tape.trainable_params = {0}
assert tape.analytic_pd == tape.parameter_shift_var
spy1 = mocker.spy(tape, "parameter_shift_first_order")
spy2 = mocker.spy(tape, "parameter_shift_second_order")
spy_numeric = mocker.spy(tape, "numeric_pd")
with pytest.warns(UserWarning, match="does not support the PolyXP observable"):
tape.jacobian(dev, method="analytic")
spy1.assert_not_called()
spy2.assert_not_called()
spy_numeric.assert_called()
class TestExpectationQuantumGradients:
"""Tests for the quantum gradients of various gates
with expectation value output"""
def test_rotation_gradient(self, mocker, tol):
"""Test the gradient of the rotation gate"""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
alpha = 0.5643
theta = 0.23354
with CVParamShiftTape() as tape:
qml.Displacement(alpha, 0.0, wires=[0])
qml.Rotation(theta, wires=[0])
qml.expval(qml.X(0))
tape._update_gradient_info()
tape.trainable_params = {2}
spy1 = mocker.spy(CVParamShiftTape, "parameter_shift_first_order")
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A = tape.jacobian(dev, method="analytic")
spy1.assert_called()
spy2.assert_not_called()
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
expected = -hbar * alpha * np.sin(theta)
assert np.allclose(grad_A, expected, atol=tol, rtol=0)
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
def test_beamsplitter_gradient(self, mocker, tol):
"""Test the gradient of the beamsplitter gate"""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
alpha = 0.5643
theta = 0.23354
with CVParamShiftTape() as tape:
qml.Displacement(alpha, 0.0, wires=[0])
qml.Beamsplitter(theta, 0.0, wires=[0, 1])
qml.expval(qml.X(0))
tape._update_gradient_info()
tape.trainable_params = {2}
spy1 = mocker.spy(CVParamShiftTape, "parameter_shift_first_order")
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A = tape.jacobian(dev, method="analytic")
spy1.assert_called()
spy2.assert_not_called()
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
expected = -hbar * alpha * np.sin(theta)
assert np.allclose(grad_A, expected, atol=tol, rtol=0)
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
def test_displacement_gradient(self, mocker, tol):
"""Test the gradient of the displacement gate"""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
r = 0.5643
phi = 0.23354
with CVParamShiftTape() as tape:
qml.Displacement(r, phi, wires=[0])
qml.expval(qml.X(0))
tape._update_gradient_info()
tape.trainable_params = {0, 1}
spy1 = mocker.spy(CVParamShiftTape, "parameter_shift_first_order")
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A = tape.jacobian(dev, method="analytic")
spy1.assert_called()
spy2.assert_not_called()
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
expected = [hbar * np.cos(phi), -hbar * r * np.sin(phi)]
assert np.allclose(grad_A, expected, atol=tol, rtol=0)
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
def test_squeezed_gradient(self, mocker, tol):
"""Test the gradient of the squeezed gate. We also
ensure that the gradient is correct even when an operation
with no Heisenberg representation is a descendant."""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
class Rotation(qml.operation.CVOperation):
"""Dummy operation that does not support
heisenberg representation"""
num_wires = 1
num_params = 1
grad_method = "A"
alpha = 0.5643
r = 0.23354
with CVParamShiftTape() as tape:
qml.Displacement(alpha, 0.0, wires=[0])
qml.Squeezing(r, 0.0, wires=[0])
# The following two gates have no effect
# on the circuit gradient and expectation value
qml.Beamsplitter(0.0, 0.0, wires=[0, 1])
Rotation(0.543, wires=[1])
qml.expval(qml.X(0))
tape._update_gradient_info()
tape.trainable_params = {2}
spy1 = mocker.spy(CVParamShiftTape, "parameter_shift_first_order")
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A = tape.jacobian(dev, method="analytic")
spy1.assert_called()
spy2.assert_not_called()
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
expected = -np.exp(-r) * hbar * alpha
assert np.allclose(grad_A, expected, atol=tol, rtol=0)
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
def test_squeezed_number_state_gradient(self, mocker, tol):
"""Test the numerical gradient of the squeeze gate with
with number state expectation is correct"""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
r = 0.23354
with CVParamShiftTape() as tape:
qml.Squeezing(r, 0.0, wires=[0])
# the fock state projector is a 'non-Gaussian' observable
qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))
tape._update_gradient_info()
tape.trainable_params = {0}
assert tape._par_info[0]["grad_method"] == "F"
spy = mocker.spy(CVParamShiftTape, "parameter_shift")
grad = tape.jacobian(dev)
spy.assert_not_called()
# (d/dr) |<2|S(r)>|^2 = 0.5 tanh(r)^3 (2 csch(r)^2 - 1) sech(r)
expected = 0.5 * np.tanh(r) ** 3 * (2 / (np.sinh(r) ** 2) - 1) / np.cosh(r)
assert np.allclose(grad, expected, atol=tol, rtol=0)
def test_multiple_squeezing_gradient(self, mocker, tol):
"""Test that the gradient of a circuit with two squeeze
gates is correct."""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
r0, phi0, r1, phi1 = [0.4, -0.3, -0.7, 0.2]
with CVParamShiftTape() as tape:
qml.Squeezing(r0, phi0, wires=[0])
qml.Squeezing(r1, phi1, wires=[0])
qml.expval(qml.NumberOperator(0)) # second order
tape._update_gradient_info()
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
# check against the known analytic formula
expected = np.zeros([4])
expected[0] = np.cosh(2 * r1) * np.sinh(2 * r0) + np.cos(phi0 - phi1) * np.cosh(
2 * r0
) * np.sinh(2 * r1)
expected[1] = -0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
expected[2] = np.cos(phi0 - phi1) * np.cosh(2 * r1) * np.sinh(2 * r0) + np.cosh(
2 * r0
) * np.sinh(2 * r1)
expected[3] = 0.5 * np.sin(phi0 - phi1) * np.sinh(2 * r0) * np.sinh(2 * r1)
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
def test_multiple_second_order_observables(self, mocker, tol):
"""Test that the gradient of a circuit with multiple
second order observables is correct"""
dev = qml.device("default.gaussian", wires=2, hbar=hbar)
r = [0.4, -0.7, 0.1, 0.2]
p = [0.1, 0.2, 0.3, 0.4]
with CVParamShiftTape() as tape:
qml.Squeezing(r[0], p[0], wires=[0])
qml.Squeezing(r[1], p[1], wires=[0])
qml.Squeezing(r[2], p[2], wires=[1])
qml.Squeezing(r[3], p[3], wires=[1])
qml.expval(qml.NumberOperator(0)) # second order
qml.expval(qml.NumberOperator(1)) # second order
tape._update_gradient_info()
spy2 = mocker.spy(CVParamShiftTape, "parameter_shift_second_order")
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy2.assert_called()
# check against the known analytic formula
def expected_grad(r, p):
return np.array(
[
np.cosh(2 * r[1]) * np.sinh(2 * r[0])
+ np.cos(p[0] - p[1]) * np.cosh(2 * r[0]) * np.sinh(2 * r[1]),
-0.5 * np.sin(p[0] - p[1]) * np.sinh(2 * r[0]) * np.sinh(2 * r[1]),
np.cos(p[0] - p[1]) * np.cosh(2 * r[1]) * np.sinh(2 * r[0])
+ np.cosh(2 * r[0]) * np.sinh(2 * r[1]),
0.5 * np.sin(p[0] - p[1]) * np.sinh(2 * r[0]) * np.sinh(2 * r[1]),
]
)
expected = np.zeros([2, 8])
expected[0, :4] = expected_grad(r[:2], p[:2])
expected[1, 4:] = expected_grad(r[2:], p[2:])
assert np.allclose(grad_A2, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("obs", [qml.X, qml.Identity])
@pytest.mark.parametrize(
"op", [qml.Displacement(0.1, 0.2, wires=0), qml.TwoModeSqueezing(0.1, 0.2, wires=[0, 1])]
)
def test_gradients_gaussian_circuit(self, op, obs, mocker, tol):
"""Tests that the gradients of circuits of gaussian gates match between the
finite difference and analytic methods."""
tol = 1e-2
with CVParamShiftTape() as tape:
qml.Displacement(0.5, 0, wires=0)
qml.apply(op)
qml.Beamsplitter(1.3, -2.3, wires=[0, 1])
qml.Displacement(-0.5, 0.1, wires=0)
qml.Squeezing(0.5, -1.5, wires=0)
qml.Rotation(-1.1, wires=0)
qml.expval(obs(wires=0))
dev = qml.device("default.gaussian", wires=2)
res = tape.execute(dev)
tape._update_gradient_info()
tape.trainable_params = set(range(2, 2 + op.num_params))
# check that every parameter is analytic
for i in range(op.num_params):
assert tape._par_info[2 + i]["grad_method"][0] == "A"
spy = mocker.spy(CVParamShiftTape, "parameter_shift_first_order")
grad_F = tape.jacobian(dev, method="numeric")
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
spy.assert_not_called()
assert np.allclose(grad_A2, grad_F, atol=tol, rtol=0)
if obs.ev_order == 1:
grad_A = tape.jacobian(dev, method="analytic")
spy.assert_called()
assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)
@pytest.mark.parametrize("t", [0, 1])
def test_interferometer_unitary(self, t, tol):
"""An integration test for CV gates that support analytic differentiation
if succeeding the gate to be differentiated, but cannot be differentiated
themselves (for example, they may be Gaussian but accept no parameters,
or may accept a numerical array parameter).
This ensures that, assuming their _heisenberg_rep is defined, the quantum
gradient analytic method can still be used, and returns the correct result.
Currently, the only such operation is qml.InterferometerUnitary. In the future,
we may consider adding a qml.GaussianTransform operator.
"""
if t == 1:
pytest.xfail(
"There is a bug in the second order CV parameter-shift rule; "
"phase arguments return the incorrect derivative."
)
# Note: this bug currently affects PL core as well:
#
# dev = qml.device("default.gaussian", wires=2)
#
# U = np.array([[ 0.51310276+0.81702166j, 0.13649626+0.22487759j],
# [ 0.26300233+0.00556194j, -0.96414101-0.03508489j]])
#
# @qml.qnode(dev)
# def circuit(r, phi):
# qml.Displacement(r, phi, wires=0)
# qml.InterferometerUnitary(U, wires=[0, 1])
# return qml.expval(qml.X(0))
#
# r = 0.543
# phi = 0.
#
# >>> print(circuit.jacobian([r, phi], options={"force_order2":False}))
# [[ 1.02620552 0.14823494]]
# >>> print(circuit.jacobian([r, phi], options={"force_order2":True}))
# [[ 1.02620552 -0.88728552]]
U = np.array(
[
[0.51310276 + 0.81702166j, 0.13649626 + 0.22487759j],
[0.26300233 + 0.00556194j, -0.96414101 - 0.03508489j],
]
)
with CVParamShiftTape() as tape:
qml.Displacement(0.543, 0, wires=0)
qml.InterferometerUnitary(U, wires=[0, 1])
qml.expval(qml.X(0))
tape._update_gradient_info()
tape.trainable_params = {t}
assert tape._par_info[0]["grad_method"] == "A"
assert tape._par_info[1]["grad_method"] == "A"
dev = qml.device("default.gaussian", wires=2)
grad_F = tape.jacobian(dev, method="numeric")
grad_A = tape.jacobian(dev, method="analytic")
grad_A2 = tape.jacobian(dev, method="analytic", force_order2=True)
# the different methods agree
assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)
assert np.allclose(grad_A2, grad_F, atol=tol, rtol=0)
class TestVarianceQuantumGradients:
"""Tests for the quantum gradients of various gates
with variance measurements"""
def test_first_order_observable(self, tol):
"""Test variance of a first order CV observable"""
dev = qml.device("default.gaussian", wires=1)
r = 0.543
phi = -0.654
with CVParamShiftTape() as tape:
qml.Squeezing(r, 0, wires=0)
qml.Rotation(phi, wires=0)
qml.var(qml.X(0))
tape.trainable_params = {0, 2}
res = tape.execute(dev)
expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2
assert np.allclose(res, expected, atol=tol, rtol=0)
# circuit jacobians
grad_F = tape.jacobian(dev, method="numeric")
grad_A = tape.jacobian(dev, method="analytic")
expected = np.array(
[
[
2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
2 * np.sinh(2 * r) * np.sin(2 * phi),
]
]
)
assert np.allclose(grad_A, expected, atol=tol, rtol=0)
"""Test model."""
import argparse
import base64
from datetime import datetime
import os
import shutil
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
#import lineDetector
import torch
from torch.autograd import Variable
import torchvision.transforms as transforms
import matplotlib.pylab as plt
#from lineDetector import canny_edge_detector, create_coordinates, region_of_interest, display_lines, average_slope_intercept
from model import *
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
transformations = transforms.Compose(
[transforms.Lambda(lambda x: (x / 127.5) - 1.0)])
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
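# Added note: with the gains configured below (Kp = 10, Ki = 0.001), the
# proportional term reacts to the instantaneous speed error while the small
# integral term slowly removes any steady-state offset from the set point.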
controller = SimplePIController(10, 0.0010)
set_speed = 20
controller.set_desired(set_speed)
# MAX_SPEED = 15
# MIN_SPEED = 10
# speed_limit = MAX_SPEED
#image_buf = np.zeros((1, 59, 255, 3))
#state_buf = np.zeros((1,4))
#font = cv2.FONT_HERSHEY_SIMPLEX
#bottomLeftCornerOfText = (10,10)
#fontScale = 0.3
#fontColor = (255,0,0)
#lineType = 2
def drawDirection(img, steeringAngle):
centerOfImg = (150, 150)
axesLength = (25, 100)
angleRot = 0
startAngle = 0
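# Added assumption: the simulator's steering command appears to span roughly
# [-1.2, 1.2], so the line below maps it linearly onto a [-90, 90] degree arc.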
endAngle = int(90 * steeringAngle / 1.2)
colorRed = (0, 0, 255)
colorGreen = (0, 255, 0)
thickness = 2
#image = cv2.ellipse(image, center_coordinates, axesLength, anglerot, startAngle, endAngle, color, thickness)
if steeringAngle > 0:
center_coordinates = (150 + 25, 140)
startAngle = 0
angleRot = 180
poly = cv2.ellipse2Poly(center_coordinates, axesLength, angleRot,
startAngle, endAngle, 10)
polyarray = np.array(poly)
polyCasted = np.int32([polyarray])
import logging
from os.path import dirname, join, realpath
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import astroplan as ap
from scipy.constants import c as c_light_ms
from tqdm import tqdm
from skimage import io
from skimage import transform as tf
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.stats import ttest_ind, norm, t
from astropy import units as u
from astropy.constants import c
from astropy.utils.iers import IERS_Auto
from cats.simulator.detector import Crires
from cats.extractor.runner import CatsRunner
from exoorbit.orbit import Orbit
# TODO List:
# - automatically mask points before fitting with SME
# - if star and planet steps aren't run manually, we use the initial values
# instead we should load the data if possible
# - Tests for all the steps
# - Refactoring of the steps, a lot of the code is strewn all over the place
# - Determine Uncertainties for each point
def shear(x, shear=1, inplace=False):
afine_tf = tf.AffineTransform(shear=shear)
modified = tf.warp(x, inverse_map=afine_tf)
return modified
def gauss(x, height, mu, sig, floor):
return height * np.exp(-(((x - mu) / sig) ** 2) / 2) + floor
def gaussfit(x, y, p0=None):
"""
Fit a simple gaussian to data
gauss(x, a, mu, sigma, floor) = a * exp(-z**2/2) + floor
with z = (x - mu) / sigma
Parameters
----------
x : array(float)
x values
y : array(float)
y values
Returns
-------
gauss(x), parameters
fitted values for x, fit paramters (a, mu, sigma)
"""
if p0 is None:
p0 = [np.max(y) - np.min(y), 0, 1, np.min(y)]
popt, _ = curve_fit(gauss, x, y, p0=p0)
return gauss(x, *popt), popt
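# Illustrative doctest-style example (added; noiseless synthetic data, so the
# default p0 is close enough for curve_fit to converge):
# >>> x = np.linspace(-5, 5, 101)
# >>> y = gauss(x, 2.0, 0.5, 1.2, 0.1)
# >>> _, popt = gaussfit(x, y)
# >>> np.allclose(popt, [2.0, 0.5, 1.2, 0.1])
# True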
def welch_t(a, b, ua=None, ub=None):
# t = (mean(a) - mean(b)) / sqrt(std(a)**2 + std(b)**2)
if ua is None:
ua = a.std() / np.sqrt(a.size)
if ub is None:
ub = b.std() / np.sqrt(b.size)
xa = a.mean()
xb = b.mean()
t = (xa - xb) / np.sqrt(ua**2 + ub**2)
return t
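# Illustrative example (added): with clearly separated means the statistic is
# large, e.g.
# >>> rng = np.random.default_rng(0)
# >>> a, b = rng.normal(0.0, 1.0, 500), rng.normal(0.5, 1.0, 500)
# >>> abs(welch_t(a, b)) > 5  # |t| ~ 0.5 / sqrt(2/500) ~ 8
# True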
# Update IERS tables if necessary
IERS_Auto()
# Detector
setting = "K/2/4"
detectors = [1, 2, 3]
orders = [7, 6, 5, 4, 3, 2]
detector = Crires(setting, detectors, orders=orders)
# Linelist
linelist = join(dirname(__file__), "crires_k_2_4.lin")
# Star info
star = "WASP-107"
planet = "b"
# Initialize the CATS runner
dataset = "WASP-107b_SNR200"
base_dir = realpath(join(dirname(__file__), f"../datasets/{dataset}"))
raw_dir = join(base_dir, "Spectrum_00")
medium_dir = join(base_dir, "medium")
done_dir = join(base_dir, "done")
runner = CatsRunner(
detector,
star,
planet,
linelist,
base_dir=base_dir,
raw_dir=raw_dir,
medium_dir=medium_dir,
done_dir=done_dir,
)
rv_step = 0.25
rv_range = 200
runner.configuration["cross_correlation"]["rv_range"] = rv_range
runner.configuration["cross_correlation"]["rv_points"] = int((2 * rv_range + 1) / rv_step)
runner.configuration["cross_correlation_reference"]["rv_range"] = rv_range
runner.configuration["cross_correlation_reference"]["rv_points"] = int((2 * rv_range + 1) / rv_step)
# Override data with known information
star = runner.star
planet = runner.planet
orbit = Orbit(star, planet)
planet.radius = 1 * u.Rjup
planet.mass = 1 * u.Mjup
atmosphere_height = planet.atm_scale_height(star.teff)
snr = star.radius ** 2 / (2 * planet.radius * atmosphere_height)
snr = snr.decompose()
velocity_semi_amplitude = orbit.radial_velocity_semiamplitude_planet()
t_exp = c / (2 * np.pi * velocity_semi_amplitude) * planet.period / detector.resolution
t_exp = t_exp.decompose()
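# Added note: this bounds the exposure time so that the planet's radial
# velocity drifts by less than one resolution element (c / R) during a single
# exposure, given a sinusoidal RV curve with semi-amplitude Kp and period P:
# t_exp = (c / R) / (2 * pi * Kp / P).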
print("SNR required: ", snr)
print("Maximum exposure time: ", t_exp)
print(f"Planet Velocity Kp {velocity_semi_amplitude.to('km/s')}")
# Run the Runner
# data = runner.run(["solve_problem"])
# d = data["solve_problem"]
# for k, v in d.items():
# plt.plot(v.wavelength, v.flux, label=f"{k}")
# plt.legend()
# plt.show()
# data = runner.run_module("cross_correlation_reference", load=False)
data = runner.run_module("cross_correlation", load=True)
spectra = runner.data["spectra"]
# Barycentric correction
observer = ap.Observer.at_site("paranal")
obstime = spectra.datetime[len(spectra)//2]
sky_location = star.coordinates
sky_location.obstime = obstime
sky_location.location = observer.location
correction = sky_location.radial_velocity_correction()
# runner.steps["cross_correlation"].plot(data, sysrem_iterations=5, sysrem_iterations_afterwards=6)
# for i in range(3, 10):
# plt.plot(np.sum(data[f"{i}"][10:27], axis=0) / 100, label=f"{i}")
# for j in range(10):
# plt.plot(np.sum(data[f"{i}.{j}"][10:27], axis=0) / 100, label=f"{i}.{j}")
data = data["7"]
config = runner.configuration["cross_correlation"]
rv_range = config["rv_range"]
rv_points = config["rv_points"]
rv_step = (2 * rv_range + 1) / rv_points
rv = np.linspace(-rv_range, rv_range, rv_points)
plt.imshow(data, aspect="auto", origin="lower")
plt.xlabel("rv [km/s]")
xticks = plt.xticks()[0][1:-1]
xticks_labels = np.interp(xticks, np.arange(len(rv)), rv)
xticks_labels = [f"{x:.3g}" for x in xticks_labels]
plt.xticks(xticks, labels=xticks_labels)
plt.show()
datetime = spectra.datetime
phi = (datetime - planet.time_of_transit) / planet.period
phi = phi.to_value(1)
# We only care about the fraction
phi = phi % 1
c_light = c_light_ms * 1e-3
interpolator = interp1d(rv, data, kind="linear", bounds_error=False)
vsys_min, vsys_max = 0, 25
kp_min, kp_max = 0, 300
vsys = np.linspace(vsys_min, vsys_max, int((vsys_max-vsys_min+1)//rv_step))
kp = np.linspace(kp_min, kp_max, int((kp_max-kp_min+1)//rv_step))
combined = np.zeros((len(kp), len(vsys)))
for i, vs in enumerate(tqdm(vsys)):
for j, k in enumerate(tqdm(kp, leave=False)):
vp = vs + k * np.sin(2 * np.pi * phi)
# shifted = [np.interp(vp[i], rv, data[i], left=np.nan, right=np.nan) for i in range(len(vp))]
shifted = np.diag(interpolator(vp))
combined[j, i] = np.nansum(shifted)
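# Added note: this builds the usual Kp-Vsys detection map: for each trial
# systemic velocity and planet semi-amplitude, every cross-correlation row is
# evaluated at the planet rest-frame velocity vp = vsys + Kp * sin(2*pi*phi)
# and the rows are summed, so a real signal stacks coherently near the true
# (Kp, vsys) while noise averages down.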
# Normalize to the number of input spectra
combined /= data.shape[0]
combined /= combined.std()
# Normalize to median 0
median = np.nanmedian(combined)
combined -= median
kp_peak = combined.shape[0] // 2
kp_width = kp_peak
for i in range(3):
# Determine the peak position in vsys and kp
kp_width_int = int(np.ceil(kp_width))
mean_vsys = np.nanmean(combined[kp_peak-kp_width_int+1:kp_peak+kp_width_int+1, :], axis=0)
vsys_peak = np.argmax(mean_vsys)
# And then fit gaussians to determine the width
curve, vsys_popt = gaussfit(
vsys,
mean_vsys,
p0=[mean_vsys[vsys_peak] - np.min(mean_vsys), vsys[vsys_peak], 1, np.min(mean_vsys)],
)
import matplotlib as mpl
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
from src.utils import get_points_to_plot
# Standard colors for uniform plots
COLOR_SILVER = '#C0C0C0'
COLOR_SILVER_RGB = (192 / 255, 192 / 255, 192 / 255) + (0.2,)
COLOR_INDIGO_RGB = (55 / 255, 0 / 255, 175 / 255) + (0.5,)
COLOR_CARNATION_RGB = np.array((247 / 255, 96 / 255, 114 / 255, 1)).reshape((1, -1))
CMAP = plt.cm.get_cmap('Blues')
def get_nb_points(data):
if data.xs is not None:
return len(data.xs)
elif data.A is not None:
return len(data.A)
else:
raise KeyError('What data are you using?')
def append_to_binary(number, new_digit):
return int(bin(number) + str(new_digit), 2)
def get_next_id(current_id, direction):
if current_id == 0:
if direction == 'left':
return 1
else:
return 2
level = int(np.ceil(np.log2(current_id)))
if direction == 'left':
return current_id + 2 ** level + 1
else:
return current_id + 2 ** level + 2
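# Added observation: for the shallow trees plotted here this behaves like a
# breadth-first numbering (0 -> 1, 2; 1 -> 3, 4; 2 -> 5, 6), but for deeper
# trees children of different parents can collide (e.g. get_next_id(3,
# 'right') == get_next_id(4, 'left') == 9); the heap convention 2*i + 1 /
# 2*i + 2 would keep the output filenames unique if that becomes an issue.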
def plot_dataset(data, colors, ax=None, eq_cuts=None, cmap=None, add_colorbar=True, pos=None):
if data.xs is not None:
ax = plot_dataset_metric(data.xs, data.cs, colors, eq_cuts, ax, cmap, add_colorbar)
elif data.G is not None:
ax, pos = plot_dataset_graph(data.G, data.ys, colors, ax, cmap, add_colorbar, pos)
return ax, pos
def add_colorbar_to_ax(ax, cmap):
cb = plt.colorbar(mpl.cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=0, vmax=1), cmap=cmap),
ax=ax, orientation='vertical')
cb.ax.set_title('p', y=-.05)
return ax
def plot_dataset_graph(G, ys, colors, ax, cmap, add_colorbar, pos):
if pos is None:
pos = get_position(G, ys)
nx.draw_networkx(G, pos=pos, ax=ax, node_color=colors, edge_color=COLOR_SILVER, with_labels=False,
edgecolors='black')
if add_colorbar:
ax = add_colorbar_to_ax(ax, cmap)
return ax, pos
def plot_dataset_metric(xs, cs, colors, eq_cuts, ax, cmap, add_colorbar):
plt.style.use('ggplot')
plt.ioff()
ax.tick_params(axis='x', colors=(0, 0, 0, 0))
ax.tick_params(axis='y', colors=(0, 0, 0, 0))
ax.set_aspect('equal', 'box')
ax.grid()
xs_embedded, cs_embedded = get_points_to_plot(xs, cs)
sc = ax.scatter(xs_embedded[:, 0], xs_embedded[:, 1], color=colors, vmin=0, vmax=1, edgecolor='black')
if eq_cuts is not None:
for eq in eq_cuts:
x, y = get_lines(xs, eq)
ax.plot(x, y, 'k--')
if add_colorbar:
ax = add_colorbar_to_ax(ax, cmap)
return ax
def labels_to_colors(ys, cmap):
nb_points = len(ys)
colors = np.zeros((nb_points, 4))
normalize_ys = mpl.colors.Normalize(vmin=0, vmax=np.max(ys))
for y in np.unique(ys):
idx_current = (ys == y).nonzero()[0]
color = cmap(normalize_ys(y))
colors[idx_current, :] = np.array(color).reshape((1, -1))
return colors
def plot_soft_predictions(data, contracted_tree, eq_cuts=None, id_node=0, path=None):
plt.style.use('ggplot')
plt.ioff()
cmap_groundtruth = plt.cm.get_cmap('tab10')
cmap_heatmap = plt.cm.get_cmap('Blues')
if path is not None:
output_path = path
output_path.mkdir(parents=True, exist_ok=True)
if data.ys is not None:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 10))
colors = labels_to_colors(data.ys, cmap=cmap_groundtruth)
ax, pos = plot_dataset(data, colors, eq_cuts=eq_cuts, ax=ax, add_colorbar=False)
fig.savefig(output_path / f"groundtruth.svg")
plt.close(fig)
plot_soft_prediction_node(data, contracted_tree.root, eq_cuts=eq_cuts, id_node=0, cmap=cmap_heatmap, path=path,
pos=pos)
def plot_soft_prediction_node(data, node, eq_cuts, id_node, cmap, path, pos):
colors = cmap(node.p)
if eq_cuts is not None:
if len(node.characterizing_cuts) != 0:
id_characterizing_cuts = list(node.characterizing_cuts.keys())
eq_characterizing_cuts = eq_cuts[id_characterizing_cuts]
else:
eq_characterizing_cuts = []
else:
eq_characterizing_cuts = []
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 10))
plot_dataset(data, colors, eq_cuts=eq_characterizing_cuts, ax=ax, cmap=cmap, pos=pos)
fig.savefig(path / f"node_nb_{id_node:02d}.svg")
plt.close(fig)
if node.left_child is not None:
id_left = get_next_id(id_node, 'left')
plot_soft_prediction_node(data, node.left_child, eq_cuts, id_left, cmap, path, pos=pos)
if node.right_child is not None:
id_right = get_next_id(id_node, 'right')
plot_soft_prediction_node(data, node.right_child, eq_cuts, id_right, cmap, path, pos=pos)
def plot_hard_predictions(data, ys_predicted, path=None):
cmap_groundtruth = plt.cm.get_cmap('BuPu')
cmap_predictions = plt.cm.get_cmap('OrRd')
if path is not None:
output_path = path
output_path.mkdir(parents=True, exist_ok=True)
if data.ys is not None:
fig, (ax_true, ax_predicted) = plt.subplots(nrows=1, ncols=2, figsize=(20, 10))
colors_true = labels_to_colors(data.ys, cmap=cmap_groundtruth)
ax_true = plot_dataset(data, colors_true, ax=ax_true, add_colorbar=False)
else:
fig, ax_predicted = plt.subplots(nrows=1, ncols=1, figsize=(20, 10))
colors_predicted = labels_to_colors(ys_predicted, cmap=cmap_predictions)
ax_predicted = plot_dataset(data, colors_predicted, ax=ax_predicted, add_colorbar=False)
fig.savefig(output_path / f"hard_clustering.svg")
plt.close(fig)
def get_position(G, ys):
if ys is not None:
pos = nx.random_layout(G)
ncls = np.max(ys) + 1
xoff = np.sin(2 * np.pi * ys / ncls) * 2
yoff = np.cos(2 * np.pi * ys / ncls) * 2
for v in G:
pos[v][0] += xoff[v]
pos[v][1] += yoff[v]
pos = nx.spring_layout(G, pos=pos, iterations=1)
elif nx.is_connected(G):
pos = nx.spectral_layout(G)
pos = nx.spring_layout(G, pos=pos, k=.5, iterations=100)
else:
pos = nx.kamada_kawai_layout(G)
pos = nx.spring_layout(G, pos=pos, k=.5, iterations=100)
return pos
def plot_cuts(data, cuts, nb_cuts_to_plot, path):
plt.style.use('ggplot')
plt.ioff()
if path is not None:
path = path / 'cuts'
path.mkdir(parents=True, exist_ok=True)
value_cuts = cuts.values
order_cuts = cuts.costs
eq_cuts = cuts.equations
nb_cuts_to_plot = min(nb_cuts_to_plot, len(value_cuts))
pos = None
for i in np.arange(nb_cuts_to_plot):
eq = [eq_cuts[i]] if eq_cuts is not None else None
fig, pos = plot_cut(data, cut=value_cuts[i], order=order_cuts[i], eq=eq, pos=pos)
if path is not None:
fig.savefig(path / f"cut number {i}.svg")
plt.close(fig)
def get_lines(xs, eq):
min_x, max_x = np.min(xs[:, 0]), np.max(xs[:, 0])
min_y, max_y = np.min(xs[:, 1]), np.max(xs[:, 1])
x_range = np.linspace(min_x, max_x, 100)
y_range = np.linspace(min_y, max_y, 100)
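# Added note: eq = (a, b, c) is read as the line a*x + b*y + c = 0, so a == 0
# below gives the horizontal line y = -c/b and b == 0 the vertical line
# x = -c/a.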
if eq[0] == 0:
x = x_range
y = np.zeros_like(x_range)
y.fill(-eq[2] / eq[1])
elif eq[1] == 0:
x = np.zeros_like(y_range)
x.fill(-eq[2] / eq[0])
y = y_range
from math import sqrt
import time
import numpy as np
import itertools
import heapq
from pyspark.sql import Row
'''
Author:Reza
'''
def calculate_similarity(alpha_user_did_score, c2):
# we don't need did_list in dataframe
def __helper(did_list, user_score_matrix, top_n_similar_user_list, c1_list):
user_score_matrix = np.array(user_score_matrix)
m = user_score_matrix.shape[1]
alpha_dids, alpha_score_matrix = alpha_user_did_score
alpha_score_matrix = np.array(alpha_score_matrix)
cross_mat = np.matmul(user_score_matrix, alpha_score_matrix.transpose())
similarity_list = np.sqrt(m) - np.sqrt(np.maximum(np.expand_dims(c1_list, 1) + c2 - (2 * cross_mat), 0.0))
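# Added note: c1_list and c2 appear to hold the squared norms of the score
# vectors (0.1**2 + 0.8**2 + 0.9**2 = 1.46 matches the sample data below), so
# c1 + c2 - 2 * cross_mat is the squared Euclidean distance; the similarity is
# sqrt(m) minus that distance, floored at zero inside the square root for
# numerical safety.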
result = []
for did, cosimilarity, top_n_similar_user in zip(did_list, similarity_list, top_n_similar_user_list):
user_score_s = list(zip(alpha_dids, cosimilarity.tolist()))
user_score_s.extend(top_n_similar_user)
user_score_s = heapq.nlargest(top_n_value, user_score_s, key=lambda x: x[1])
result.append(user_score_s)
return result
return __helper
top_n_value = 3
block_user = [Row(did_list=[u'1', u'2'], score_matrix=[[0.10000000149011612, 0.800000011920929, 0.8999999761581421], [
0.10000000149011612, 0.800000011920929, 0.8999999761581421]], c1_list=[1.4600000381469727, 1.4600000381469727])]
block_user_did_score = (block_user[0]['did_list'], block_user[0]['score_matrix'])
c2 = np.array(block_user[0]['c1_list'])
user_score_matrix_1_2 = [[0.1, 0.8, 0.9], [0.1, 0.8, 0.9]]
top_n_similar_user_1_2 = [[], []]
c1_list_1_2 = [1.4600000381469727, 1.4600000381469727]
top_n_similar_user_1_2 = calculate_similarity(block_user_did_score, c2)([1, 2], user_score_matrix_1_2, top_n_similar_user_1_2, c1_list_1_2)
# print(top_n_similar_user_1_2)
user_score_matrix_3_4 = [[0.1, 0.8, 0.9], [0.1, 0.8, 0.9]]
top_n_similar_user_3_4 = [[], []]
c1_list_3_4 = [1.4600000381469727, 1.4600000381469727]
top_n_similar_user_3_4 = calculate_similarity(block_user_did_score, c2)([3, 4], user_score_matrix_3_4, top_n_similar_user_3_4, c1_list_3_4)
# print(top_n_similar_user_3_4)
# second block user/cross user
block_user = [Row(did_list=[u'3', u'4'], score_matrix=[[0.10000000149011612, 0.800000011920929, 0.8999999761581421], [
0.10000000149011612, 0.800000011920929, 0.8999999761581421]], c1_list=[1.4600000381469727, 1.4600000381469727])]
block_user_did_score = (block_user[0]['did_list'], block_user[0]['score_matrix'])
c2 = np.array(block_user[0]['c1_list'])
#!/usr/bin/env python
__author__ = '<NAME>'
#========================================================================
import os, sys
import copy
import time
import uuid
import pickle
import subprocess
import numpy as np
import tensorflow as tf
from gryffin.utilities import Logger
from gryffin.utilities.decorators import thread
#========================================================================
class DescriptorGenerator(Logger):
eta = 1e-3
max_iter = 10**3
def __init__(self, config):
self.config = config
self.is_generating = False
self.exec_name = '%s/descriptor_generator/generation_process.py' % self.config.get('home')
# define registers
self.auto_gen_descs = {}
self.comp_corr_coeffs = {}
self.gen_descs_cov = {}
self.min_corrs = {}
self.reduced_gen_descs = {}
self.weights = {}
self.sufficient_indices = {}
@thread
def single_generate(self, descs, objs, feature_index, result_dict = None):
# collect all relevant properties
sim_dict = {}
for prop in dir(self):
if callable(getattr(self, prop)) or prop.startswith(('__', 'W', 'config')): continue
sim_dict[prop] = getattr(self, prop)
sim_dict['num_samples'] = descs.shape[0]
sim_dict['num_descs'] = descs.shape[1]
sim_dict['descs'] = descs
sim_dict['objs'] = objs
sim_dict['grid_descs'] = self.config.feature_descriptors[feature_index]
identifier = str(uuid.uuid4())[:8]
config_name = '%s/descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
with open(config_name, 'wb') as content:
pickle.dump(sim_dict, content)
# FNULL = open(os.devnull, 'w')
# subprocess.call('%s %s' % (self.exec_name, config_name), shell = True, stdout = FNULL, stderr = subprocess.STDOUT)
subprocess.call('python %s %s' % (self.exec_name, config_name), shell = True)
print('SUBMITTED DESC GENERATION')
results_name = '%s/completed_descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
# wait for results to be written
while not os.path.isfile(results_name):
time.sleep(0.05)
current_size = 0
while current_size != os.path.getsize(results_name):
current_size = os.path.getsize(results_name)
time.sleep(0.05)
time.sleep(0.2)
try:
with open(results_name, 'rb') as content:
results = pickle.load(content)
except EOFError:
time.sleep(2)
with open(results_name, 'rb') as content:
results = pickle.load(content)
self.min_corrs[feature_index] = results['min_corrs']
self.auto_gen_descs[feature_index] = results['auto_gen_descs']
self.comp_corr_coeffs[feature_index] = results['comp_corr_coeffs']
self.gen_descs_cov[feature_index] = results['gen_descs_cov']
self.reduced_gen_descs[feature_index] = results['reduced_gen_descs']
self.weights[feature_index] = results['weights']
self.sufficient_indices[feature_index] = results['sufficient_indices']
# print("WEIGHTS", feature_index, self.weights[feature_index])
# print("REDUCED_DESCS", feature_index, results['reduced_gen_descs'].shape)
result_dict[feature_index] = results['reduced_gen_descs']
os.remove(config_name)
os.remove(results_name)
@thread
def generate(self, obs_params, obs_objs):
import time
start = time.time()
self.is_generating = True
result_dict = {}
feature_types = self.config.feature_types
feature_descriptors = self.config.feature_descriptors
# print('FEATURE DESCRIPTORS', feature_descriptors, np.array(feature_descriptors[0]).shape)
# print('*'*10)
# for element in feature_descriptors:
# print(element)
# print('*'*10)
for feature_index, feature_options in enumerate(self.config.feature_options):
if feature_types[feature_index] == 'continuous':
self.weights[feature_index] = None
self.reduced_gen_descs[feature_index] = None
result_dict[feature_index] = None
continue
if feature_descriptors[feature_index] is None:
self.weights[feature_index] = None
self.reduced_gen_descs[feature_index] = None
result_dict[feature_index] = None
continue
if feature_descriptors[feature_index].shape[1] == 1:
self.weights[feature_index] = np.array([[1.]])
self.reduced_gen_descs[feature_index] = feature_descriptors[feature_index]
result_dict[feature_index] = feature_descriptors[feature_index]
continue
sampled_params = obs_params[:, feature_index].astype(np.int32)
sampled_descriptors = feature_descriptors[feature_index][sampled_params]
sampled_objs = np.reshape(obs_objs, (len(obs_objs), 1))
self.single_generate(sampled_descriptors, sampled_objs, feature_index, result_dict)
# avoid parallel execution if not desired
if not self.config.get('parallel'):
if feature_types[feature_index] == 'continuous': continue
while not feature_index in result_dict:
time.sleep(0.1)
for feature_index in range(len(self.config.feature_options)):
if feature_types[feature_index] == 'continuous': continue
while not feature_index in result_dict:
time.sleep(0.1)
gen_feature_descriptors = [result_dict[feature_index] for feature_index in range(len(result_dict.keys()))]
self.gen_feature_descriptors = gen_feature_descriptors
self.is_generating = False
end = time.time()
self.desc_gen_time = end - start
def get_descriptors(self):
while self.is_generating:
time.sleep(0.1)
if hasattr(self, 'gen_feature_descriptors'):
print('[TIME]', self.desc_gen_time, '(descriptor generation)')
return self.gen_feature_descriptors
else:
return self.config.feature_descriptors
def get_summary(self):
summary = {}
feature_types = self.config.feature_types
if not hasattr(self, 'gen_feature_descriptors'):
for feature_index in range(len(self.config.feature_options)):
contribs = {}
if feature_types[feature_index] == 'continuous': continue
feature_descriptors = self.config.feature_descriptors[feature_index]
if feature_descriptors is None: continue
for desc_index in range(feature_descriptors.shape[1]):
desc_summary_dict = {}
desc_summary_dict['relevant_given_descriptors'] = np.arange(len(feature_descriptors[:, desc_index]))
desc_summary_dict['given_descriptor_contributions'] = np.ones(len(feature_descriptors[:, desc_index]))
contribs['descriptor_%d' % desc_index] = copy.deepcopy(desc_summary_dict)
summary['feature_%d' % feature_index] = copy.deepcopy(contribs)
return summary
for feature_index in range(len(self.config.feature_options)):
if feature_types[feature_index] == 'continuous': continue
weights = self.weights[feature_index]
reduced_gen_descs = self.reduced_gen_descs[feature_index]
sufficient_indices = self.sufficient_indices[feature_index]
if weights is None: continue
if len(sufficient_indices) == 0: continue
# normalize weights
normed_weights = np.empty(weights.shape)
for index, weight_elements in enumerate(weights):
normed_weights[index] = weight_elements / np.sum(np.abs(weight_elements))
# identify contributing indices
contribs = {}
# for new_desc_index in range(reduced_gen_descs.shape[1]):
for new_desc_index in sufficient_indices:
desc_summary_dict = {}
relevant_weights = normed_weights[new_desc_index]
sorting_indices = np.argsort(np.abs(relevant_weights))
cumulative_sum = np.cumsum(np.abs(relevant_weights[sorting_indices]))
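# Added note (hedged): sorting by absolute weight and accumulating is the
# standard way to find the smallest subset of input descriptors that carries
# most of the contribution to each generated descriptor.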
import time
import math
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from collections import defaultdict
import ethologger.utils.auxiliary as aux
mpl.rcParams["agg.path.chunksize"] = 10000
class VisualizationWrapper:
def __init__(self, fps=30, tick_stepsize=1.5, time_scale=3600):
self.palette = "tab20"
self.fps = fps
self.time_scale = time_scale
self.tick_stepsize = tick_stepsize
self.figsize = (30, 30)
self.x_grid = {"alpha": 0.5, "which": "major"}
self.y_grid = {"alpha": 0.5, "which": "major"}
self.xtick_params = {
"direction": "out",
"length": 6,
"width": 2,
"labelrotation": 0,
"labelsize": 30,
}
self.ytick_params = {
"direction": "out",
"length": 6,
"width": 2,
"labelrotation": 0,
"labelsize": 30,
}
self.fontsize = {
"tick": 30,
"label": 30,
"title": 40,
"legend": 30,
"legend_title": 35,
}
sns.set_context("talk")
def palette_values(self, num_values):
return np.array(sns.color_palette(self.palette, num_values))
def _set_title(self, ax, title_=None):
if title_ is not None:
ax.set_title(title_, fontsize=self.fontsize["title"])
return ax
def _set_suptitle(self, fig, suptitle_=None):
if suptitle_ is not None:
fig.suptitle(suptitle_, fontsize=self.fontsize["title"] + 5)
return fig
def _set_yaxis(self, ax, ylabel_=None, yticks_=None, yticklabels_=None):
if ylabel_ is not None:
ax.set_ylabel(ylabel_, fontsize=self.fontsize["label"])
if yticks_ is not None:
ax.set_yticks(yticks_)
if yticklabels_ is not None:
ax.set_yticklabels(yticklabels_, fontsize=self.fontsize["tick"])
return ax
def _set_xaxis(self, ax, xlabel_=None, xticks_=None, xticklabels_=None):
if xlabel_ is not None:
ax.set_xlabel(xlabel_, fontsize=self.fontsize["label"])
if xticks_ is not None:
ax.set_xticks(xticks_)
if xticklabels_ is not None:
ax.set_xticklabels(xticklabels_, fontsize=self.fontsize["tick"])
return ax
def _set_all(
self,
fig,
ax,
xlabel_=None,
ylabel_=None,
xticks_=None,
yticks_=None,
xticklabels_=None,
yticklabels_=None,
title_=None,
suptitle_=None,
):
self._set_xaxis(ax, xlabel_=xlabel_, xticks_=xticks_, xticklabels_=xticklabels_)
self._set_yaxis(ax, ylabel_=ylabel_, yticks_=yticks_, yticklabels_=yticklabels_)
self._set_title(ax, title_=title_)
self._set_suptitle(fig, suptitle_=suptitle_)
ax.tick_params(axis="x", **self.xtick_params)
ax.tick_params(axis="y", **self.ytick_params)
return fig, ax
def _get_xtick_expt(self, num_time_point):
return (
np.arange(
0,
num_time_point,
int((self.fps * self.time_scale) * self.tick_stepsize),
)
/ (self.fps * self.time_scale)
)
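# Added note: this places a tick every tick_stepsize * time_scale seconds of
# footage (i.e. every tick_stepsize hours when time_scale = 3600); dividing
# frame indices by fps * time_scale converts them to those units.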
@staticmethod
def _set_gender_defined_colors(fig):
m_palette_idx_dict = {}
f_palette_idx_dict = {}
num_m_palette = 0
num_f_palette = 0
for ax in fig.axes:
lines = ax.get_lines()
for line in lines:
name = line.get_label()
if "FlyM" in name and name not in m_palette_idx_dict.keys():
m_palette_idx_dict[name] = num_m_palette
num_m_palette += 1
elif "FlyF" in name and name not in f_palette_idx_dict.keys():
f_palette_idx_dict[name] = num_f_palette
num_f_palette += 1
elif (
name not in f_palette_idx_dict.keys()
and name not in m_palette_idx_dict.keys()
):
raise ValueError(
f"Given expt. name {name} are not compatible with naming convention."
)
else:
pass
m_palette = sns.color_palette("magma", num_m_palette)
f_palette = sns.color_palette("crest", num_f_palette)
lines_labels = []
for ax in fig.axes:
lines = ax.get_lines()
for line in lines:
name = line.get_label()
if "FlyM" in name:
line.set_color(m_palette[m_palette_idx_dict[name]])
elif "FlyF" in name:
line.set_color(f_palette[f_palette_idx_dict[name]])
else:
raise ValueError(
f"Given expt. name {name} are not compatible with naming convention."
)
lines_labels.append(ax.get_legend_handles_labels())
labels = []
handles = []
for (hnd_list, lbl_list) in lines_labels:
for idx, lbl in enumerate(lbl_list):
if lbl not in labels:
labels.append(lbl)
handles.append(hnd_list[idx])
fig_legend = fig.legend(
handles,
labels,
loc="center left",
bbox_to_anchor=(1, 0.5),
fontsize=30,
)
for line in fig_legend.get_lines():
line.set_linewidth(8.0)
return fig
@staticmethod
def _get_opt_label_name(opt, label="", label_name_dict=None, lw=32):
if label_name_dict is None:
if opt == "annotation":
txt = "Annotation"
elif opt == "clustering":
txt = "Cluster"
else:
txt = "Label"
else:
txt = ""
if label_name_dict is not None and label != "":
txt += f"{label_name_dict[label]}"
elif label_name_dict is None and label != "":
txt += f"-{label}"
else:
pass
title_ = aux.txt_wrap(txt, lw=lw)
return title_
@staticmethod
def _get_legend_patch(palette, label_name_dict=None):
if label_name_dict is not None:
legend_tn = [
patches.Patch(color=(palette[i]), label=str(lbl) + ":" + name)
for i, (lbl, name) in enumerate(label_name_dict.items())
]
else:
legend_tn = [
patches.Patch(color=clr, label=i) for i, clr in enumerate(palette)
]
return legend_tn
@staticmethod
def _get_square_length(plot_dict):
len_dict = len(plot_dict)
ncols = int(math.sqrt(len_dict))
nrows = int(math.ceil(len_dict / ncols))
return nrows, ncols
@staticmethod
def _set_hour_to_HM_formatter(fig):
for ax in fig.axes:
formatter = mpl.ticker.FuncFormatter(
lambda hour, x: aux.get_HM_from_hour(hour)
)
ax.xaxis.set_major_formatter(formatter)
return fig
@staticmethod
def _set_sec_to_MS_formatter(fig):
for ax in fig.axes:
formatter = mpl.ticker.FuncFormatter(
lambda sec, x: time.strftime("%M:%S", time.gmtime(sec))
)
ax.xaxis.set_major_formatter(formatter)
return fig
def _get_palette(self, values, c=None):
if c is not None:
assert isinstance(c, np.ndarray)
c = c.astype(int)
num_classes = np.max(c) + 1
palette = np.array(sns.color_palette(self.palette, num_classes))
palette_ = palette[c]
elif isinstance(values, dict):
if "hue" in values.keys():
palette = np.array(
sns.color_palette(self.palette, len(set(values["hue"])))
)
palette_ = palette[[i for i, _ in enumerate(values["hue"])]]
else:
palette = np.array(sns.color_palette(self.palette, len(values["x"])))
palette_ = palette[[i for i, _ in enumerate(values["x"])]]
else:
palette_ = self.palette
return palette_
# Plotting methods
def scatterplot(self, values, c=None, label_name_dict=None, **kwargs):
third_dim = values.shape[1] > 2
if c is not None:
assert isinstance(c, np.ndarray)
c = c.astype(int)
fig = plt.figure(figsize=self.figsize)
if third_dim:
ax = fig.add_subplot(111, projection="3d")
else:
ax = fig.add_subplot(aspect="equal")
if c is None:
if third_dim:
_ = ax.scatter(
(values[:, 0]),
(values[:, 1]),
(values[:, 2]),
rasterized=True,
**kwargs,
)
else:
_ = ax.scatter(
(values[:, 0]), (values[:, 1]), rasterized=True, **kwargs
)
else:
if label_name_dict is not None:
num_classes = len(label_name_dict.keys())
else:
num_classes = np.max(c) + 1
palette = np.array(sns.color_palette(self.palette, num_classes))
if third_dim:
_ = ax.scatter(
(values[:, 0]),
(values[:, 1]),
(values[:, 2]),
c=(palette[c]),
**kwargs,
)
else:
_ = ax.scatter(
(values[:, 0]),
(values[:, 1]),
c=(palette[c]),
rasterized=True,
**kwargs,
)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height * 0.9])
if label_name_dict is not None:
legend_tn = self.__class__._get_legend_patch(palette, label_name_dict)
else:
legend_tn = self.__class__._get_legend_patch(palette)
lgnd = ax.legend(
loc="upper right",
bbox_to_anchor=(1.25, 0.75),
handles=legend_tn,
fontsize=self.fontsize["legend"],
title="clusters",
title_fontsize=self.fontsize["legend_title"],
)
ax.add_artist(lgnd)
for lbl in range(0, num_classes):
if lbl in c:
mu_x = np.mean(values[(c == lbl, 0)])
mu_y = np.mean(values[(c == lbl, 1)])
if third_dim:
mu_z = np.mean(values[(c == lbl, 2)])
ax.text(
mu_x,
mu_y,
mu_z,
(str(lbl)),
size=20,
zorder=1,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
)
else:
ax.annotate(
(str(lbl)),
(mu_x, mu_y),
size=20,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
)
return fig, ax
def spectogram(
self,
coefs,
frequencies,
title="Power Spectrum",
xlabel="Time (hour)",
ylabel="period (sec)",
**kwargs,
):
cmap_ = plt.cm.seismic
periods = 1.0 / frequencies
time = np.arange(0, coefs.shape[0]) / (self.fps * self.time_scale)
xticks = self._get_xtick_expt(coefs.shape[0])
xticklabels_ = xticks
yticklabels_ = 2 ** np.arange(
np.ceil(np.log2(periods.min())), np.ceil(np.log2(periods.max()))
)
yticks = np.log2(yticklabels_)
fig = plt.figure(figsize=self.figsize)
ax = fig.add_subplot()
im = ax.contourf(
time,
np.log2(periods),
np.swapaxes(np.log2(coefs + 1), 0, 1),
cmap=cmap_,
)
fig, ax = self._set_all(
fig,
ax,
title_=title,
xlabel_=xlabel,
xticklabels_=xticklabels_,
ylabel_=ylabel,
yticks_=yticks,
yticklabels_=yticklabels_,
)
cbar_ax = fig.add_axes([0.95, 0.5, 0.03, 0.25])
fig.colorbar(im, cax=cbar_ax, orientation="vertical")
return fig, ax
def lineplot(
self,
values,
labels=None,
log_scale=False,
title="Feature Value",
ylabel="Value",
xlabel="Time (hour)",
ax=None,
legend=True,
**kwargs,
):
multiple_values = values.ndim > 1
if ax is None:
fig = plt.figure(figsize=self.figsize)
ax = fig.add_subplot()
else:
fig = ax.figure
xticks_ = self._get_xtick_expt(values.shape[0])
xticklabels_ = xticks_
time = np.arange(0, values.shape[0]) / (self.fps * self.time_scale)
if multiple_values:
palette = np.array(sns.color_palette(self.palette, values.shape[1]))
for i in range(values.shape[1]):
try:
ax.plot(time, values[:, i], label=labels[i], c=palette[i], **kwargs)
except (IndexError, TypeError):
ax.plot(time, values[:, i], **kwargs)
else:
try:
ax.plot(time, values, label=labels, **kwargs)
except TypeError:
ax.plot(time, values, **kwargs)
if legend and labels is not None and multiple_values:
legend_tn = [
patches.Patch(color=(palette[i]), label=labels[i])
for i in range(0, values.shape[1])
]
lgnd = ax.legend(
loc="center left",
bbox_to_anchor=(1, 0.5),
handles=legend_tn,
fontsize=self.fontsize["legend"],
)
ax.add_artist(lgnd)
elif legend and labels is not None:
ax.legend()
else:
pass
fig, ax = self._set_all(
fig,
ax,
title_=title,
xlabel_=xlabel,
xticks_=xticks_,
xticklabels_=xticklabels_,
ylabel_=ylabel,
)
ax.xaxis.grid(**self.x_grid)
ax.yaxis.grid(**self.y_grid)
if log_scale:
ax.set_yscale("log")
return fig, ax
def heatmap(
self,
values,
title=None,
ylabel=None,
xlabel=None,
**kwargs,
):
fig = plt.figure(figsize=self.figsize)
ax = fig.add_subplot()
ax = sns.heatmap(values, ax=ax, **kwargs)
fig, ax = self._set_all(
fig, ax, suptitle_=title, xlabel_=xlabel, ylabel_=ylabel
)
return fig, ax
def clustermap(
self,
values,
title=None,
ylabel=None,
xlabel=None,
**kwargs,
):
cg = sns.clustermap(values, figsize=self.figsize, **kwargs)
ax = cg.ax_heatmap
self._set_all(cg.fig, ax, suptitle_=title, xlabel_=xlabel, ylabel_=ylabel)
return cg, ax
def barplot(
self,
values,
c=None,
title=None,
ylabel=None,
xlabel=None,
xticklabels_=None,
ax=None,
**kwargs,
):
fig, ax = self.aux_snsplot(
values,
c=c,
title=title,
ylabel=ylabel,
xlabel=xlabel,
xticklabels_=xticklabels_,
ax=ax,
function=sns.barplot,
**kwargs,
)
return fig, ax
def boxplot(
self,
values,
c=None,
title=None,
ylabel=None,
xlabel=None,
xticklabels_=None,
ax=None,
**kwargs,
):
fig, ax = self.aux_snsplot(
values,
c=c,
title=title,
ylabel=ylabel,
xlabel=xlabel,
xticklabels_=xticklabels_,
ax=ax,
function=sns.boxplot,
**kwargs,
)
return fig, ax
def boxenplot(
self,
values,
c=None,
title=None,
ylabel=None,
xlabel=None,
xticklabels_=None,
ax=None,
**kwargs,
):
fig, ax = self.aux_snsplot(
values,
c=c,
title=title,
ylabel=ylabel,
xlabel=xlabel,
xticklabels_=xticklabels_,
ax=ax,
function=sns.boxenplot,
**kwargs,
)
return fig, ax
def aux_snsplot(
self,
values,
c=None,
title=None,
ylabel=None,
xlabel=None,
xticklabels_=None,
ax=None,
function=None,
**kwargs,
):
assert function is not None
palette_ = self._get_palette(values, c)
if ax is None:
fig = plt.figure(figsize=self.figsize)
ax = fig.add_subplot()
else:
fig = ax.figure
if isinstance(values, dict):
ax = function(
ax=ax,
x=values["x"],
y=values["y"],
hue=values.get("hue", None),
palette=palette_,
**kwargs,
)
xticks = None
else:
xticks = np.arange(values.shape[0])
ax = function(ax=ax, x=xticks, y=values, palette=palette_, **kwargs)
ax = self._set_xaxis(ax, xticks_=xticks, xticklabels_=xticklabels_)
fig, ax = self._set_all(fig, ax, title_=title, xlabel_=xlabel, ylabel_=ylabel)
return fig, ax
def feature_trajectory_plot(
self,
values,
value_names_dict,
label,
name,
opt="",
label_name_dict=None,
**kwargs,
):
nrows, ncols = self.__class__._get_square_length(value_names_dict)
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)
time = np.arange(0, values.shape[0]) / (self.fps * self.time_scale)
values_aligned = values[:, :, :] - values[0, :, :]
for i, (_, val_name) in enumerate(value_names_dict.items()):
for j in range(values_aligned.shape[-1]):
axes[i // ncols, i % ncols].plot(
time, values_aligned[:, i, j], c="midnightblue", **kwargs
)
fig, axes[i // ncols, i % ncols] = self._set_all(
fig,
axes[i // ncols, i % ncols],
title_=aux.txt_wrap(f"{val_name} Trajectories", lw=20),
ylabel_="Feature Value",
xlabel_="Time (seconds)",
)
fig = self.__class__._set_sec_to_MS_formatter(fig)
lbl_name = self.__class__._get_opt_label_name(
opt, label, label_name_dict=label_name_dict
)
fig = self._set_suptitle(fig, f"Trajectories of {lbl_name} of {name}")
fig.tight_layout()
return fig, axes
def label_hspan_feature_plot(
self,
frame_values,
frame_labels,
value_names,
span_palette,
name,
annotate_hspan=True,
shift=0,
title="Feature Values",
ylabel="Value",
xlabel="Time (hour)",
ignore_label=None,
label_name_dict=None,
opt="",
**kwargs,
):
if ignore_label is None:
ignore_label = []
self.palette = "tab20b"
fig, ax = self.lineplot(
frame_values,
labels=value_names,
title=title,
xlabel=xlabel,
ylabel=ylabel,
**kwargs,
)
change_points = aux.cont_intvls(frame_labels)
time_step = self.fps * self.time_scale
for i in range(1, change_points.shape[0]):
label = frame_labels[change_points[i - 1]]
if label in ignore_label:
alpha = 0
else:
alpha = 1
ax.axvspan(
change_points[i - 1] / time_step,
change_points[i] / time_step,
facecolor=span_palette[label],
alpha=alpha,
)
if annotate_hspan:
midpoint = (change_points[i] + change_points[i - 1]) // 2
y = np.mean(frame_values[midpoint, :])
rng = np.random.default_rng()
y_jittered = y * (1 + rng.uniform(0, 1) * 0.005) - rng.uniform(0, 300)
ax.annotate(text=str(label), xy=(midpoint / time_step, y_jittered))
if label_name_dict is not None:
legend_tn = self.__class__._get_legend_patch(span_palette, label_name_dict)
else:
legend_tn = self.__class__._get_legend_patch(span_palette)
lgnd_title = f"{self.__class__._get_opt_label_name(opt)}s"
lgnd = ax.legend(
loc="center left",
bbox_to_anchor=(-0.45, 0.5),
handles=legend_tn,
fontsize=self.fontsize["legend"],
title=lgnd_title,
title_fontsize=self.fontsize["legend_title"],
)
ax.add_artist(lgnd)
ax.set_xticklabels(ax.get_xticks() + shift / time_step)
fig = self.__class__._set_hour_to_HM_formatter(fig)
xticklabels_ = ax.get_xticklabels()
start = aux.get_HM_from_hour(xticklabels_[0].get_text())
end = aux.get_HM_from_hour(xticklabels_[-1].get_text())
intvl = f"{start}-{end}"
suptitle_ = f"{opt.title()} Label Spanned Feature Plots ~ {intvl} of {name}"
fig = self._set_suptitle(fig, suptitle_)
return fig, ax
def coverage_clustermap(
self,
coverage_dict,
title=None,
xlabel=None,
ylabel=None,
xticklabels_=1,
yticklabels_=1,
):
heatval = np.zeros(
(len(coverage_dict.keys()), len(list(coverage_dict.values())[0]))
)
for lbl, values_dict in coverage_dict.items():
for lbl_v, val in values_dict.items():
heatval[lbl, lbl_v] = val
cg, ax = self.clustermap(
heatval,
ylabel=ylabel,
xlabel=xlabel,
xticklabels=xticklabels_,
yticklabels=yticklabels_,
cbar_kws={"shrink": 0.5},
tree_kws={"linewidths": 5},
cmap="mako",
cbar_pos=(-0.07, 0.32, 0.03, 0.2),
)
self._set_suptitle(cg.fig, title)
return cg, ax
def label_occupation_barplot(
self,
occupation_dict,
name,
title=None,
xlabel=None,
ylabel=None,
xticklabels_=None,
):
occupation_label = {}
for i, (key, val) in enumerate(occupation_dict.items()):
occupation_label[i] = val.get(name, 0) / self.fps * 60
occ_time = np.zeros(len(occupation_label.keys()))
for key, val in occupation_label.items():
occ_time[key] = val
c = np.array(list(occupation_label.keys()))
return self.barplot(
occ_time,
c=c,
xticklabels_=xticklabels_,
xlabel=xlabel,
ylabel=ylabel,
title=title,
)
def evolution_rate_plot(self, evolution_rate_dict, opt="", label_name_dict=None):
nrows, ncols = self.__class__._get_square_length(evolution_rate_dict)
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)
for idx, (lbl, c_evol_dict) in enumerate(evolution_rate_dict.items()):
evol_rates = np.full(
(
max(rate.shape[0] for rate in c_evol_dict.values()),
len(c_evol_dict.keys()),
),
np.nan,
)
labels_expt = []
for i, (name, rate) in enumerate(c_evol_dict.items()):
evol_rates[0 : rate.shape[0], i] = rate
labels_expt.append(name)
title_ = self.__class__._get_opt_label_name(
opt, lbl, label_name_dict=label_name_dict
)
row = idx // ncols
col = idx % ncols
self.tick_stepsize = 4
self.xtick_params["labelrotation"] = 90
fig, axes[row, col] = self.lineplot(
evol_rates,
labels=labels_expt,
title=title_,
xlabel="Time (hour)",
ax=axes[row, col],
legend=False,
lw=3,
)
fig = self.__class__._set_gender_defined_colors(fig)
fig = self.__class__._set_hour_to_HM_formatter(fig)
fig = self._set_suptitle(fig, f"Evolution Rates for {opt.title()}")
fig.tight_layout()
return fig, axes
def observation_time_histogram(
self,
bout_details_dict,
name=None,
opt="",
label_name_dict=None,
):
nrows, ncols = self.__class__._get_square_length(bout_details_dict)
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)
for idx, (lbl, bout_details) in enumerate(bout_details_dict.items()):
durations = []
mid_bout = []
row = idx // ncols
col = idx % ncols
for detail_dict in bout_details:
durations.append(detail_dict["duration"] / self.fps)
mid_bout.append(
(detail_dict["end"] + detail_dict["start"]) / 2 / self.fps / 60 / 60
)
title_ = self.__class__._get_opt_label_name(
opt, lbl, label_name_dict=label_name_dict, lw=40
)
axes[row, col].hist(
mid_bout, 10, alpha=0.75, density=True, edgecolor="black"
)
axes[row, col] = self._set_title(axes[row, col], title_)
axes[row, col] = self._set_xaxis(axes[row, col], "Time (hour)")
if name is None:
suptitle_ = aux.txt_wrap(f"{opt.title()} Observation Times")
else:
suptitle_ = aux.txt_wrap(f"{opt.title()} Observation Times of {name}")
fig = self.__class__._set_hour_to_HM_formatter(fig)
fig = self._set_suptitle(fig, suptitle_)
fig.tight_layout()
return fig, axes
def feature_distn_boxenplot(
self,
frame_values,
frame_labels,
value_names_dict,
name=None,
label_name_dict=None,
opt="",
):
values = defaultdict(list)
nrows, ncols = self.__class__._get_square_length(value_names_dict)
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)
xlabel_ = self.__class__._get_opt_label_name(opt)
for j, (_, val_name) in enumerate(value_names_dict.items()):
row = j // ncols
col = j % ncols
values["y"] = frame_values[:, j]
values["x"] = frame_labels
unique_lbls = np.unique(frame_labels)
if label_name_dict is None:
xticklabels_ = sorted(unique_lbls)
else:
xticklabels_ = [label_name_dict[lb] for lb in sorted(unique_lbls)]
fig, axes[row, col] = self.boxenplot(
values,
title=aux.txt_wrap(f"{val_name}", lw=20),
ylabel="Value",
xlabel=xlabel_,
xticklabels_=xticklabels_,
showfliers=False,
ax=axes[row, col],
)
axes[row, col].tick_params(
axis="x", labelsize=math.ceil(self.fontsize["tick"] / 2)
)
if name is not None:
suptitle_ = aux.txt_wrap(
f"{opt.title()} Feature Value Boxen-plots of {name}"
)
else:
suptitle_ = aux.txt_wrap(f"{opt.title()} Feature Value Boxen-plots")
fig = self._set_suptitle(fig, suptitle_)
fig.tight_layout()
return fig, axes
def feature_distn_clustermap(
self,
frame_values,
frame_labels,
value_names_dict,
name=None,
label_name_dict=None,
opt="",
):
unique_lbls = np.unique(frame_labels)
heat_val = np.zeros((len(value_names_dict.values()), len(unique_lbls)))
if name is not None:
title = aux.txt_wrap(
f"{opt.title()} Clustermap of Feature Value Distributions of {name}"
)
else:
title = aux.txt_wrap(
f"{opt.title()} Clustermap of Feature Value Distributions"
)
for i, (_, _) in enumerate(value_names_dict.items()):
for j, c in enumerate(sorted(unique_lbls)):
heat_val[i, j] = np.mean(frame_values[frame_labels == c, i])
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
"""Core integrated routines for detecting and characterizing crosstalk"""
import numpy as _np
from . import objects as _obj
from ... import objects as _pygobjs
from ... import io as _pygio
import pcalg
from gsq.ci_tests import ci_test_dis
import collections.abc
from sympy import isprime
def tuple_replace_at_index(tup, ix, val):
return tup[:ix] + (val,) + tup[ix + 1:]
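# Usage sketch (illustrative):
# >>> tuple_replace_at_index((0, 1, 2), 1, 9)
# (0, 9, 2)
# The tuple is rebuilt around index ix; the input tuple itself is unchanged.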
def load_pygsti_dataset(filename):
"""
Loads a pygsti dataset from file.
This is a wrapper that just checks the first line, and replaces it with the newer outcome specification
format if it's the old type.
"""
try:
# file = open(filename, "r")
open(filename, "r")
except IOError:
print("File not found, or other file IO error.")
# lines = file.readlines()
# file.close()
# if lines[0] == "## Columns = 00 count, 01 count, 10 count, 11 count\n":
# lines[0] = "## Columns = 0:0 count, 0:1 count, 1:0 count, 1:1 count\n"
# file = open(filename, "w")
# file.writelines(lines)
# file.close()
data = _pygio.load_dataset(filename)
return data
def flatten(l):
"""
Flattens an irregular list.
From https://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists
"""
for el in l:
if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
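# Usage sketch (illustrative); strings and bytes are treated as atoms:
# >>> list(flatten([1, [2, [3, 4]], 'ab']))
# [1, 2, 3, 4, 'ab']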
def form_ct_data_matrix(ds, number_of_regions, settings, filter_lengths=[]):
# This converts a DataSet to an array since the code below uses arrays
if type(ds) == _pygobjs.dataset.DataSet:
opstr = ds.keys()[0]
temp = ds.auxInfo[opstr]['settings']
num_settings = len(temp)
settings_shape = _np.shape(settings)
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
dscopy = ds.copy_nonstatic()
# filter out lengths not in filter_lengths
if len(filter_lengths) > 0:
for k in dscopy.keys():
if len(k) not in filter_lengths:
dscopy.remove([k])
dscopy.done_adding_data()
# num columns = number of settings + number of regions (b/c we assume one outcome per region)
#num_columns = num_settings + number_of_regions
num_data = len(dscopy.keys())
data = []
collect_settings = {key: [] for key in range(num_settings)}
for row in range(num_data):
opstr = dscopy.keys()[row]
templine_set = [0] * num_settings
settings_row = dscopy.auxInfo[opstr]['settings']
for key in settings_row:
if len(key) == 1: # single region/qubit gate
templine_set[key[0]] = settings_row[key]
collect_settings[key[0]].append(settings_row[key])
else: # two-region/two-qubit gate
print("Two qubit gate, not sure what to do!!") # TODO
return
outcomes_row = dscopy[opstr]
for outcome in outcomes_row:
templine_out = [0] * number_of_regions
if len(outcome[0]) == 1:
# outcomes labeled by bitstrings
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][0][r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
else:
# outcomes labeled by tuples of bits
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
for r in range(num_rep):
data.append(flattened_line)
#num_seqs = [len(set(collect_settings[i])) for i in range(num_settings)]
data = _np.asarray(data)
# if the dataset is specified by a string assume its a filename with a saved numpy array
elif type(ds) == str:
data = _np.loadtxt(ds)
data = data.astype(int)
data_shape = _np.shape(data)
settings_shape = _np.shape(settings)
# Check that the input data is a 2D array
assert(len(data_shape) == 2), \
"Input data format is incorrect! If the input is a numpy array it must be 2-dimensional."
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
# The number of columns in the data must be consistent with the number of settings
assert(data_shape[1] == (sum(settings) + number_of_regions)), \
"Mismatch between the number of settings specified for each region and the number of columns in data"
num_data = data_shape[0]
#num_columns = data_shape[1]
# if neither a pygsti data set or string, assume a numpy array was passed in
else:
data_shape = _np.shape(ds)
settings_shape = _np.shape(settings)
# Check that the input data is a 2D array
assert(len(data_shape) == 2), \
"Input data format is incorrect! If the input is a numpy array it must be 2-dimensional."
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
# The number of columns in the data must be consistent with the number of settings
assert(data_shape[1] == (sum(settings) + number_of_regions)), \
"Mismatch between the number of settings specified for each region and the number of columns in data"
data = ds
data_shape = _np.shape(data)
return data, data_shape
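# Usage sketch (illustrative): for two single-qubit regions with one setting
# column each, every row of the returned `data` matrix reads
# [outcome_region0, outcome_region1, setting_region0, setting_region1];
# outcomes come first, then settings, matching the docstring convention below.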
def do_basic_crosstalk_detection(ds, number_of_regions, settings, confidence=0.95, verbosity=1, name=None,
assume_independent_settings=True, filter_lengths=[]):
"""
Implements crosstalk detection on multiqubit data (fine-grained data with entries for each experiment).
Parameters
----------
ds : pyGSTi DataSet or numpy array
The multiqubit data to analyze. If this is a numpy array, it must contain time series data and it must
be 2-dimensional with each entry being a sequence of settings and measurement outcomes for each qubit region.
A region is a set of one or more qubits and crosstalk is assessed between regions. The first n entries are
the outcomes and the following entries are settings.
number_of_regions: int, number of regions in experiment
settings: list of length number_of_regions, indicating the number of settings for each qubit region.
confidence : float, optional
verbosity : int, optional
name : str, optional
filter_lengths : list of lengths. If this is not empty the dataset will be filtered and the analysis will only be
done on the sequences of lengths specified in this list. This argument is only used if the dataset is passed in
as a pyGSTi DataSet
Returns
-------
results : CrosstalkResults object
The results of the crosstalk detection analysis. This contains: output skeleton graph and DAG from
PC Algorithm indicating regions with detected crosstalk, all of the input information.
"""
# -------------------------- #
# Format and check the input #
# -------------------------- #
# This converts a DataSet to an array since the code below uses arrays
# -------------------------- #
if type(ds) != _pygobjs.dataset.DataSet and type(ds) != str: # a filename string is handled in the elif branch below
data_shape = _np.shape(ds)
settings_shape = _np.shape(settings)
# Check that the input data is a 2D array
assert(len(data_shape) == 2), \
"Input data format is incorrect!If the input is a numpy array it must be 2-dimensional."
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
# The number of columns in the data must be consistent with the number of settings
assert(data_shape[1] == (sum(settings) + number_of_regions)), \
"Mismatch between the number of settings specified for each region and the number of columns in data"
data = ds
num_data = data_shape[0]
num_columns = data_shape[1]
# This converts a DataSet to an array, as the code below uses arrays
if type(ds) == _pygobjs.dataset.DataSet:
opstr = ds.keys()[0]
temp = ds.auxInfo[opstr]['settings']
num_settings = len(temp)
settings_shape = _np.shape(settings)
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
dscopy = ds.copy_nonstatic()
# filter out lengths not in filter_lengths
if len(filter_lengths) > 0:
for k in dscopy.keys():
if len(k) not in filter_lengths:
dscopy.remove([k])
dscopy.done_adding_data()
# num columns = number of settings + number of regions (b/c we assume one outcome per region)
num_columns = num_settings + number_of_regions
num_data = len(dscopy.keys())
data = []
collect_settings = {key: [] for key in range(num_settings)}
for row in range(num_data):
opstr = dscopy.keys()[row]
templine_set = [0] * num_settings
settings_row = dscopy.auxInfo[opstr]['settings']
for key in settings_row:
if len(key) == 1: # single region/qubit gate
templine_set[key[0]] = settings_row[key]
collect_settings[key[0]].append(settings_row[key])
else: # two-region/two-qubit gate
print("Two qubit gate, not sure what to do!!") # TODO
return
outcomes_row = dscopy[opstr]
for outcome in outcomes_row:
templine_out = [0] * number_of_regions
if len(outcome[0]) == 1:
# outcomes labeled by bitstrings
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][0][r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
else:
# outcomes labeled by tuples of bits
for r in range(number_of_regions):
templine_out[r] = int(outcome[0][1][0][r]) # templine_out[r] = int(outcome[0][r])
# print(templine_out[r])
num_rep = int(outcome[2])
templine_out.append(templine_set)
flattened_line = list(flatten(templine_out))
for r in range(num_rep):
data.append(flattened_line)
data = _np.asarray(data)
# if the dataset is specified by a string assume its a filename with a saved numpy array
elif type(ds) == str:
data = _np.loadtxt(ds)
data = data.astype(int)
data_shape = _np.shape(data)
settings_shape = _np.shape(settings)
# Check that the input data is a 2D array
assert(len(data_shape) == 2), \
"Input data format is incorrect! If the input is a numpy array it must be 2-dimensional."
# Check that settings is a list of length number_of_regions
assert((len(settings_shape) == 1) and (settings_shape[0] == number_of_regions)), \
"settings should be a list of the same length as number_of_regions."
# The number of columns in the data must be consistent with the number of settings
assert(data_shape[1] == (sum(settings) + number_of_regions)), \
"Mismatch between the number of settings specified for each region and the number of columns in data"
num_data = data_shape[0]
num_columns = data_shape[1]
# if neither a pygsti data set or string, assume a numpy array was passed in
else:
data_shape = _np.shape(ds)
"""Test the LinearSystemComp."""
import unittest
import numpy as np
from openmdao.api import Group, Problem, IndepVarComp
from openmdao.api import LinearSystemComp, ScipyIterativeSolver, DirectSolver
from openmdao.devtools.testutil import assert_rel_error
class TestLinearSystem(unittest.TestCase):
"""Test the LinearSystemComp class with a 3x3 linear system."""
def test_linear_system(self):
"""Check against the scipy solver."""
model = Group()
x = np.array([1, 2, -3])
A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
b = A.dot(x)
model.add_subsystem('p1', IndepVarComp('A', A))
model.add_subsystem('p2', IndepVarComp('b', b))
lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
lingrp.add_subsystem('lin', LinearSystemComp(size=3, partial_type="matrix_free"))
model.connect('p1.A', 'lin.A')
model.connect('p2.b', 'lin.b')
prob = Problem(model)
prob.setup()
lingrp = prob.model.get_subsystem('lingrp')
lingrp.linear_solver = ScipyIterativeSolver()
prob.set_solver_print(level=0)
prob.run_model()
assert_rel_error(self, prob['lin.x'], x, .0001)
assert_rel_error(self, prob.model._residuals.get_norm(), 0.0, 1e-10)
def test_linear_system_solve_linear(self):
"""Check against solve_linear."""
x = np.array([1, 2, -3])
A = np.array([[1., 1., 1.], [1., 2., 3.], [0., 1., 3.]])
b = A.dot(x)
b_T = A.T.dot(x)
def check_derivs(lin_sys_comp):
prob = Problem()
prob.model.add_subsystem('p1', IndepVarComp('A', A))
prob.model.add_subsystem('p2', IndepVarComp('b', b))
lingrp = prob.model.add_subsystem('lingrp', Group(), promotes=['*'])
lingrp.add_subsystem('lin', lin_sys_comp)
prob.model.connect('p1.A', 'lin.A')
prob.model.connect('p2.b', 'lin.b')
prob.setup(check=False)
prob.set_solver_print(level=0)
prob.run_model()
prob.model.run_linearize()
# prob.check_partials()
# Compare against calculated derivs
# Ainv = np.linalg.inv(A) # Don't use linalg.inv or a mathematician will die
Ainv = np.array([[3., -2., 1.],
[-3., 3., -2.],
[1., -1., 1.]])
dx_dA = np.outer(Ainv, -x)
import logging
from typing import Dict, List, Tuple, Iterable, Union, Any, Callable
import os
from overrides import overrides
import spacy
import numpy as np
from random import shuffle
from itertools import combinations
from operator import itemgetter
from collections import defaultdict
from tqdm import tqdm
from copy import deepcopy
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.tree import Tree
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import enumerate_spans
from allennlp.data.fields import Field, TextField, SpanField, ListField, SequenceLabelField, \
ArrayField, LabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
def strip_functional_tags(tree: Tree) -> None:
"""
Removes all functional tags from constituency labels in an NLTK tree.
We also strip off anything after a =, - or | character, because these
are functional tags which we don't want to use.
This modification is done in-place.
"""
clean_label = tree.label().split("=")[0].split("-")[0].split("|")[0]
tree.set_label(clean_label)
for child in tree:
if not isinstance(child[0], str):
strip_functional_tags(child)
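# Illustrative example: a constituency label such as "NP-SBJ=1" is reduced to
# "NP"; the tree is modified in place, so the function returns None.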
def get_trees_from_bracket_file(filename) -> List[Tree]:
directory, filename = os.path.split(filename)
trees = list(BracketParseCorpusReader(root=directory, fileids=[filename]).parsed_sents())
modified_trees = []
for tree in trees:
strip_functional_tags(tree)
# This is un-needed and clutters the label space.
# All the trees also contain a root S node.
if tree.label() == "VROOT" or tree.label() == "TOP":
tree = tree[0]
modified_trees.append(tree)
return modified_trees
class MyToken:
def __init__(self, text, idx):
self.text = text
self.idx = idx
def __str__(self):
return str((self.text, self.idx))
def __repr__(self):
return str((self.text, self.idx))
def str2bool(text):
text = text.lower()
if text == 'true':
return True
elif text == 'false':
return False
raise Exception('cannot be converted to bool')
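# Usage sketch (illustrative): str2bool('True') -> True,
# str2bool('false') -> False; any other input raises.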
def adjust_tokens_wrt_char_boundary(tokens: List[Token], char_boundaries: List[int]):
'''
positions indicated by char_boundaries should be segmented.
If one of the indices is 3, it means that there is a boundary between the 3rd and 4th char.
Indices in char_boundaries should be in ascending order.
'''
new_tokens: List[MyToken] = []
cb_ind = 0
for tok in tokens:
start = tok.idx
end = tok.idx + len(tok.text)
ext_bd = []
while cb_ind < len(char_boundaries) and char_boundaries[cb_ind] <= end:
bd = char_boundaries[cb_ind]
if bd != start and bd != end: # boundary not detected by tokenizer
ext_bd.append(bd)
cb_ind += 1
for s, e in zip([start] + ext_bd, ext_bd + [end]):
text = tok.text[s - start:e - start]
new_tokens.append(MyToken(text, s))
return new_tokens
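# Worked example (illustrative): for a single token MyToken("foobar", 0) and
# char_boundaries=[3], the boundary at 3 falls strictly inside the token, so it
# is split into MyToken("foo", 0) and MyToken("bar", 3).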
class BratDoc:
EVENT_JOIN_SYM = '->'
NEG_SPAN_LABEL = '<NEG_SPAN>'
NEG_SPAN_PAIR_LABEL = '<NEG_SPAN_PAIR>'
def __init__(self,
id: str,
doc: Union[str, List[str]],
# span_id -> (span_label, start_ind, end_ind),
# where start_ind is inclusive and end_ind is exclusive
spans: Dict[str, Tuple[str, int, int]],
# (span_id1, span_id2) -> span_pair_label
span_pairs: Dict[Tuple[str, str], str],
bracket_file: str = None,
tree: Tree = None):
self.id = id
self.doc = doc # can be str of chars or a list of tokens
self.spans = spans
self.span_pairs = span_pairs
self.bracket_file = bracket_file
self.tree = tree
def get_span_weights(self) -> Dict[str, float]:
''' compute the weight of the span by how many times it appears in span pairs '''
span2count: Dict[str, int] = defaultdict(lambda: 0)
for sid1, sid2 in self.span_pairs:
span2count[sid1] += 1
span2count[sid2] += 1
return dict((k, float(span2count[k])) for k in self.spans) # can be zero
def skip_span_pairs(self, labels: set):
self.span_pairs = dict((k, v) for k, v in self.span_pairs.items() if v not in labels)
def skip_span(self, labels: set):
self.spans = dict((k, v) for k, v in self.spans.items() if v not in labels)
def remove_span_not_in_pair(self):
sp_set = set(k_ for k in self.span_pairs for k_ in k)
self.spans = dict((k, v) for k, v in self.spans.items() if k in sp_set)
def filter(self, max_span_width: int = None):
''' remove spans longer than max_span_width '''
if max_span_width is None:
return
new_spans = {}
for sid, (slabel, sind, eind) in self.spans.items():
if eind - sind <= max_span_width:
new_spans[sid] = (slabel, sind, eind)
new_span_pairs = {}
for (sid1, sid2), slabel in self.span_pairs.items():
if sid1 in new_spans and sid2 in new_spans:
new_span_pairs[(sid1, sid2)] = slabel
self.spans = new_spans
self.span_pairs = new_span_pairs
def truncate(self, max_doc_len):
''' truncate the document '''
# if doc is list of tokens, max_doc_len is the number of tokens to keep
# if doc is str, max_doc_len is the number of characters to keep
self.doc = self.doc[:max_doc_len]
new_spans = {}
for sid, (slabel, sind, eind) in self.spans.items():
if sind >= max_doc_len or eind > max_doc_len:
continue
new_spans[sid] = (slabel, sind, eind)
new_span_pairs = {}
for (sid1, sid2), slabel in self.span_pairs.items():
if sid1 in new_spans and sid2 in new_spans:
new_span_pairs[(sid1, sid2)] = slabel
self.spans = new_spans
self.span_pairs = new_span_pairs
def build_cluster(self, inclusive=False) -> List[List[Tuple[int, int]]]:
cluster: Dict[Tuple[int, int], int] = {}
num_clusters = 0
num_overlap_pairs = 0
for k1, k2 in self.span_pairs:
offset = 1 if inclusive else 0
span_parent = (self.spans[k1][1], self.spans[k1][2] - offset)
span_child = (self.spans[k2][1], self.spans[k2][2] - offset)
if self.spans[k1][1] < self.spans[k2][2]:
num_overlap_pairs += 1
if span_child not in cluster and span_parent not in cluster:
cluster[span_child] = num_clusters
cluster[span_parent] = num_clusters
num_clusters += 1
elif span_child in cluster and span_parent in cluster:
if cluster[span_parent] != cluster[span_child]: # merge
from_clu = cluster[span_parent]
to_clu = cluster[span_child]
for k in cluster:
if cluster[k] == from_clu:
cluster[k] = to_clu
elif span_child in cluster:
cluster[span_parent] = cluster[span_child]
elif span_parent in cluster:
cluster[span_child] = cluster[span_parent]
result = defaultdict(list)
for k, v in cluster.items():
result[v].append(k)
return list(result.values())
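# Illustrative example: span pairs (A, B) and (B, C) put the (start, end)
# positions of all three spans into one cluster; a pair with no span seen so
# far opens a new cluster, and a pair bridging two existing clusters merges them.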
def enumerate_spans(self, *args, **kwargs):
for start, end in enumerate_spans(self.doc, *args, **kwargs):
yield start, end + 1 # enumerate_spans is inclusive
def get_all_neg_spans(self, max_span_width: int = None) -> Dict[str, Tuple[str, int, int]]:
''' get all negative spans '''
pos_span_poss = set((v[1], v[2]) for k, v in self.spans.items())
if type(self.doc) is not list:
raise Exception('doc must be tokenized before getting all spans')
neg_spans = {}
for start, end in self.enumerate_spans(offset=0, max_span_width=max_span_width):
if (start, end) not in pos_span_poss:
# 'TN' for 'T' and negative
neg_spans['TN' + str(len(neg_spans) + 1)] = (self.NEG_SPAN_LABEL, start, end)
return neg_spans
def get_negative_spans_and_span_pairs(self,
neg_ratio: float,
max_span_width: int = None,
neg_spans: Dict[str, Tuple[str, int, int]] = None) \
-> Tuple[Dict[str, Tuple[str, int, int]], Dict[Tuple[str, str], str]]:
'''
Get negative spans and span pairs; their number is proportional to the number of words.
When neg_spans is provided, directly use it to generate negative pairs.
'''
seq_len = len(self.doc)
# At least one negative example. This is for special cases where the sentences are really short
num_neg = max(1, int(neg_ratio * seq_len))
if neg_spans is None:
# generate negative spans
pos_span_poss = set((v[1], v[2]) for k, v in self.spans.items())
if type(self.doc) is not list:
raise Exception('doc must be tokenized before generating negative samples')
all_spans = list(self.enumerate_spans(offset=0, max_span_width=max_span_width))
shuffle(all_spans)
neg_spans = {}
for start, end in all_spans:
if (start, end) not in pos_span_poss:
if len(neg_spans) >= num_neg:
break
# 'TN' for 'T' and negative
neg_spans['TN' + str(len(neg_spans) + 1)] = (self.NEG_SPAN_LABEL, start, end)
# generate negative span pairs
pos_span_pair_ids = set(self.span_pairs.keys())
all_span_ids = list(self.spans.keys()) + list(neg_spans.keys())
neg_span_pairs = {}
used = set()
def comb():
for i in range(len(all_span_ids) * len(all_span_ids)): # n^2 iterations at most
r1, r2 = np.random.randint(0, len(all_span_ids), 2)
yield all_span_ids[r1], all_span_ids[r2]
for s1, s2 in comb():
if (s1, s2) not in pos_span_pair_ids and (s1, s2) not in used:
if len(neg_span_pairs) >= num_neg:
break
neg_span_pairs[(s1, s2)] = self.NEG_SPAN_PAIR_LABEL
used.add((s1, s2))
return neg_spans, neg_span_pairs
def to_word(self, tokenizer):
''' segment doc and convert char-based index to word-based index '''
# tokenize
toks = tokenizer(self.doc)
char_bd = set()
for sid, (slabel, start, end) in self.spans.items():
char_bd.add(start)
char_bd.add(end)
toks = adjust_tokens_wrt_char_boundary(toks, char_boundaries=sorted(char_bd))
words = [tok.text for tok in toks] # TODO: add normalization?
# build char ind to token ind mapping
idxs = [(tok.idx, tok.idx + len(tok.text)) for tok in toks]
sidx2tidx = dict((s[0], i) for i, s in enumerate(idxs)) # char start ind -> token ind
eidx2tidx = dict((s[1], i) for i, s in enumerate(idxs)) # char end ind -> token ind
# convert spans
new_spans = {}
for sid, (span_label, sidx, eidx) in self.spans.items():
if sidx in sidx2tidx and eidx in eidx2tidx:
new_spans[sid] = (span_label, sidx2tidx[sidx], eidx2tidx[eidx] + 1) # end index is exclusive
else: # remove blanks and re-check
span_str = self.doc[sidx:eidx]
blank_str = len(span_str) - len(span_str.lstrip())
blank_end = len(span_str) - len(span_str.rstrip())
sidx += blank_str
eidx -= blank_end
if sidx in sidx2tidx and eidx in eidx2tidx:
new_spans[sid] = (span_label, sidx2tidx[sidx], eidx2tidx[eidx] + 1) # end index is exclusive
else: # the annotation boundary is not consistent with the tokenization boundary
raise Exception('annotation boundary is not consistent with the tokenization boundary')
# convert span pairs
new_span_pairs = dict(((s1, s2), v) for (s1, s2), v in self.span_pairs.items()
if s1 in new_spans and s2 in new_spans)
return BratDoc(self.id, words, new_spans, new_span_pairs, bracket_file=self.bracket_file, tree=self.tree)
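# Worked example (illustrative): for doc "a cat" tokenized as ["a", "cat"], a
# char-based span (label, 2, 5) covering "cat" becomes the word-based span
# (label, 1, 2): sidx2tidx[2] == 1, eidx2tidx[5] == 1, and the stored end
# index is exclusive, hence eidx2tidx[5] + 1.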
def split_by_sentence(self, sentencizer=None) -> List:
''' split into multiple docs by sentence boundary '''
sents = list(sentencizer(self.doc)) # sentencizer should return the offset between two adjacent sentences
# split bracket file
if self.bracket_file:
trees = get_trees_from_bracket_file(self.bracket_file)
assert len(trees) == len(sents), '#sent not equal to #tree'
# collect spans for each sentence
spans_ord = sorted(self.spans.items(), key=lambda x: (x[1][1], x[1][2])) # sorted by start ind and end ind
num_skip_char = 0
span_ind = 0
spans_per_sent = []
for i, (sent, off) in enumerate(sents):
num_skip_char += off
spans_per_sent.append([])
cur_span = spans_per_sent[-1]
# both start ind and end ind must fall within the current sentence's character range
while span_ind < len(spans_ord) and \
spans_ord[span_ind][1][1] < num_skip_char + len(sent) and \
spans_ord[span_ind][1][2] <= num_skip_char + len(sent):
if spans_ord[span_ind][1][1] < num_skip_char or \
spans_ord[span_ind][1][2] <= num_skip_char:
logger.warning('span is spread across sentences')
span_ind += 1
continue
sid, (slabel, sind, eind) = spans_ord[span_ind]
cur_span.append((sid, (slabel, sind - num_skip_char, eind - num_skip_char)))
span_ind += 1
num_skip_char += len(sent)
# collect span pairs for each sentence
pair_count = 0
brat_doc_li = []
for i, spans in enumerate(spans_per_sent):
if len(sents[i][0]) <= 0: # skip empty sentences
continue
span_ids = set(span[0] for span in spans)
span_pair = dict(((s1, s2), v) for (s1, s2), v in self.span_pairs.items()
if s1 in span_ids and s2 in span_ids)
pair_count += len(span_pair)
tree = trees[i] if self.bracket_file else None
brat_doc_li.append(BratDoc(self.id, sents[i][0], dict(spans), span_pair, tree=tree))
# TODO: span pairs across sentences are allowed
#assert pair_count == len(self.span_pairs), 'some span pairs are spread across sentences'
return brat_doc_li
@classmethod
def dummy(cls):
return cls('id', ['token'], {}, {})
@classmethod
def from_file(cls, text_file: str, ann_file: str, bracket_file: str = None):
''' read text and annotations from files in BRAT format '''
with open(text_file, 'r') as txtf:
doc = txtf.read().rstrip()
spans = {}
span_pairs = {}
eventid2triggerid = {} # e.g., E10 -> T27
with open(ann_file, 'r') as annf:
for l in annf:
if l.startswith('#'): # skip comment
continue
if l.startswith('T'):
# 1. there are some special chars at the end of the line, so we only strip \n
# 2. there are \t in text spans, so we only split twice
ann = l.rstrip('\t\n').split('\t', 2)
else:
ann = l.rstrip().split('\t')
aid = ann[0]
if aid.startswith('T'): # text span annotation
# TODO: consider non-contiguous span
span_label, sind, eind = ann[1].split(';')[0].split(' ')
sind, eind = int(sind), int(eind)
spans[aid] = (span_label, sind, eind)
# TODO: sometimes there are spaces, sometimes not, so we cannot assert
# sanity check
#if len(ann) > 2 and ann[1].find(';') < 0:
# assert ann[2] == doc[sind:eind]
elif aid.startswith('E'): # event span annotation
events = ann[1].split(' ')
trigger_type, trigger_aid = events[0].split(':')
eventid2triggerid[aid] = trigger_aid
for event in events[1:]:
arg_type, arg_aid = event.split(':')
span_pairs[(trigger_aid, arg_aid)] = trigger_type + cls.EVENT_JOIN_SYM + arg_type
elif aid.startswith('R'): # relation annotation
rel = ann[1].split(' ')
assert len(rel) == 3
rel_type = rel[0]
arg1_aid = rel[1].split(':')[1]
arg2_aid = rel[2].split(':')[1]
span_pairs[(arg1_aid, arg2_aid)] = rel_type
elif aid.startswith('N'): # normalization annotation
# TODO: how to deal with normalization?
pass
elif not aid[0].istitle():
continue # skip lines not starting with upper case characters
else:
raise NotImplementedError
# convert event id to text span id
span_pairs_converted = {}
for (sid1, sid2), v in span_pairs.items():
if sid1.startswith('E'):
sid1 = eventid2triggerid[sid1]
if sid2.startswith('E'):
sid2 = eventid2triggerid[sid2]
span_pairs_converted[(sid1, sid2)] = v
return cls(ann_file, doc, spans, span_pairs_converted, bracket_file=bracket_file)
@staticmethod
def normalize_word(word):
if word == '/.' or word == '/?':
return word[1:]
else:
return word
class Brat:
def doc_iter(self, root_dir: str, sentencizer=None):
'''
generate a brat doc for each pair of files (.txt and .ann).
If sentencizer is not None, the document is split into several sentences.
'''
for root, dirs, files in os.walk(root_dir, followlinks=True):
for file in files:
if not file.endswith('.txt'):
continue
text_file = os.path.join(root, file)
ann_file = os.path.join(root, file[:-4] + '.ann')
bracket_file = os.path.join(root, file[:-4] + '.bracket')
if not os.path.exists(ann_file):
continue
if not os.path.exists(bracket_file):
bracket_file = None
brat_doc = BratDoc.from_file(text_file, ann_file, bracket_file=bracket_file)
if sentencizer:
yield from brat_doc.split_by_sentence(sentencizer=sentencizer)
else:
yield brat_doc
@DatasetReader.register('brat')
class BratReader(DatasetReader):
'''
Read files in BRAT format.
The directory should contain pairs of files whose extensions are '.txt' and '.ann' respectively.
Each text file is a document, and its annotations are indexed by character offsets.
'''
PADDING_LABEL = '<PADDING_LABEL>'
def __init__(self,
default_task: str,
task_sample_rate: List[int] = None,
span_weighted_by_pairs: Dict[str, bool] = None,
restart_file: bool = True,
use_neg: Dict[str, bool] = None, # number of negative spans/span pairs per positive one
max_span_width: Dict[str, int] = None, # max number of words in a span
max_sent_len: Dict[str, int] = None, # max length of a sentence
max_num_sample: Dict[str, int] = None, # max number of samples for each task
remove_span_not_in_pair: Dict[str, bool] = None, # e.g., predicates without args in SRL
eval_span_pair_skip: Dict[str, List] = None, # skip these labels during evaluation
sentencizer: Dict[str, str] = None,
task_sampler: str = None,
tokenizer: Dict[str, str] = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super(BratReader, self).__init__(lazy)
self._default_task = default_task
self._task_sample_rate = task_sample_rate
# task name -> whether to use weights for span loss (not use by default)
self._span_weighted_by_pairs = span_weighted_by_pairs or defaultdict(lambda: False)
self._restart_file = restart_file
if use_neg is None:
use_neg = defaultdict(lambda: True)
self._use_neg = use_neg
if max_span_width is None:
max_span_width = defaultdict(lambda: None) # default is not restriction
self._max_span_width = max_span_width
if max_sent_len is None:
max_sent_len = defaultdict(lambda: None) # default not truncate
self._max_sent_len = max_sent_len
if max_num_sample is None:
max_num_sample = defaultdict(lambda: None) # default no limit
self._max_num_sample = max_num_sample
self._task_sampler = task_sampler
if remove_span_not_in_pair is None:
remove_span_not_in_pair = defaultdict(lambda: False)
self._remove_span_not_in_pair = remove_span_not_in_pair
self._eval_span_pair_skip = None
if eval_span_pair_skip is not None:
self._eval_span_pair_skip = defaultdict(set)
for k, v in eval_span_pair_skip.items():
self._eval_span_pair_skip[k].update(v)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._nlp = spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'ner'])
self._nlp.add_pipe(self._nlp.create_pipe('sentencizer'))
# sentencizer
def sentencizer_spacy(doc): # TODO: using the sentencizer might cause lots of spans across sentences
n_newline = 0
for i, _sent in enumerate(doc.split('\n')):
prev = -n_newline
sents = self._nlp(_sent)
sents.is_parsed = True # spacy's bug
for j, sent in enumerate(sents.sents):
off = sent.start_char - prev
prev = sent.end_char
yield sent.text, off
n_newline = len(_sent) - sent.end_char
n_newline += 1 # for "\n"
def sentencizer_newline(doc):
for i, _sent in enumerate(doc.split('\n')):
yield _sent, 0 if i == 0 else 1
def sentencizer_concat(doc):
yield doc.replace('\n', ' '), 0
n2sentencizer = {
'spacy': sentencizer_spacy,
'newline': sentencizer_newline,
'concat': sentencizer_concat
}
if sentencizer is None:
self._sentencizer = defaultdict(lambda: sentencizer_newline)
else:
self._sentencizer = dict((k, n2sentencizer[v]) for k, v in sentencizer.items())
# tokenizer
tokenizer_spacy = lambda sent: self._nlp(sent)
def tokenizer_space(sent):
tokens = []
offset = 0
for i, t in enumerate(sent.split(' ')):
tokens.append(MyToken(t, offset))
offset += len(t) + 1 # for space
return tokens
n2tokenizer = {
'spacy': tokenizer_spacy,
'space': tokenizer_space
}
if tokenizer is None:
self._tokenizer = defaultdict(lambda: tokenizer_spacy)
else:
self._tokenizer: Dict[str, Callable] = dict((k, n2tokenizer[v]) for k, v in tokenizer.items())
@overrides
def _read(self, file_path: str):
task_fp_li = file_path.split(':') # dirs for multiple tasks are separated by ":"
task_fp_li = [tfp.split('|', 2) for tfp in task_fp_li] # each task is of the format "task|e2e|filepath"
task_fp_li = [(task, str2bool(e2e), file_path) for task, e2e, file_path in task_fp_li]
is_training = file_path.find('/train') != -1
task_sample_rate = self._task_sample_rate
if task_sample_rate is None: # use uniform sampling as default
task_sample_rate = [1] * len(task_fp_li)
assert len(task_sample_rate) == len(task_fp_li), 'length inconsistent'
# use task sampler instead of specifying sampling rate manually
if is_training and self._task_sampler:
if not hasattr(self, '_num_samples'):
self._num_samples = np.array([list(self._read_one_task(
file_path, task=task, e2e=e2e, estimate_count=True))[0]
for task, e2e, file_path in task_fp_li])
logger.info('#samples estimation: {}'.format(self._num_samples))
if self._task_sampler == 'log':
task_sample_rate = self._num_samples / np.min(self._num_samples)
task_sample_rate = np.log2(task_sample_rate + 1)
import pickle
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import numpy as np
import os
import clip
from PIL import Image
from sklearn.decomposition import PCA
from ZSSGAN.utils.text_templates import imagenet_templates, part_templates, imagenet_templates_small
class DirectionLoss(torch.nn.Module):
def __init__(self, loss_type='mse'):
super(DirectionLoss, self).__init__()
self.loss_type = loss_type
self.loss_func = {
'mse': torch.nn.MSELoss,
'cosine': torch.nn.CosineSimilarity,
'mae': torch.nn.L1Loss
}[loss_type]()
def forward(self, x, y):
if self.loss_type == "cosine":
return 1. - self.loss_func(x, y)
return self.loss_func(x, y)
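# Usage sketch (illustrative): DirectionLoss('cosine')(x, y) returns
# 1 - CosineSimilarity()(x, y), so a loss of 0 means perfectly aligned
# directions; 'mse' and 'mae' behave as plain elementwise regression losses.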
class CLIPLoss(torch.nn.Module):
def __init__(self, device, lambda_direction=1., lambda_patch=0., lambda_global=0., \
lambda_manifold=0., lambda_texture=0., patch_loss_type='mae', \
direction_loss_type='cosine', clip_model='ViT-B/32', args=None):
super(CLIPLoss, self).__init__()
self.device = device
self.args = args
self.model_name = clip_model
self.model, clip_preprocess = clip.load(clip_model, device=self.device)
# self.model.requires_grad_(False)
self.clip_preprocess = clip_preprocess
self.preprocess = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
clip_preprocess.transforms[:2] + # to match CLIP input scale assumptions
clip_preprocess.transforms[4:]) # + skip convert PIL to tensor
self.target_direction = None
self.patch_text_directions = None
self.patch_loss = DirectionLoss(patch_loss_type)
self.direction_loss = DirectionLoss(direction_loss_type)
self.patch_direction_loss = torch.nn.CosineSimilarity(dim=2)
self.lambda_global = lambda_global
self.lambda_patch = lambda_patch
self.lambda_direction = lambda_direction
self.lambda_manifold = lambda_manifold
self.lambda_texture = lambda_texture
self.alpha = args.alpha
self.src_text_features = None
self.target_text_features = None
self.angle_loss = torch.nn.L1Loss()
self.id_loss = DirectionLoss('cosine')
# self.model_cnn, preprocess_cnn = clip.load("RN50", device=self.device)
# self.preprocess_cnn = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
# preprocess_cnn.transforms[:2] + # to match CLIP input scale assumptions
# preprocess_cnn.transforms[4:]) # + skip convert PIL to tensor
self.texture_loss = torch.nn.MSELoss()
self.pca_components = None
self.condition = None
self.pca_threshold = None
self.clip_mean = None
self.pca = self.get_pca()
def get_pca(self):
orig_sample_path = os.path.join('../weights/clip_mean/', f"{self.args.dataset}_{self.model_name[-2::]}_samples.pkl")
with open(orig_sample_path, 'rb') as f:
X = pickle.load(f)
X = np.array(X)
self.samples = X
self.clip_mean = torch.from_numpy(np.mean(X, axis=0)).float().to(self.device)
# Define a pca and train it
pca = PCA(n_components=self.args.pca_dim)
pca.fit(X)
# Get the standar deviation of samples and set threshold for each dimension
# threshold = np.sqrt(pca.explained_variance_) * self.alpha
# self.pca_threshold = torch.from_numpy(threshold).float().to(self.device)
# self.pca_components = torch.from_numpy(pca.components_).float().to(self.device)
return pca
def tokenize(self, strings: list):
return clip.tokenize(strings).to(self.device)
def encode_text(self, tokens: list) -> torch.Tensor:
return self.model.encode_text(tokens)
def encode_images(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess(images).to(self.device)
return self.model.encode_image(images)
def encode_images_with_cnn(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess_cnn(images).to(self.device)
return self.model_cnn.encode_image(images)
def distance_with_templates(self, img: torch.Tensor, class_str: str, templates=imagenet_templates) -> torch.Tensor:
text_features = self.get_text_features(class_str, templates)
image_features = self.get_image_features(img)
similarity = image_features @ text_features.T
return 1. - similarity
def get_text_features(self, class_str: str, templates=imagenet_templates, norm: bool = True) -> torch.Tensor:
template_text = self.compose_text_with_templates(class_str, templates)
tokens = clip.tokenize(template_text).to(self.device)
text_features = self.encode_text(tokens).detach()
if norm:
text_features /= text_features.norm(dim=-1, keepdim=True)
return text_features
def get_image_features(self, img: torch.Tensor, norm: bool = True) -> torch.Tensor:
image_features = self.encode_images(img)
if norm:
image_features /= image_features.clone().norm(dim=-1, keepdim=True)
return image_features
def get_similar_img(self, tgt_vec):
tgt = tgt_vec[0].cpu().numpy()
sim = np.dot(self.samples, tgt)
orders = np.argsort(sim)[::-1]
print("Orders: {}, Similarities: {}".format(orders[0:20], sim[orders[0:20]]))
src = self.samples[orders[0:1]]
src = src * sim[orders[0:1], None]
src = torch.from_numpy(src).to(tgt_vec.device, dtype=tgt_vec.dtype).mean(axis=0, keepdim=True)
# src /= src.norm(dim=-1, keepdim=True)
return src
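# Sketch (illustrative): since the cached sample vectors and tgt_vec are
# assumed L2-normalized, the dot products in `sim` are cosine similarities;
# the single best match is rescaled by its similarity before being returned.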
def supress_normal_features(self, vec, is_target=False):
'''
Suppress normal features of the given vector based on the original StyleGAN
Params:
vec: the vector to be suppressed
'''
if self.args.supress == 0:
return vec
elif self.args.supress == 1:
if isinstance(self.condition, np.ndarray): # a None condition must not reach torch.from_numpy
self.condition = torch.from_numpy(self.condition).unsqueeze(0).float().to(vec.device)
print("The number of style and special attrs: ", self.condition.sum())
return vec
elif self.args.supress == 2:
if self.clip_mean is not None:
vec = vec - self.clip_mean
vec_pca = vec @ self.pca_components.t()
if self.condition is None:
self.condition = (vec_pca[0].abs() > self.pca_threshold).unsqueeze(0).float()
return vec_pca * self.condition if is_target else vec_pca
else:
raise RuntimeError(f"The choice {self.args.supress} is illegal! Please choose it among 0, 1, 2.")
def keep_normal_features(self, vec):
'''
Keep normal features of the given vector based on original StyleGAN
'''
if self.args.supress == 0:
return vec * 0
elif self.args.supress == 1:
return vec * (1 - self.condition)
elif self.args.supress == 2:
if self.clip_mean is not None:
vec = vec - self.clip_mean
vec_pca = vec @ self.pca_components.t()
# return vec_pca * (1 - self.condition)
return vec_pca
else:
raise RuntimeError(f"The choice {self.args.supress} is illegal! Please choose it among 0, 1, 2.")
def get_pca_features(self, vec):
'''
Convert CLIP features to PCA features
'''
if self.clip_mean is None:
return vec
vec = vec - self.clip_mean
return vec @ self.pca_components.t()
def compute_text_direction(self, source_class: str, target_class: str) -> torch.Tensor:
source_features = self.clip_mean
target_features = self.get_text_features(target_class)
# Supress normal features and keep special features in the text feature
# target_features = self.supress_normal_features(target_features, is_target=True)
# source_features = self.supress_normal_features(source_features, is_target=True)
# source_features = 0
text_direction = (target_features - source_features).mean(axis=0, keepdim=True)
# text_direction = target_features.mean(axis=0, keepdim=True)
text_direction /= text_direction.norm(dim=-1, keepdim=True)
return text_direction
def get_raw_img_features(self, imgs: str):
pre_i = self.clip_preprocess(Image.open(imgs)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(pre_i)
encoding /= encoding.norm(dim=-1, keepdim=True)
return encoding
def compute_img2img_direction(self, source_images: torch.Tensor, target_images: list) -> torch.Tensor:
with torch.no_grad():
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
# src_encoding = self.get_image_features(source_images)
# src_encoding = src_encoding.mean(dim=0, keepdim=True)
src_encoding = self.clip_mean
# src_encoding = self.supress_normal_features(src_encoding, is_target=True)
direction = target_encoding - src_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def compute_corresponding_img2img_direction(self, source_images: list, target_images: list) -> torch.Tensor:
with torch.no_grad():
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
source_encodings = []
for source_img in source_images:
preprocessed = self.clip_preprocess(Image.open(source_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
source_encodings.append(encoding)
source_encoding = torch.cat(source_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
source_encoding = source_encoding.mean(dim=0, keepdim=True)
direction = target_encoding - source_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def set_text_features(self, source_class: str, target_class: str) -> None:
source_features = self.get_text_features(source_class).mean(axis=0, keepdim=True)
self.src_text_features = source_features / source_features.norm(dim=-1, keepdim=True)
target_features = self.get_text_features(target_class).mean(axis=0, keepdim=True)
self.target_text_features = target_features / target_features.norm(dim=-1, keepdim=True)
def clip_angle_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.src_text_features is None:
self.set_text_features(source_class, target_class)
cos_text_angle = self.target_text_features @ self.src_text_features.T
text_angle = torch.acos(cos_text_angle)
src_img_features = self.get_image_features(src_img).unsqueeze(2)
target_img_features = self.get_image_features(target_img).unsqueeze(1)
cos_img_angle = torch.clamp(target_img_features @ src_img_features, min=-1.0, max=1.0)
img_angle = torch.acos(cos_img_angle)
text_angle = text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
cos_text_angle = cos_text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
return self.angle_loss(cos_img_angle, cos_text_angle)
def compose_text_with_templates(self, text: str, templates=imagenet_templates) -> list:
return [template.format(text) for template in templates]
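# Directional CLIP loss (in the spirit of StyleGAN-NADA): rather than matching
# the edited image to the target text directly, align the CLIP-space edit
# direction (target image - source image) with the text direction
# (target class - source class).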
def clip_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.target_direction is None:
self.target_direction = self.compute_text_direction(source_class, target_class)
src_encoding = self.get_image_features(src_img)
# src_encoding = self.supress_normal_features(src_encoding, is_target=True)
target_encoding = self.get_image_features(target_img)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
edit_direction = (target_encoding - src_encoding)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
return self.direction_loss(edit_direction, self.target_direction).mean()
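# PCA-space variant of the directional loss above: image encodings are first
# projected onto the PCA basis (clip_mean / pca_components) before the edit
# direction is compared with the text direction.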
def pca_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.target_direction is None:
self.target_direction = self.compute_text_direction(source_class, target_class)
# if self.args.use_mean:
# src_encoding = self.clip_mean
# else:
src_encoding = self.get_image_features(src_img)
src_encoding = self.get_pca_features(src_encoding)
target_encoding = self.get_image_features(target_img)
target_encoding = self.get_pca_features(target_encoding)
edit_direction = (target_encoding - src_encoding)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
return self.direction_loss(edit_direction, self.target_direction).mean()
def global_clip_loss(self, img: torch.Tensor, text) -> torch.Tensor:
if not isinstance(text, list):
text = [text]
tokens = clip.tokenize(text).to(self.device)
image = self.preprocess(img)
logits_per_image, _ = self.model(image, tokens)
return (1. - logits_per_image / 100).mean()
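# CLIP's logits_per_image is cosine similarity multiplied by the learnt
# logit scale (~100 for the released models), hence the division by 100.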
def adaptive_global_clip_loss(self, img: torch.Tensor, text) -> torch.Tensor:
if self.alpha == 0:
return self.global_clip_loss(img, text)
text_features = self.get_text_features(text, templates=['{}'])
img_features = self.get_image_features(img)
text_features = text_features - self.pca_mean.unsqueeze(0)
text_features = text_features @ self.pca_cov.t()
img_features = img_features - self.pca_mean.unsqueeze(0)
img_features = img_features @ self.pca_cov.t()
logits_per_img = img_features @ text_features.t()
return (1. - logits_per_img).mean()
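# Helpers for patch-based CLIP losses: sample random square patch centres,
# keeping a half-patch margin so every crop stays inside the image.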
def random_patch_centers(self, img_shape, num_patches, size):
batch_size, channels, height, width = img_shape
half_size = size // 2
patch_centers = np.concatenate([np.random.randint(half_size, width - half_size, size=(batch_size * num_patches, 1)),
np.random.randint(half_size, height - half_size, size=(batch_size * num_patches, 1))], axis=1)  # y-column and return reconstructed by symmetry with the x sampling
return patch_centers
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 04 22:18:20 2018
@author0: MIUK
@author1: FS
Generate datasets for ANN training testing.
TODO: Ensure compatibility with ANN scripts
TODO: Look at shape of Nuke's input file
"""
from ..utility import bipartite
import numpy as np
def gen_dataset(nb_train, nb_test, nb_m, e_min = 0, e_max = np.log(2),
subsyst = ['A', 'B'], check_e = False, states = False):
""" Generate data_set of measures based on random states of uniformly
distributed entropies
Arguments
nb_train: int
how many training examples do we want
nb_test: int
how many testing examples do we want
nb_m: int
How many measurements are we doing
e_min: float
Min entropy
e_max: float
Max entropy
check_e: bool
Verify that entanglement required is the same as the one obtained
states: bool
should we return the underlying states used
Output
------
res: (train, test, info)
train/test: (X, Y, states(optional))
info: str
Provides information about the columns and parameters used to
generate the data
"""
info = []
info.append("Number of measuremets: {0} ".format(nb_m))
info.append("Ent min/max required {0}/{1}".format(e_min, e_max))
nb_total = nb_train + nb_test
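# Sample target entropies uniformly, invert them to Schmidt coefficients
# (lambda), then draw random bipartite states realising those coefficients.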
ent = np.random.uniform(e_min, e_max, nb_total)
lambd = bipartite.ent_to_lambda(ent)
st = bipartite.rdm_states_from_lambda(lambd)
if check_e:
ent_final = bipartite.entangl_of_states(st)
assert np.allclose(ent_final, ent), "Entanglement produced doesn't match the requested values"
X = np.zeros((nb_total, 3 * len(subsyst)))
for i, ss in enumerate(subsyst):
info.append("X[{0}:{1}] X, Y, Z measurements on subsyst {2}".format(3*i, 3*i+2, ss))
X[:, (3 * i)] = bipartite.meas_one_sub(st, nb_m, [1,0,0], ss)
X[:, (3 * i + 1)] = bipartite.meas_one_sub(st, nb_m, [0,1,0], ss)
X[:, (3 * i + 2)] = bipartite.meas_one_sub(st, nb_m, [0,0,1], ss)
index = np.arange(nb_total)
"""Decompose an x-polarized plane wave into the VSHs"""
import numpy as np
import matplotlib.pyplot as plt
import miepy
### source definition
source = miepy.sources.plane_wave.from_string(polarization='x')
k = 2*np.pi/1
Nmax = 5
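# k is the wavenumber for wavelength 1; Nmax is the maximum multipole order
# kept in the vector spherical harmonic (VSH) expansion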
### grid plane
x = np.linspace(-.3, .3, 30)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import torch
import numpy as np
import scipy.misc as m
from tqdm import tqdm
from torch.utils import data
from PIL import Image
from data.augmentations import *
from data.base_dataset import BaseDataset
from data.randaugment import RandAugmentMC
import random
def recursive_glob(rootdir=".", suffix=""):
"""Performs recursive glob with given suffix and rootdir
:param rootdir: the root directory
:param suffix: the suffix to be searched
"""
return [
os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir) # os.walk traverses all files in rootdir and its subfolders
for filename in filenames
if filename.endswith(suffix)
]
class Cityscapes_loader(BaseDataset):
"""cityscapesLoader
https://www.cityscapes-dataset.com
Data is derived from CityScapes, and can be downloaded from here:
https://www.cityscapes-dataset.com/downloads/
Many Thanks to @fvisin for the loader repo:
https://github.com/fvisin/dataset_loaders/blob/master/dataset_loaders/images/cityscapes.py
"""
colors = [ # [ 0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
]
label_colours = dict(zip(range(19), colors))
mean_rgb = {
"pascal": [103.939, 116.779, 123.68],
"cityscapes": [0.0, 0.0, 0.0],
} # pascal mean for PSPNet and ICNet pre-trained model
def __init__(self, opt, logger, augmentations = None, split='train'):
"""__init__
:param opt: parameters of dataset
:param logger: logging file
:param augmentations: augmentation pipeline applied to the samples
:param split: dataset split to load ('train' by default)
"""
self.opt = opt
self.root = opt.tgt_rootpath
self.split = split
self.augmentations = augmentations
self.randaug = RandAugmentMC(2, 10)
self.n_classes = opt.n_class
self.img_size = (2048, 1024)
self.mean = np.array(self.mean_rgb['cityscapes'])
self.files = {}
self.paired_files = {}
self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
self.annotations_base = os.path.join(
self.root, "gtFine", self.split
)
self.files = sorted(recursive_glob(rootdir=self.images_base, suffix=".png")) #find all files from rootdir and subfolders with suffix = ".png"
#self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
if self.n_classes == 19:
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33,]
self.class_names = ["unlabelled","road","sidewalk","building","wall",
"fence","pole","traffic_light","traffic_sign","vegetation",
"terrain","sky","person","rider","car",
"truck","bus","train","motorcycle","bicycle",
]
self.to19 = dict(zip(range(19), range(19)))
elif self.n_classes == 16:
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 23, 24, 25, 26, 28, 32, 33,]
self.class_names = ["unlabelled","road","sidewalk","building","wall",
"fence","pole","traffic_light","traffic_sign","vegetation",
"sky","person","rider","car","bus",
"motorcycle","bicycle",
]
self.to19 = dict(zip(range(16), [0,1,2,3,4,5,6,7,8,10,11,12,13,15,17,18]))
elif self.n_classes == 13:
self.valid_classes = [7, 8, 11, 19, 20, 21, 23, 24, 25, 26, 28, 32, 33,]
self.class_names = ["unlabelled","road","sidewalk","building","traffic_light",
"traffic_sign","vegetation","sky","person","rider",
"car","bus","motorcycle","bicycle",
]
self.to19 = dict(zip(range(13), [0,1,2,6,7,8,10,11,12,13,15,17,18]))
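# to19 maps the reduced (16/13-class) train ids back to the standard
# 19-class Cityscapes train ids, e.g. for evaluation or colour-coding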
self.ignore_index = 250
self.class_map = dict(zip(self.valid_classes, range(self.n_classes))) # map valid label ids to contiguous train ids
if not self.files:
raise Exception(
"No files for split=[%s] found in %s" % (self.split, self.images_base)
)
print("Found %d %s images" % (len(self.files), self.split))
def __len__(self):
"""__len__"""
return len(self.files)
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[index].rstrip()
lbl_path = os.path.join(
self.annotations_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path)[:-15] + "gtFine_labelIds.png",
)
img = Image.open(img_path)
lbl = Image.open(lbl_path)
img = img.resize(self.img_size, Image.BILINEAR)
lbl = lbl.resize(self.img_size, Image.NEAREST)
img = np.array(img, dtype=np.uint8)
lbl = np.array(lbl, dtype=np.uint8)
lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
img_full = img.copy().astype(np.float64)
img_full -= self.mean
img_full = img_full.astype(float) / 255.0
img_full = img_full.transpose(2, 0, 1)
lp, lpsoft, weak_params = None, None, None
if self.split == 'train' and self.opt.used_save_pseudo:
if self.opt.proto_rectify:
lpsoft = np.load(os.path.join(self.opt.path_soft, os.path.basename(img_path).replace('.png', '.npy')))
else:
lp_path = os.path.join(self.opt.path_LP, os.path.basename(img_path))
lp = Image.open(lp_path)
lp = lp.resize(self.img_size, Image.NEAREST)
lp = np.array(lp, dtype=np.uint8)
if self.opt.threshold and self.opt.threshold > 0:
conf = np.load(os.path.join(self.opt.path_LP, os.path.basename(img_path).replace('.png', '_conf.npy')))
lp[conf <= self.opt.threshold] = 250
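# low-confidence pseudo-labels are set to the ignore index (250) so they
# do not contribute to the training loss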
input_dict = {}
if self.augmentations is not None:
img, lbl, lp, lpsoft, weak_params = self.augmentations(img, lbl, lp, lpsoft)
img_strong, params = self.randaug(Image.fromarray(img))
img_strong, _, _ = self.transform(img_strong, lbl)
input_dict['img_strong'] = img_strong
input_dict['params'] = params
img, lbl_, lp = self.transform(img, lbl, lp)
input_dict['img'] = img
input_dict['img_full'] = torch.from_numpy(img_full).float()
input_dict['label'] = lbl_
input_dict['lp'] = lp
input_dict['lpsoft'] = lpsoft
input_dict['weak_params'] = weak_params #full2weak
input_dict['img_path'] = self.files[index]
input_dict = {k:v for k, v in input_dict.items() if v is not None}
return input_dict
def transform(self, img, lbl, lp=None, check=True):
"""transform
:param img:
:param lbl:
"""
# img = m.imresize(
# img, (self.img_size[0], self.img_size[1])
# ) # uint8 with RGB mode
img = np.array(img)
# img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
img = img.astype(float) / 255.0
# NHWC -> NCHW
img = img.transpose(2, 0, 1)
classes = np.unique(lbl)
lbl = np.array(lbl)
lbl = lbl.astype(float)
# lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), "nearest", mode="F")
lbl = lbl.astype(int)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes") #TODO: compare the original and processed ones
if check and not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes): # sanity check: all non-ignored labels must be valid train ids
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
if lp is not None:
classes = np.unique(lp)
#!/usr/bin/env python
import sys
sys.path.append(r'C:\Program Files (x86)\Keysight\SD1\Libraries\Python')
from BaseDriver import LabberDriver, Error, IdError
import keysightSD1
import numpy as np
import os
import time
class Driver(LabberDriver):
""" This class implements the Keysight PXI digitizer"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# number of demod blocks in the FPGA
self.num_of_demods = 5
# self.demod_n_pts = self.num_of_demods * 15
self.demod_n_pts = 80
self.bit_stream_name = ''
# set time step and resolution
self.nBit = 16
self.bitRange = float(2**(self.nBit - 1) - 1)
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# get PXI chassis
self.chassis = int(self.dComCfg.get('PXI chassis', 1))
# create AWG instance
self.dig = keysightSD1.SD_AIN()
AWGPart = self.dig.getProductNameBySlot(
self.chassis, int(self.comCfg.address))
self.log('Serial:', self.dig.getSerialNumberBySlot(
self.chassis, int(self.comCfg.address)))
if not isinstance(AWGPart, str):
raise Error('Unit not available')
# check that model is supported
dOptionCfg = self.dInstrCfg['options']
for validId, validName in zip(
dOptionCfg['model_id'], dOptionCfg['model_str']):
if AWGPart.find(validId) >= 0:
# id found, stop searching
break
else:
# loop fell through, raise ID error
raise IdError(AWGPart, dOptionCfg['model_id'])
# set model
self.setModel(validName)
# sampling rate and number of channles is set by model
if validName in ('M3102', 'M3302'):
# 500 MHz models
self.dt = 2E-9
self.nCh = 4
else:
# assume 100 MHz for all other models
self.dt = 10E-9
self.nCh = 4
# create list of sampled data
self.lTrace = [np.array([])] * self.nCh
self.demod_output_ssb = np.zeros((0,), dtype='complex')
self.demod_buffer = np.zeros((0,), dtype=np.int16)
self.dig.openWithSlot(AWGPart, self.chassis, int(self.comCfg.address))
# get hardware version - changes numbering of channels
hw_version = self.dig.getHardwareVersion()
if hw_version >= 4:
# KEYSIGHT - channel numbers start with 1
self.ch_index_zero = 1
else:
# SIGNADYNE - channel numbers start with 0
self.ch_index_zero = 0
self.log('HW:', hw_version)
self.configure_FPGA()
def configure_FPGA(self, reset=False):
"""Load FPGA bitstream and setup triggers"""
self.fpga_config = self.getValue('FPGA Hardware')
if reset or self.fpga_config == 'Only signals':
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Clean_2018-05-31T22_22_11.sbp')
elif self.fpga_config in ('FPGA I/Q and signals', 'Only FPGA I/Q'):
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Demod_v4_IQx5_2018-09-02T19_14_50.sbp')
# don't reload if correct bitstream is already loaded
if bitstream == self.bit_stream_name:
return
if (self.dig.FPGAload(bitstream)) < 0:
if self.fpga_config != 'Only signals':
raise Error('FPGA not loaded, check FPGA version...')
self.bit_stream_name = bitstream
if self.fpga_config != 'Only signals':
for n in range(self.num_of_demods):
LO_freq = self.getValue('LO freq %d' % (n + 1))
self.setFPGALOfreq(n + 1, LO_freq)
self.setFPGATrigger()
def getHwCh(self, n):
"""Get hardware channel number for channel n. n starts at 0"""
return n + self.ch_index_zero
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was called with an error
try:
# flush all memory
for n in range(self.nCh):
self.log('Close ch:', n, self.dig.DAQflush(self.getHwCh(n)))
# remove firmware
self.configure_FPGA(reset=True)
# close instrument
self.dig.close()
except Exception:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# start with setting local quant value
quant.setValue(value)
# if changing FPGA operation, reload firmware
if quant.name == 'FPGA Hardware':
new_value = self.getValue('FPGA Hardware')
# only reload if operation mode changed
if new_value != self.fpga_config:
self.configure_FPGA()
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
# proceed depending on command
if quant.name in ('External Trig Source', 'External Trig Config',
'Trig Sync Mode'):
extSource = int(self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(0, extSource, trigBehavior, sync)
elif quant.name in ('Trig I/O', ):
# get direction and sync from index of comboboxes
direction = int(self.getCmdStringFromValue('Trig I/O'))
self.dig.triggerIOconfig(direction)
elif quant.name in (
'Analog Trig Channel', 'Analog Trig Config', 'Trig Threshold'):
# get trig channel
trigCh = self.getValueIndex('Analog Trig Channel')
mod = int(self.getCmdStringFromValue('Analog Trig Config'))
threshold = self.getValue('Trig Threshold')
self.dig.channelTriggerConfig(self.getHwCh(trigCh), mod, threshold)
elif name in ('Range', 'Impedance', 'Coupling'):
# set range, impedance, coupling at once
rang = self.getRange(ch)
imp = int(self.getCmdStringFromValue('Ch%d - Impedance' % (ch + 1)))
coup = int(self.getCmdStringFromValue('Ch%d - Coupling' % (ch + 1)))
self.dig.channelInputConfig(self.getHwCh(ch), rang, imp, coup)
# FPGA configuration
if quant.name.startswith('LO freq'):
demod_num = int(quant.name[-1])
LO_freq = self.getValue('LO freq ' + str(demod_num))
value = self.setFPGALOfreq(demod_num, LO_freq)
elif quant.name in ('Skip time', 'Integration time'):
self.setFPGATrigger()
return value
def performGetValue(self, quant, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
if (name == 'Signal' or quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
if self.isHardwareLoop(options):
"""Get data from round-robin type averaging"""
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# acquisition was started when arming, just read data
if name == 'Signal':
return quant.getTraceDict(
self.reshaped_traces[ch][seq_no], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
return self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
return quant.getTraceDict(
self.demod_output_vector_I[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
return self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
return quant.getTraceDict(
self.demod_output_vector_Q[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
return quant.getTraceDict(
self.demod_output_vector_ref[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
return self.demod_output_ssb[demod_num, :, seq_no].mean()
elif quant.name.startswith('FPGA Single-shot,'):
return quant.getTraceDict(
self.demod_output_ssb[demod_num, :, seq_no],
dt=1)
# get traces if first call
if self.isFirstCall(options):
# don't arm and measure if in arm/trig mode, was done at arm
if not self.isHardwareTrig(options):
self.getTraces()
# return correct data
if name == 'Signal':
value = quant.getTraceDict(self.lTrace[ch], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
value = self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
value = quant.getTraceDict(
self.demod_output_vector_I[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
value = self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
value = quant.getTraceDict(
self.demod_output_vector_Q[demod_num], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
value = quant.getTraceDict(
self.demod_output_vector_ref[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
value = np.mean(self.demod_output_ssb[demod_num])
elif quant.name.startswith('FPGA Single-shot,'):
# if no records, don't average over number of averages
if self.demod_output_ssb.shape[2] <= 1:
value = quant.getTraceDict(
self.demod_output_ssb[demod_num, :, 0], dt=1)
else:
# records are being used, average over number of averages
value = quant.getTraceDict(
self.demod_output_ssb[demod_num].mean(0), dt=1)
else:
# for all others, return local value
value = quant.getValue()
return value
def performArm(self, quant_names, options={}):
"""Perform the instrument arm operation"""
# only arm digitizer if about to measure read-only values
for name in quant_names:
quant = self.getQuantity(name)
if quant.isPermissionRead():
break
else:
# loop fell through, no read-only quantity, don't arm
return
# arm by calling get traces
if self.isHardwareLoop(options):
# in hardware looping, number of records is set by the looping
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# show status before starting acquisition
self.reportStatus('Digitizer - Waiting for signal')
# get data
self.getTraces(bArm=True, bMeasure=False, n_seq=n_seq)
# report arm completed, to allow client to continue
self.report_arm_completed()
# directly start collecting data (digitizer buffer is limited)
self.getTraces(bArm=False, bMeasure=True, n_seq=n_seq)
# after measurement is done, re-shape data and place in buffer
self.reshaped_traces = []
for trace in self.lTrace:
if len(trace) > 0:
trace = trace.reshape((n_seq, trace.size // n_seq))
self.reshaped_traces.append(trace)
else:
self.getTraces(bArm=True, bMeasure=False)
# report arm completed, to allow client to continue
self.report_arm_completed()
self.getTraces(bArm=False, bMeasure=True)
def getTraces(self, bArm=True, bMeasure=True, n_seq=0):
"""Get all active traces"""
# # test timing
# import time
# t0 = time.clock()
# lT = []
# find out which traces to get
lCh = []
iChMask = 0
for n in range(self.nCh):
if self.fpga_config == 'Only signals':
# normal operation
if self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.append(n)
iChMask += 2**n
elif self.fpga_config == 'FPGA I/Q and signals':
# mixed signal/demod, always enable ch 4 (used for demod)
if (n == 3) or self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.append(n)
iChMask += 2**n
elif self.fpga_config == 'Only FPGA I/Q':
# if only fpga demod, don't read any AWGs but ch 4 (demod)
if n == 3:
lCh.append(n)
iChMask += 2**n
else:
continue
# get current settings
if self.fpga_config in ('Only signals', 'FPGA I/Q and signals'):
nPts = int(self.getValue('Number of samples'))
elif self.fpga_config == 'Only FPGA I/Q':
nPts = self.demod_n_pts
nCyclePerCall = int(self.getValue('Records per Buffer'))
# in hardware loop mode, ignore records and use number of sequences
if n_seq > 0:
nSeg = n_seq
else:
nSeg = int(self.getValue('Number of records'))
nAv = int(self.getValue('Number of averages'))
# trigger delay is in 1/sample rate
nTrigDelay = int(round(self.getValue('Trig Delay') / self.dt))
# special high-speed FPGA mode, don't convert, just transfer
if (self.fpga_config == 'Only FPGA I/Q' and
self.getValue('Hide I/Q') and
not self.getValue('Convert data while streaming')):
only_transfer_fpga = True
else:
only_transfer_fpga = False
if bArm:
# clear old data
self.dig.DAQflushMultiple(iChMask)
self.lTrace = [np.array([])] * self.nCh
self.smsb_info_str = []
self.demod_counter = 0
# only re-allocate large output matrix if necessary (slow)
if self.demod_output_ssb.size != (self.num_of_demods * nSeg * nAv):
self.demod_output_ssb = np.zeros(
(self.num_of_demods, nSeg * nAv), dtype='complex')
else:
# matrix has right size, just reshape
self.demod_output_ssb = self.demod_output_ssb.reshape(
(self.num_of_demods, nSeg * nAv))
# create new binary demod data buffer, if size changed
buf = (nPts * nSeg * nAv) if only_transfer_fpga else (nPts * nSeg)
if self.demod_buffer.size != buf:
self.demod_buffer = np.zeros(buf, dtype=np.int16)
# only initiate diagnostic traces if in use
if not self.getValue('Hide I/Q'):
self.demod_output_vector_I = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_I = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_Q = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_Q = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_ref = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_ref = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_SSB = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_NP = np.zeros(
[self.num_of_demods, nSeg])
self.demod_output_NP = np.zeros(self.num_of_demods)
self.moment_I2 = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.moment_Q2 = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
# configure trigger for all active channels
for nCh in lCh:
self.lTrace[nCh] = np.zeros((nSeg * nPts))
# channel number depends on hardware version
ch = self.getHwCh(nCh)
# extra config for trig mode
if self.getValue('Trig Mode') == 'Digital trigger':
extSource = int(
self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(
ch, extSource, trigBehavior, sync)
self.dig.DAQdigitalTriggerConfig(
ch, extSource, trigBehavior)
elif self.getValue('Trig Mode') == 'Analog channel':
digitalTriggerMode = 0
digitalTriggerSource = 0
trigCh = self.getValueIndex('Analog Trig Channel')
analogTriggerMask = 2**trigCh
#analogTriggerMask = int('1111',2)
self.dig.DAQdigitalTriggerConfig(
ch, digitalTriggerSource, digitalTriggerMode)
self.dig.DAQanalogTriggerConfig(
ch, analogTriggerMask)
# config daq and trig mode
trigMode = int(self.getCmdStringFromValue('Trig Mode'))
self.dig.DAQconfig(ch, nPts, nSeg * nAv, nTrigDelay, trigMode) # TODO change nPts
# start acquiring data
self.dig.DAQstartMultiple(iChMask)
#self.wait(1)
# lT.append('Start %.1f ms' % (1000*(time.clock()-t0)))
#
# return if not measure
if not bMeasure:
return
# define number of cycles to read at a time
nCycleTotal = nSeg * nAv
nCall = int(np.ceil(nCycleTotal / nCyclePerCall))
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
# keep track of progress in percent
old_percent = -1
# self.log('nCall:' + str(nCall), level = 30)
# proceed depending on segment or not segment
if only_transfer_fpga:
# just transfer fpga data, do conversion after to allow fast stream
ch = self.getHwCh(3)
count = 0
for n in range(nCall):
# number of cycles for this call, could be fewer for last call
nCycle = min(nCyclePerCall, nCycleTotal - (n * nCyclePerCall))
# channel number depends on hardware version
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall)) # TODO change nPts
# stop if no data
if data.size == 0:
return
# store data in long vector, convert later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# report progress, only report integer percent
if nCall >= 1:
new_percent = int(100 * n / nCall)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# finally, get demod values
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
elif nSeg <= 1:
# non-segmented acquisition
for n in range(nCall):
# number of cycles for this call, could be fewer for last call
nCycle = min(nCyclePerCall, nCycleTotal - (n * nCyclePerCall))
# self.log('nCycle:' + str(nCycle), level = 30)
# capture traces one by one
for nCh in lCh:
# channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall))
# stop if no data
if data.size == 0:
return
# different operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# average
data = data.reshape((nCycle, nPts)).mean(0)
# adjust scaling to account for summing averages
scale = lScale[nCh] * (nCycle / nAv)
# convert to voltage, add to total average
self.lTrace[nCh] += data * scale
else:
# for demod, immediately get demodulated values
self.getDemodValues(data, nPts, nSeg, nCycle)
# report progress, only report integer percent
if nCall >= 1:
new_percent = int(100 * n / nCall)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# lT.append('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
else:
# segmented acquisition, get calls per segment
(nCallSeg, extra_call) = divmod(nSeg, nCyclePerCall)
# pre-calculate list of cycles/call, last call may have more cycles
if nCallSeg == 0:
nCallSeg = 1
lCyclesSeg = [nSeg]
else:
lCyclesSeg = [nCyclePerCall] * nCallSeg
lCyclesSeg[-1] = nCyclePerCall + extra_call
# pre-calculate scale, should include scaling for averaging
lScale = np.array(lScale, dtype=float) / nAv
for n in range(nAv):
count = 0
# loop over number of calls per segment
for m, nCycle in enumerate(lCyclesSeg):
# capture traces one by one
for nCh in lCh:
# channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall))
# stop if no data
if data.size == 0:
return
# different operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# standard operation, store data in one long vector
self.lTrace[nCh][count:(count + data.size)] += \
data * lScale[nCh]
else:
# store raw demod data, will be extracted later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# after one full set of records, convert demod data
if self.fpga_config != 'Only signals':
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
# report progress, only report integer percent
if nAv >= 1:
new_percent = int(100 * n / nAv)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# at the end, convert binary data to I/Q values
if self.fpga_config != 'Only signals':
self.demod_output_ssb = self.demod_output_ssb.reshape(
(self.num_of_demods, nAv, nSeg))
# lT.append('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
# # log timing info
# self.log(': '.join(lT))
def getRange(self, ch):
"""Get channel range, as voltage. Index start at 0"""
rang = float(self.getCmdStringFromValue('Ch%d - Range' % (ch + 1)))
# range depends on impedance
if self.getValue('Ch%d - Impedance' % (ch + 1)) == 'High':
rang = rang * 2
# special case if range is .25, 0.5, or 1, scale to 0.2, .4, .8
if rang < 1.1:
rang *= 0.8
return rang
def DAQread(self, dig, nDAQ, nPoints, timeOut):
"""Read data diretly to numpy array"""
if dig._SD_Object__handle > 0:
if nPoints > 0:
data = (keysightSD1.c_short * nPoints)()
nPointsOut = dig._SD_Object__core_dll.SD_AIN_DAQread(
dig._SD_Object__handle, nDAQ, data, nPoints, timeOut)
if nPointsOut > 0:
return np.frombuffer(data, dtype=np.int16, count=nPoints)
else:
return np.array([], dtype=np.int16)
else:
return keysightSD1.SD_Error.INVALID_VALUE
else:
return keysightSD1.SD_Error.MODULE_NOT_OPENED
def getDemodValues(self, demod_raw, nPts, nSeg, nCycle):
"""get Demod IQ data from Ch1/2/3 Trace"""
accum_length = self.getValue('Integration time')
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
self.smsb_info_str = []
nDemods = self.num_of_demods
use_phase_ref = self.getValue('Use phase reference signal')
for n in range(nDemods):
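# Each demod block writes 15 int16 words per point: the I accumulator
# (y1/x1), the Q accumulator (y2/x2) and the phase reference (y3/x3),
# each split into a 16-bit lsb, a 16-bit msb and a packed signed top
# byte (smsb) holding bits 32-39 of the 40-bit accumulator.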
y1_lsb = demod_raw[((n * 15) + 0)::nPts]
y1_msb = demod_raw[((n * 15) + 1)::nPts]
x1_lsb = demod_raw[((n * 15) + 2)::nPts]
x1_msb = demod_raw[((n * 15) + 3)::nPts]
y1x1_smsb = demod_raw[((n * 15) + 4)::nPts]
x1_smsb = y1x1_smsb.astype('int8')
y1_smsb = y1x1_smsb.astype('int16') >> 8
y2_lsb = demod_raw[((n * 15) + 5)::nPts]
y2_msb = demod_raw[((n * 15) + 6)::nPts]
x2_lsb = demod_raw[((n * 15) + 7)::nPts]
x2_msb = demod_raw[((n * 15) + 8)::nPts]
y2x2_smsb = demod_raw[((n * 15) + 9)::nPts]
x2_smsb = y2x2_smsb.astype('int8')
y2_smsb = y2x2_smsb.astype('int16') >> 8
y1_int64 = (
y1_lsb.astype('uint16') + y1_msb.astype('uint16') * (2 ** 16) +
y1_smsb.astype('int8') * (2**32))
x1_int64 = (
x1_lsb.astype('uint16') + x1_msb.astype('uint16') * (2 ** 16) +
x1_smsb.astype('int8') * (2**32))
y2_int64 = (
y2_lsb.astype('uint16') + y2_msb.astype('uint16') * (2 ** 16) +
y2_smsb.astype('int8') * (2**32))
x2_int64 = (
x2_lsb.astype('uint16') + x2_msb.astype('uint16') * (2 ** 16) +
x2_smsb.astype('int8') * (2**32))
smsb_info = [np.max(np.abs(x1_smsb)), np.max(np.abs(y1_smsb)),
np.max(np.abs(x2_smsb)), np.max(np.abs(y2_smsb))]
smsb_temp_info_str = str(int(max(smsb_info) / 1.24)) + '%'
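# top-byte usage expressed as a percentage of the int8 headroom (124 ~ 100%)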
self.smsb_info_str.append(smsb_temp_info_str)
warning_thr = 124 # warning indication that overflow can occur
if np.any(np.array(smsb_info) > warning_thr):
warning_str = (
'Warning! overflow may occur in FPGA demod block: %d, %s' %
(n, str(smsb_info)))
self.log(warning_str, level=30)
demod_temp_I = (
(x1_int64.astype('int64') + 1j * y1_int64.astype('int64')) /
2**43 / accum_length * lScale[0])
demod_temp_Q = (
(x2_int64.astype('int64') + 1j * y2_int64.astype('int64')) /
2**43 / accum_length * lScale[1])
# store final values in large array, get indices for current call
k = self.demod_counter
n_values = demod_temp_I.size
if self.getValue('LO freq %d' % (n + 1)) <= 0:
self.demod_output_ssb[n, k:(k + n_values)] = 0.5 * (
np.real(demod_temp_I) + np.imag(demod_temp_Q) -
1j * (np.imag(demod_temp_I) - np.real(demod_temp_Q))
)
else:
self.demod_output_ssb[n, k:(k + n_values)] = 0.5 * (
np.real(demod_temp_I) - np.imag(demod_temp_Q) +
1j * (np.imag(demod_temp_I) + np.real(demod_temp_Q))
)
# self.demod_output_ssb[n] = np.real(self.demod_output_vector_I[n]) - np.imag(self.demod_output_vector_Q[n]) - 1j*(np.imag(self.demod_output_vector_I[n]) + np.real(self.demod_output_vector_Q[n]))
if use_phase_ref or (not self.getValue('Hide I/Q')):
# extract reference signal
y3_lsb = demod_raw[((n * 15) + 10)::nPts]
y3_msb = demod_raw[((n * 15) + 11)::nPts]
x3_lsb = demod_raw[((n * 15) + 12)::nPts]
x3_msb = demod_raw[((n * 15) + 13)::nPts]
y3x3_smsb = demod_raw[((n * 15) + 14)::nPts]
x3_smsb = y3x3_smsb.astype('int8')
y3_smsb = y3x3_smsb.astype('int16') >> 8
y3_int64 = (
y3_lsb.astype('uint16') +
y3_msb.astype('uint16') * (2 ** 16) +
y3_smsb.astype('int8') * (2**32))
x3_int64 = (
x3_lsb.astype('uint16') +
x3_msb.astype('uint16') * (2 ** 16) +
x3_smsb.astype('int8') * (2**32))
demod_temp_ref = (
(x3_int64.astype('int64') + 1j * y3_int64.astype('int64')) /
2**43 / accum_length * lScale[2])
# subtract the reference angle
if use_phase_ref:
ref = np.arctan2(demod_temp_ref.imag, demod_temp_ref.real)
self.demod_output_ssb[n, k:(k + n_values)] /= (
np.cos(ref) - 1j * np.sin(ref))
# if advanced values not in use, don't calculate to save time
if self.getValue('Hide I/Q'):
continue
nAv = self.getValue('Number of averages')
if nSeg <= 1:
demod_temp_I = demod_temp_I.reshape((nCycle, 1)).mean(0)
demod_temp_Q = demod_temp_Q.reshape((nCycle, 1)).mean(0)
demod_temp_ref = demod_temp_ref.reshape((nCycle, 1)).mean(0)
self.demod_output_vector_I[n] += demod_temp_I / nAv * nCycle
self.demod_output_vector_Q[n] += demod_temp_Q / nAv * nCycle
self.demod_output_vector_ref[n] += demod_temp_ref / nAv * nCycle
self.moment_I2[n] += np.power(
np.abs(demod_temp_I), 2) / nAv * nCycle
self.moment_Q2[n] += np.power(
np.abs(demod_temp_Q), 2) / nAv * nCycle
# coding: utf-8
## Trains a One vs. Rest SVM classifier on the fisher vector video outputs.
import os, sys, collections, random, string
import numpy as np
from tempfile import TemporaryFile
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn import svm
from sklearn.multiclass import OneVsRestClassifier
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import logging
logging.basicConfig(filename='output.log',level=logging.DEBUG)
#class_index_file = "/Users/Bryan/CS/CS_Research/code/CS221/class_index.npz"
class_index_file = "/home/parallels/Documents/idt_fv/class_index.npz"
class_index_file_loaded = np.load(class_index_file)
class_index = class_index_file_loaded['class_index'][()]
index_class = class_index_file_loaded['index_class'][()]
#training_output = '/Users/Bryan/CS/CS_Research/code/CS221/UCF101_Fishers/train'
#testing_output = '/Users/Bryan/CS/CS_Research/code/CS221/UCF101_Fishers/test'
training_output = '/home/parallels/Documents/idt_fv/data/UCF101_Fishers/train'
testing_output = '/home/parallels/Documents/idt_fv/data/UCF101_Fishers/test'
################################################################
# Useful Helper functions #
################################################################
#Transforms a list of videos into a dictionary of video name (in lower case) to list of videos.
def toDict(videos):
videos_by_class = dict()
for video in videos:
#we assume each of the videos has the following format:
# v_BasketballDunk_g02_c02.fisher.npz
name = string.lower(video.split('_')[1])
if name not in videos_by_class:
videos_by_class[name] = []
videos_by_class[name].append(video)
return videos_by_class
# videos: dictionary of class_name to fisher.npz files
# percentage: percent of the data that is used for testing. default is 20%
# Returns two lists:
# 1. Training data (which will always include the first few entries from each category since it is assumed
# that these videos helped construct the GMM)
# 2. Testing data.
def split_inputData(videos, percentage):
training_data = []
testing_data = []
for category in videos:
num = len(videos[category])
num_testing = int(round(percentage*num))
# always add the first two elements to the training data
training_data += videos[category][0:2]
remaining_vids = videos[category][2:]
random.shuffle(remaining_vids)
testing_data += remaining_vids[0:num_testing]
training_data += remaining_vids[num_testing:]
return (training_data, testing_data)
#Returns:
# 1. np.ndarray where of verticallys stacked fisher vectors.
# 2. np.array of class labels
# Inputs:
# videos: is a list of fisher.npz files
# fisher_path: path to the fisher vector directory
# class_index: dictionary from video name to the class.
def make_FV_matrix(videos, fisher_path, class_index):
matrix = []
target = []
for video in videos:
vid_path = os.path.join(fisher_path,video)
matrix.append(np.load(vid_path)['fish'])
name = string.lower(video.split('_')[1])
target.append(class_index[name])
X = np.vstack(matrix)
Y = np.array(target)
return (X,Y)
#Given a dictionary of 'Class name' to list of .fisher files,
#Returns a list with the first 'K' .fisher files from each class
#(unlike cut_inputData2 below, no class labels are returned)
def cut_inputData(vid_dict, K):
vids = []
for k,v in vid_dict.iteritems():
vids.extend(v[:K])
return vids
#Given a dictionary of 'Class name' to list of .fisher files,
#Returns
# 1. list of .fisher files of 'K' from each class.
# 2. Class number of each .fisher file
def cut_inputData2(vid_dict, K, class_index):
vids = []
targets = []
for k,v in vid_dict.iteritems():
vids.extend(v[:K])
targets.extend(K*[class_index[k]])
return (vids,targets)
#Returns the Mean Average Precision (mAP) to evaluate the performance of a run
#Arguments:
# 1. classifier such as sklearn.multiclass.OneVsRestClassifier
# 2. X_test: data to classify
# 3. Y_test: class labels.
# Returns: (mAP, [aps])
def metric_mAP(classifier, X_test, Y_test, verbose=False):
estimators = classifier.estimators_
classes = classifier.classes_
aps = []
for estimator,class_num in zip(estimators, classes):
aps.append(metric_AP(estimator, class_num, X_test, Y_test, verbose=verbose))
map_val = np.mean(aps)
if verbose: logging.info("mean AP = %.3f" % map_val)
return map_val
#Average Precision
def metric_AP(estimator, class_num, X_test, Y_test, verbose=False):
scores = estimator.decision_function(X_test)
#Sorted list of (original_index,score) tuples.
scores_sorted = sorted(enumerate(scores), key=lambda x:x[1], reverse=True)
# collect the positive results in the dataset
positive_ranks = [i for i,score in enumerate(scores_sorted) if Y_test[score[0]]==class_num]
# accumulate trapezoids with this basis
recall_step = 1.0 / len(positive_ranks)
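# note: this assumes Y_test contains at least one positive for this class;
# otherwise positive_ranks is empty and the division raises ZeroDivisionError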
ap = 0
for ntp,rank in enumerate(positive_ranks):
# ntp = nb of true positives so far
# rank = nb of retrieved items so far
# y-size on left side of trapezoid:
precision_0 = ntp/float(rank) if rank > 0 else 1.0
# y-size on right side of trapezoid:
precision_1 = (ntp + 1) / float(rank + 1)
ap += (precision_1 + precision_0) * recall_step / 2.0
if verbose: logging.info("class %d, AP = %.3f" % (class_num, ap))
return ap
#For a sklearn.multiclass.OneVsRestClassifier, calculate the mAP (mean interpolated average precision),
# accuracy score, and average Precision
def metric_scores(classifier, X_test, Y_test, verbose=False):
mAP = metric_mAP(classifier, X_test, Y_test,verbose=verbose)
X_test_predictions = classifier.predict(X_test)
accuracy_score = metrics.accuracy_score(Y_test, X_test_predictions)
avg_Precision = metrics.precision_score(Y_test, X_test_predictions, average='macro')
avg_Recall = metrics.recall_score(Y_test, X_test_predictions, average='macro')
return (mAP, accuracy_score, avg_Precision, avg_Recall)
#Need to get T training and all testing videos for a limited number 'C' classes.
def limited_input(training_dict, testing_dict, C, T):
tkeys_overC = [k for k in training_dict.keys() if len(training_dict[k]) >= T]
sampleClasses = random.sample(tkeys_overC,C)
train_vids = []
test_vids = []
for vid_class in sampleClasses:
train_vids.extend(training_dict[vid_class][:T])
test_vids.extend(testing_dict[vid_class])
return (train_vids, test_vids)
#Need to get T training videos for all classes in the input dictionary
def limited_input1(input_dict, T):
vids = []
for k,v in input_dict.iteritems():
if len(v) <= T:
vids.extend(v)
else:
vids.extend(random.sample(v,T))
return vids
#Helper methods for plotting metrics.
#plot_confusion_matrix(Y_test, X_test_predictions)
def plot_confusion_matrix(y_test, y_pred):# Compute confusion matrix
cm = metrics.confusion_matrix(y_test, y_pred)
print(cm)
# Show confusion matrix in a separate window
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
#Returns a PCA matrix.
#Arguments:
# 1. inputX: input data matrix
# 2. n_samples: Number of rows from the inputX matrix to sample to construct PCA matrix.
# 3. pca_dim: Number of PCA components to retain. This will be the reduced feature dimension
# of the input matrix.
# Returns the PCA transform matrix.
# use pca_transform as np.dot(inputX, pca_transform)
#X_train_PCA = np.dot(X_train, pca_transform)
#X_test_PCA = np.dot(X_test, pca_transform)
def train_PCA(inputX, n_samples, pca_dim):
n_samples = min(n_samples, inputX.shape[0])
sample_indices = np.random.choice(inputX.shape[0], n_samples)
sample = inputX[sample_indices]
mean = sample.mean(axis = 0) #for each row
sample = sample - mean
cov = np.dot(sample.T, sample)
#eigvecs are normalized.
orig_comps = inputX.shape[1]
eigvals, eigvecs = np.linalg.eig(cov)
perm = eigvals.argsort() # sort by increasing eigenvalue
pca_transform = eigvecs[:, perm[orig_comps-pca_dim:orig_comps]] # eigenvectors of the pca_dim largest eigenvalues
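# note: the transform is estimated on mean-centred samples, but the usage
# shown above (np.dot(inputX, pca_transform)) does not subtract `mean`;
# for strict PCA, centre inputX first. np.linalg.eigh would also be the
# natural choice for the symmetric covariance matrix.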
return pca_transform
from sklearn.model_selection import cross_validate
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum y-values plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
import time
import json
import tqdm
from ltron.ldraw.documents import LDrawMPDMainFile
from ltron.bricks.brick_scene import BrickScene
from pathlib import Path
# from ltron.gym.components.scene import SceneComponent
from ltron.gym.components.episode import MaxEpisodeLengthComponent
from ltron.gym.components.render import ColorRenderComponent
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
# from ltron.gym.ltron_env import LtronEnv
from ltron.gym.components.viewpoint import (
ControlledAzimuthalViewpointComponent,
RandomizedAzimuthalViewpointComponent,
FixedAzimuthalViewpointComponent)
from ltron.bricks.brick_shape import BrickShape
import copy
import collections
import math
import os
import numpy
import random
import tqdm
def compute_boxsize_old(instances, scene):
instance_tran = {}
for k in instances:
instance_tran[k] = scene.instances.instances[k].transform
instance_pos = {}
for k in instances:
instance_pos[k] = scene.instances.instances[k].brick_shape.bbox
point = []
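# transform the eight corners of each instance's local bounding box into world space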
for ins, bbox in instance_pos.items():
minb = bbox[0]
maxb = bbox[1]
point.append((numpy.matmul(instance_tran[ins], numpy.array([minb[0], minb[1], minb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([maxb[0], minb[1], minb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([minb[0], maxb[1], minb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([minb[0], minb[1], maxb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([maxb[0], maxb[1], minb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([minb[0], maxb[1], maxb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([maxb[0], minb[1], maxb[2], 1])))[:3])
point.append((numpy.matmul(instance_tran[ins], numpy.array([maxb[0], maxb[1], maxb[2], 1])))[:3])
min_y = 100000
max_y = -1000000
min_x = 100000
max_x = -100000
min_z = 100000
max_z = -1000000
for p in point:
if p[1] > max_y:
max_y = p[1]
if p[1] < min_y:
min_y = p[1]
if p[0] > max_x:
max_x = p[0]
if p[0] < min_x:
min_x = p[0]
if p[2] > max_z:
max_z = p[2]
if p[2] < min_z:
min_z = p[2]
# if abs(max_y - min_y) - 35 > 0: return 10000
# else: return -1000
return max(abs(max_y - min_y), abs(max_x - min_x), abs(max_z - min_z))
# The complete parameter is somehow not working
def compute_boxsize(instance, scene, complete=False):
if complete:
# note: originally bbox_vertices was referenced without calling it and
# concatenated along the default axis, which is likely why this branch failed
all_vertices = numpy.concatenate([
scene.instances[ins].bbox_vertices() for ins in scene.instances], axis=1)
else:
all_vertices = numpy.concatenate([
scene.instances[i].bbox_vertices() for i in instance], axis=1)
bbox_min = numpy.min(all_vertices[:3], axis=1)
import ismrmrd
import os
import itertools
import logging
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import xml.dom.minidom
import base64
import ctypes
import re
import mrdhelper
# Folder for debug output files
debugFolder = "/tmp/share/debug"
def process(connection, config, metadata):
logging.info("Config: \n%s", config)
# Continuously parse incoming data parsed from MRD messages
acqGroup = []
imgGroup = []
try:
for item in connection:
# ----------------------------------------------------------
# Raw k-space data messages
# ----------------------------------------------------------
if isinstance(item, ismrmrd.Acquisition):
# Accumulate all imaging readouts in a group
if (not item.is_flag_set(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) and
not item.is_flag_set(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION) and
not item.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA)):
acqGroup.append(item)
# When this criteria is met, run process_raw() on the accumulated
# data, which returns images that are sent back to the client.
if item.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE):
logging.info("Processing a group of k-space data")
image = process_raw(acqGroup, config, metadata)
connection.send_image(image)
acqGroup = []
# ----------------------------------------------------------
# Image data messages
# ----------------------------------------------------------
if isinstance(item, ismrmrd.Image):
# Only process magnitude images -- send phase images back without modification (fallback for images with unknown type)
if (item.image_type is ismrmrd.IMTYPE_MAGNITUDE) or (item.image_type == 0):
imgGroup.append(item)
else:
tmpMeta = ismrmrd.Meta.deserialize(item.attribute_string)
tmpMeta['Keep_image_geometry'] = 1
item.attribute_string = tmpMeta.serialize()
connection.send_image(item)
continue
# Acquisitions were handled above; waveform data is not supported in this example
elif isinstance(item, ismrmrd.Acquisition) or isinstance(item, ismrmrd.Waveform):
continue
elif item is None:
break
else:
logging.error("Unsupported data type %s", type(item).__name__)
if len(imgGroup) > 0:
logging.info("Processing a group of images (untriggered)")
image = process_image(imgGroup, config, metadata)
connection.send_image(image)
imgGroup = []
finally:
connection.send_close()
def process_raw(group, config, metadata):
# Create folder, if necessary
if not os.path.exists(debugFolder):
os.makedirs(debugFolder)
logging.debug("Created folder " + debugFolder + " for debug output files")
# Format data into single [cha PE RO phs] array
lin = [acquisition.idx.kspace_encode_step_1 for acquisition in group]
phs = [acquisition.idx.phase for acquisition in group]
# Use the zero-padded matrix size
data = np.zeros((group[0].data.shape[0],
metadata.encoding[0].encodedSpace.matrixSize.y,
metadata.encoding[0].encodedSpace.matrixSize.x,
max(phs)+1),
group[0].data.dtype)
rawHead = [None]*(max(phs)+1)
for acq, lin, phs in zip(group, lin, phs):
if (lin < data.shape[1]) and (phs < data.shape[3]):
# TODO: Account for asymmetric echo in a better way
data[:,lin,-acq.data.shape[1]:,phs] = acq.data
# center line of k-space is encoded in user[5]
if (rawHead[phs] is None) or (np.abs(acq.getHead().idx.kspace_encode_step_1 - acq.getHead().idx.user[5]) < np.abs(rawHead[phs].idx.kspace_encode_step_1 - rawHead[phs].idx.user[5])):
rawHead[phs] = acq.getHead()
# Flip matrix in RO/PE to be consistent with ICE
data = np.flip(data, (1, 2))
logging.debug("Raw data is size %s" % (data.shape,))
np.save(debugFolder + "/" + "raw.npy", data)
# Fourier Transform
data = fft.fftshift( data, axes=(1, 2))
data = fft.ifft2( data, axes=(1, 2))
data = fft.ifftshift(data, axes=(1, 2))
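# fftshift before and ifftshift after the inverse FFT keep the DC point centred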
# Sum of squares coil combination
# Data will be [PE RO phs]
data = np.abs(data)
data = np.square(data)
data = np.sum(data, axis=0)
data = np.sqrt(data)
logging.debug("Image data is size %s" % (data.shape,))
np.save(debugFolder + "/" + "img.npy", data)
# Normalize and convert to int16
data *= 32767/data.max()
data = np.around(data)
data = data.astype(np.int16)
# Remove readout oversampling
offset = int((data.shape[1] - metadata.encoding[0].reconSpace.matrixSize.x)/2)
data = data[:,offset:offset+metadata.encoding[0].reconSpace.matrixSize.x]
# Remove phase oversampling
offset = int((data.shape[0] - metadata.encoding[0].reconSpace.matrixSize.y)/2)
data = data[offset:offset+metadata.encoding[0].reconSpace.matrixSize.y,:]
logging.debug("Image without oversampling is size %s" % (data.shape,))
np.save(debugFolder + "/" + "imgCrop.npy", data)
# Format as ISMRMRD image data
imagesOut = []
for phs in range(data.shape[2]):
# Create new MRD instance for the processed image
# NOTE: from_array() takes input data as [x y z coil], which is
# different than the internal representation in the "data" field as
# [coil z y x], so we need to transpose
tmpImg = ismrmrd.Image.from_array(data[...,phs].transpose())
# Set the header information
tmpImg.setHead(mrdhelper.update_img_header_from_raw(tmpImg.getHead(), rawHead[phs]))
tmpImg.field_of_view = (ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.x),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.y),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.z))
tmpImg.image_index = phs
# Set ISMRMRD Meta Attributes
tmpMeta = ismrmrd.Meta()
tmpMeta['DataRole'] = 'Image'
tmpMeta['ImageProcessingHistory'] = ['FIRE', 'PYTHON']
tmpMeta['WindowCenter'] = '16384'
tmpMeta['WindowWidth'] = '32768'
tmpMeta['Keep_image_geometry'] = 1
xml = tmpMeta.serialize()
logging.debug("Image MetaAttributes: %s", xml)
tmpImg.attribute_string = xml
imagesOut.append(tmpImg)
# Call process_image() to create RGB images
imagesOut = process_image(imagesOut, config, metadata)
return imagesOut
def process_image(images, config, metadata):
# Create folder, if necessary
if not os.path.exists(debugFolder):
os.makedirs(debugFolder)
logging.debug("Created folder " + debugFolder + " for debug output files")
logging.debug("Processing data with %d images of type %s", len(images), ismrmrd.get_dtype_from_data_type(images[0].data_type))
# Extract image data into a 5D array of size [img cha z y x]
data = np.stack([img.data for img in images])
head = [img.getHead() for img in images]
meta = [ismrmrd.Meta.deserialize(img.attribute_string) for img in images]
# Reformat data to the more intuitive [x y z cha img]
data = data.transpose()
# Reformat data again to [y x z cha img], i.e. [row col] for the first two
# dimensions. Note we will need to undo this later prior to sending back
# to the client
data = data.transpose((1, 0, 2, 3, 4))
# Display MetaAttributes for first image
logging.debug("MetaAttributes[0]: %s", ismrmrd.Meta.serialize(meta[0]))
# Optional serialization of ICE MiniHeader
if 'IceMiniHead' in meta[0]:
logging.debug("IceMiniHead[0]: %s", base64.b64decode(meta[0]['IceMiniHead']).decode('utf-8'))
logging.debug("Original image data is size %s" % (data.shape,))
np.save(debugFolder + "/" + "imgOrig.npy", data)
if data.shape[3] != 1:
logging.error("Multi-channel data is not supported")
return []
# Normalize to (0.0, 1.0) as expected by get_cmap()
data = data.astype(float)
data -= data.min()
data *= 1/data.max()
# Apply colormap
cmap = plt.get_cmap('jet')
rgb = cmap(data)
# Remove alpha channel; shape is now [row col z cha img rgb]
rgb = rgb[...,0:-1]
# Move rgb ahead of img and drop the singleton coil dimension,
# giving [row col z rgb img]
rgb = rgb.transpose((0, 1, 2, 5, 4, 3))
rgb = np.squeeze(rgb, 5)
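# Shape bookkeeping for the colormap step, as a doctest-style sketch on toy data
# (names here are illustrative, not part of the pipeline):
# >>> toy = np.random.rand(4, 4, 1, 1, 2)            # [row col z cha img]
# >>> toy_rgb = plt.get_cmap('jet')(toy)[..., 0:-1]  # [row col z cha img rgb]
# >>> toy_rgb = np.squeeze(toy_rgb.transpose((0, 1, 2, 5, 4, 3)), 5)
# >>> toy_rgb.shape
# (4, 4, 1, 3, 2)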
import scipy
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2
from utils import *
from os import listdir
from os.path import isfile, join, isdir
class AU2Emotion():
#https://imotions.com/blog/facial-action-coding-system/
def __init__(self):
self.aus = ["AU01_c" , "AU02_c" , "AU04_c",
"AU05_c", "AU06_c", "AU07_c", "AU09_c",
"AU10_c", "AU12_c", "AU14_c", "AU15_c",
"AU17_c" , "AU20_c" , "AU23_c", "AU25_c",
"AU26_c" , "AU45_c"]
self.au2idx = {1:0, 2:1, 4:2,
5:3, 6:4, 7:5, 9:6,
10:7, 12:8, 14:9, 15:10,
17:11, 20:12, 23:13, 25:14,
26:15, 45:16}  # AU45 is the last of the 17 AUs in self.aus, i.e. index 16 (45:17 was out of range)
self.emotion2au = {"joy": [6,12],
"sadness": [1, 4, 15],
"surprise": [1, 2, 5, 26],
"fear": [1,2,4,5,7,20,26],
"anger": [4, 5, 7, 23],
#"disgust": [9,15,16],
"contempt": [12,14]}
def emotion2aus(self,emotion,batch_size):
au_list = self.emotion2au[emotion]
au_list_idx = [self.au2idx[i] for i in au_list]
au_vect = np.zeros((batch_size,len(self.aus)))
for i in range(len(self.aus)):
if i in au_list_idx:
au_vect[:,i] = 1
return au_vect
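# Usage sketch: for "joy" (AU06 + AU12) with a batch of 2, only the columns for
# those AUs are set:
# >>> AU2Emotion().emotion2aus("joy", 2)[0]
# array([0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.])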
def get_idx(self,lab_vect,emotion):
au_list = self.emotion2au[emotion]
au_list_idx = [self.au2idx[i] for i in au_list]
idx = np.argwhere(np.take(lab_vect,au_list_idx,axis=1).sum(axis=1)==len(au_list_idx))
return idx
def get_00000_num(num):
    # Zero-pad a frame number to six digits (e.g. 7 -> '000007').
    if num >= 1000000:
        raise Exception("number too high:" + str(num))
    return str(num).zfill(6)
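# Doctest-style examples:
# >>> get_00000_num(7)
# '000007'
# >>> get_00000_num(123456)
# '123456'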
class InMemoryDataLoader():
def __init__(self,
dataset_name,
img_res=(112, 112,3),
root_data_path=None,
# path_image_dir=None,
normalize=True,
# csv_columns = ['frame', 'AU01_r', 'AU02_r', 'AU04_r', 'AU05_r', 'AU06_r', 'AU07_r', 'AU09_r', 'AU10_r',
# 'AU12_r', 'AU14_r', 'AU15_r', 'AU17_r', 'AU20_r', 'AU23_r', 'AU25_r', 'AU26_r', 'AU45_r',
# 'AU01_c', 'AU02_c', 'AU04_c', 'AU05_c', 'AU06_c', 'AU07_c', 'AU09_c', 'AU10_c', 'AU12_c',
# 'AU14_c', 'AU15_c', 'AU17_c', 'AU20_c', 'AU23_c', 'AU25_c', 'AU26_c', 'AU28_c', 'AU45_c'],
csv_columns = ['frame', 'AU01_r', 'AU02_r', 'AU04_r', 'AU05_r', 'AU06_r', 'AU07_r', 'AU09_r', 'AU10_r',
'AU12_r', 'AU14_r', 'AU15_r', 'AU17_r', 'AU20_r', 'AU23_r', 'AU25_r', 'AU26_r', 'AU45_r'],
max_images=-1,
image_patter_fn = 'frame_det_00_FRAME_ID.bmp'):
self.dataset_name = dataset_name
self.img_res = img_res
self.root_data_path = root_data_path
#self.path_csv = path_csv
#self.path_image_dir = path_image_dir
self.csv_columns = csv_columns
self.max_images = max_images
self.image_patter_fn = image_patter_fn # image_patter_fn.replace('FRAME_ID','1')
##
self.normalize = normalize
## load dataset
self._load_internally()
def gen_rand_cond(self,batch_size=1,add_noise=False):
idx = np.random.choice(self.lab_vect.shape[0],size=batch_size)
cond = self.lab_vect[idx]
if add_noise:
cond += np.random.uniform(-0.1, 0.1, cond.shape)
cond = np.clip(a=cond,a_min=0,a_max=5)
return cond
def gen_rand_cond_for_binary_au(self,bt):
au_num = bt.shape[1]
alist = []
for i in range(au_num):
_bt = bt.copy()
_bt[:,i] = np.ones_like(_bt[:,i]) - _bt[:,i]
alist.append(_bt)
#cond = np.concatenate(alist,axis=0)
return alist
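# Each entry of the returned list is a copy of the batch with exactly one AU's
# binary label flipped (1 - value) and all others untouched; e.g. for
# bt = np.array([[0., 1.]]) the result is [array([[1., 1.]]), array([[0., 0.]])].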
def _process_data_dir(self,
im_dir,
other_dir='processed',
csv_fn='EmotioNet.csv',
img_dirn='EmotioNet_aligned'):
labels = pd.read_csv(join(self.root_data_path,im_dir,other_dir,csv_fn))
labels.columns = [i.strip() for i in labels.columns]
print(">> removing", | np.sum(labels['success']==0) | numpy.sum |
"""
Created on May 22, 2018
@author: Moritz
"""
import numpy as np
def sample_identity_node(node, n_samples, rand_gen=None, ranges=None):
if ranges is None or ranges[node.scope[0]] is None:
return rand_gen.choice(node.vals, n_samples)
else:
# Generate bins for the specified range
rang = ranges[node.scope[0]]
# Iterate over the specified ranges
intervals = rang.get_ranges()
probs = np.zeros(len(intervals))
bin_vals = []
for i, interval in enumerate(intervals):
if len(interval) == 1:
lower = np.searchsorted(node.vals, interval[0], side="left")
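# A plausible per-interval weighting, assumed rather than taken from the original
# source: count how many of node.vals fall inside each interval and normalize,
#     higher = np.searchsorted(node.vals, interval[-1], side="right")
#     probs[i] = (higher - lower) / len(node.vals)
# then draw the n_samples with rand_gen.choice over the in-range values using probs.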
"""
desispec.quicklook.qlresolution
===============================
Quicklook version of resolution object that can
calculate resolution efficiently from psf information
Author: <NAME>
"""
import numpy as np
import scipy.sparse
import scipy.special
class QuickResolution(scipy.sparse.dia_matrix):
"""
Quicklook version of the resolution matrix, mimicking desispec.resolution.Resolution
with some reduction in dimensionality. Contains code from the Resolution implementation.
Note that this is similar to desispec.resolution.Resolution, though faster and differing
in implementation details; the two should be cross-checked before merging them
or replacing one with the other.
"""
def __init__(self,mu=None,sigma=None,wdict=None,waves=None,ndiag=9):
self.__ndiag=ndiag
if ndiag & 0x1 == 0:
raise ValueError("Need odd numbered diagonals, got %d"%ndiag)
def _binIntegral(x,mu=None,sigma=None):
"""
x: bin boundaries vector (self.__ndiag,)
mu: means vector of shape[nwave,1]
sigma: sigmas of shape[nwave,1]
"""
nvecs=1
if sigma is not None:
nvecs=sigma.shape[0]
if mu is None:
mu=np.zeros((nvecs,1))
if sigma is None:
sigma=np.ones(mu.shape)*0.5
sx = (np.tile(x, (mu.shape[0], 1)) - mu) / (sigma * np.sqrt(2))
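# A plausible continuation, assumed rather than taken from the original source:
# the mass of a unit-area Gaussian in each bin is a difference of CDFs,
#     return 0.5 * (scipy.special.erf(sx[:, 1:]) - scipy.special.erf(sx[:, :-1]))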
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
elapsed = (i * 3) / 60  # each poll sleeps 3 seconds
print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
return data
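# Usage sketch (the dataset name and date range below are illustrative only):
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
# files = M2M_Files(data, '.*METBK.*\\.nc$')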
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to identify the
matching data files in the THREDDS catalog created for the request.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of file paths in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
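# The raw time axis is seconds since 1900-01-01; /60/60/24 converts to days so
# pandas can anchor the origin, e.g.:
# >>> pd.to_datetime(np.array([0.0, 86400.0]) / 60 / 60 / 24, unit='D',
# ...                origin=pd.Timestamp('1900-01-01'))
# DatetimeIndex(['1900-01-01', '1900-01-02'], dtype='datetime64[ns]', freq=None)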
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
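# structtype grows on demand: indexing one past the end appends a fresh var(),
# which is what lets M2M_URLs write var_list[0].name = ... without pre-sizing:
# >>> vl = structtype()
# >>> vl[0].name = 'time'
# >>> len(vl)
# 1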
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
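# Seawater pCO2 sensor branches. Each branch below maps a platform/node pair
# to its uFrame telemetered stream and configures three variables: time, the
# pCO2 thermistor temperature (degC), and seawater pCO2 (uatm).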
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
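# Seawater pH sensor branches: time, thermistor temperature (degC), and
# seawater pH (dimensionless, reported here as 'unitless').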
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
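# Multispectral downwelling irradiance branches: time plus the downwelling
# spectral irradiance vector (uW cm-2 nm-1), one value per wavelength channel.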
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
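# Seafloor pressure (tide gauge) branches, MFN node only: time, absolute
# seafloor pressure (dbar), and seawater temperature (degC).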
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
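# Moored CTD branches: time, temperature, practical salinity, density,
# pressure, and conductivity. Note the MFN branches use the MFD37 port while
# the buoy branches use SBD17 and the NSIF branches use RID16/RID27.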
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
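# 3-D point velocity meter branches (seafloor MFN node): time, the three
# turbulent velocity components (m/s), and the instrument pressure record.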
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
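# Profiler-mounted velocity meter on the CE09OSPM wire-following profiler
# (WFP01): velocity components plus heading/pitch/roll and the co-located
# CTD pressure. The profiler CTD (CTDPF) branch is grouped here as well.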
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
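# Air-sea pCO2 branches (buoy node): time, surface-seawater and atmospheric
# pCO2 (uatm), and the derived air-sea CO2 flux.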
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
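# Photosynthetically active radiation on the profiler: time, downwelling PAR
# (umol photons m-2 s-1), and the co-located CTD pressure (dbar).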
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
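# Optical absorption/attenuation meter branches. Only the time variable is
# configured here; the multi-wavelength absorption and attenuation arrays are
# not requested in this block.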
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
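# Nitrate sensor branches: time, raw nitrate concentration, and the
# salinity-corrected nitrate (both umol/L). Note that the telemetered data
# are served from the stream named 'suna_dcl_recovered'.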
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
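# 3-axis motion pack branches. From this point on the branches request
# recovered-host data (method == 'RecoveredHost') rather than telemetered.
# Only the time variable is configured for MOPAK.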
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
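# Bulk meteorology package branches (buoy node): sea-surface temperature,
# conductivity and salinity, corrected wind components, barometric pressure,
# air temperature, relative humidity, longwave/shortwave irradiance,
# precipitation, minute heat-flux products, surface currents, and specific
# humidity.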
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
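# Three-channel fluorometer branches: seawater scattering coefficient,
# chlorophyll-a, CDOM, total volume scattering, and optical backscatter.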
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
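# FDCHP = Direct Covariance Flux package; only the timestamp is exposed from
# this recovered-host stream.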
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
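# DOSTA = Dissolved Oxygen Stable Response (Aanderaa optode). NSIF streams
# carry the corrected, raw, and temperature-compensated concentrations;
# MFN streams report only the corrected and ln-optode values.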
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
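# ADCP = acoustic Doppler current profiler (Teledyne RDI WorkHorse);
# earth-referenced velocity per depth bin plus heading/pitch/roll attitude.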
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
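# WAVSS = Surface Wave Spectra (AXYS TriAXYS); bulk wave statistics from the
# buoy-mounted DCL statistics stream.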
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
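# VELPT = Single Point Velocity Meter (Nortek Aquadopp). Velocities are in
# m/s; attitude, temperature, and pressure are raw instrument counts
# (deci-degrees, 0.01 degC, 0.001 dbar) despite the 'centidegree'/'mbar'
# parameter names.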
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
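        # note: the DCL-parsed stream above is kept for reference; this branch
        # requests the plain instrument_recovered stream instead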
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
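# PCO2W = water-side pCO2 (Sunburst SAMI-pCO2); thermistor temperature and
# seawater pCO2 in uatm.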
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
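# PHSEN = Seawater pH (Sunburst SAMI-pH); thermistor temperature and pH
# (total scale, reported here as unitless).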
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
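# SPKIR = Spectral Irradiance (Satlantic OCR-507); downwelling irradiance as a
# vector across the radiometer's seven wavelength channels.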
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
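# PRESF = Seafloor Pressure (Sea-Bird SBE 26plus); tide-resolution absolute
# pressure and seawater temperature from the MFN frames.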
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
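# CTDBP = pumped CTD (Sea-Bird SBE 16plus); temperature, practical salinity,
# density, pressure, and conductivity.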
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# VEL3D (RecoveredHost): 3-D point velocity meter on the MFN -- eastward/northward/upward turbulent velocities plus seawater pressure
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
# PCO2A (RecoveredHost): air-sea pCO2 -- surface-seawater and atmospheric CO2 partial pressures and the derived air-sea CO2 flux
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
# OPTAA (RecoveredHost): optical absorption/attenuation meter; only the time coordinate is mapped for these streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# NUTNR (RecoveredHost): SUNA nitrate sensor -- raw and salinity-corrected nitrate concentrations
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
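# CTDBP (RecoveredInst): instrument-recovered CTD streams; note the 'ctdbp_'-prefixed parameter names in these records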
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
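# CTDPF (RecoveredWFP): wire-following-profiler CTD on the CE09OSPM profiler mooring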
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
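# ADCP (RecoveredInst): earth-referenced velocity profiles -- bin depths, heading/pitch/roll, and eastward/northward/upward seawater velocities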
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
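# ZPLSC (RecoveredInst): echogram data streams; only the time coordinate is mapped here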
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
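# VELPT (RecoveredInst): single-point velocity meter -- velocities plus attitude, temperature, and pressure reported in raw instrument units (deci-degrees, 0.01 degC, 0.001 dbar)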
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
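# VEL3D (RecoveredWFP / RecoveredInst): profiler-mounted (vel3d_k) and MFN-mounted (vel3d_cd) 3-D point velocity meters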
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
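# PRESF (RecoveredInst): seafloor pressure (tide) recorder -- tide pressure and temperature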
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
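# PHSEN: seawater pH and thermistor temperature, recovered-instrument streams (NSIF, then MFN)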
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
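# PCO2W: seawater pCO2 and thermistor temperature, recovered-instrument streams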
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
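# PARAD: photosynthetically available radiation from the CE09OSPM wire-following profiler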
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
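# NUTNR (SUNA): nitrate concentration, raw and salinity-corrected, recovered-instrument streams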
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
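# FDCHP: direct-covariance flux package; only the time variable is mapped for this stream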
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
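# FLORT: fluorometer triplet (chlorophyll-a, CDOM, scattering/backscatter), buoy and profiler streams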
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
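# DOSTA/DOFST: dissolved-oxygen streams (profiler first, then NSIF and MFN recovered-instrument)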
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
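# WAVSS_Stats: MFN wave statistics, mapped here to the ADCPT log9 recovered stream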
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
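# Cabled benthic experiment package (BEP) streamed data below: CTD, DOSTA, PHSEN, PCO2W, ADCP, VEL3D, OPTAA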
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below: inshore profilers CE01ISSP and CE06ISSP first,
# then shelf profilers CE02SHSP and CE07SHSP
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
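# DOSTA: CSPP dissolved oxygen with optode temperature and CTD pressure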
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
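# CTD: CSPP temperature, salinity, density, pressure, and conductivity profiles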
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
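# PARAD: CSPP photosynthetically available radiation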
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
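# NUTNR: CSPP nitrate (recovered streams only for this instrument class)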
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
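# SPKIR: CSPP downwelling spectral irradiance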
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
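# VELPT: CSPP point velocity with attitude (heading/pitch/roll), temperature, and CTD pressure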
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
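# OPTAA: CSPP optical absorption/attenuation; only time and CTD pressure are mapped for this stream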
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
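# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP)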
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
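# CSPP profiler nitrate (NUTNR) streams: salinity-corrected and uncorrected
# nitrate concentration, both in umol/L.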
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
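# CSPP profiler downwelling spectral irradiance (SPKIR) streams.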
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
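# CSPP profiler point-velocity (VELPT) streams: eastward/northward/upward
# velocities, attitude (heading, roll, pitch), temperature, and pressure.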
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
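# CSPP profiler OPTAA streams: only time and interpolated CTD pressure are
# requested for this instrument.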
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
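# CE05MOAS coastal glider branches (platforms CEGL386 through CEGL247).
# Every glider exposes an identical CTD variable list; only the glider ID in
# the uframe path changes between branches, with telemetered and
# recovered_host methods handled as separate cases.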
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
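# Glider dissolved-oxygen (DOSTA) streams: oxygen concentration in umol/L and
# umol/kg, interpolated CTD pressure, and position (lat/lon).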
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
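# Glider fluorometer/backscatter (FLORT) streams (flort_m_sample): scattering
# coefficient, chlorophyll, CDOM, and optical backscatter channels plus
# interpolated CTD pressure and position.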
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
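# --- PARAD (photosynthetically active radiation) glider branches -----------
# The PARAD branches use a smaller variable set: parad_m_par (umol photons
# m-2 s-1) plus interpolated CTD pressure and position. Note the stream name
# differs by method: .../telemetered/parad_m_glider_instrument versus
# .../recovered_host/parad_m_glider_recovered.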
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
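# --- ADCP glider branches ---------------------------------------------------
# ADCP data appear only for the RecoveredHost method in this section (no
# telemetered branches are listed for 03-ADCPAM000). The repeated empty-array
# initialization in every branch could, in principle, be collapsed; a
# hypothetical sketch (illustration only, not part of this dispatch):
#
#     for v in var_list[:11]:        # one slot per named variable
#         v.data = np.array([])      # start each slot as an empty array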
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
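# Caveat (assumption, not verified here): unlike the scalar time series
# above, 'bin_depths' and the velocity components from adcp_velocity_glider
# are typically two-dimensional (time x depth bin), so downstream plotting
# code may need to treat these branches separately.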
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
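# --- METBK1-hr (hourly bulk meteorology) surface-mooring branches -----------
# These branches switch from glider nodes to surface-mooring buoys (SBD11)
# and use the flux timestamp 'met_timeflx' rather than 'time' as the time
# axis. Rough glossary inferred from the names and units (an assumption, not
# an authoritative mapping): met_rainrte = rain rate (mm/hr); met_*flx,
# met_netlirr and met_netsirr_hourly = heat, momentum and radiative flux
# terms (W/m2, N/m2); met_sphum2m = 2 m specific humidity (g/kg);
# met_tempa2m / met_tempskn = 2 m air and skin temperature (degC);
# met_wind10m = 10 m wind speed (m/s); met_stablty = dimensionless
# stability parameter.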
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
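#Coastal Endurance WAVSS Mean Directional Wave Data Streams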
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
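#Coastal Endurance WAVSS Non-Directional Wave Data Streams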
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
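#Coastal Endurance WAVSS Buoy Motion Data Streams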
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
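#Coastal Endurance WAVSS Fourier Coefficient Data Streams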
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
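#Coastal Endurance Oregon Offshore Cabled Profiler Mooring (CE04OSPS/CE04OSPD) Data Streams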
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
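#DOSTA oxygen for this profiler is served through the co-located CTD stream (ctdpf_sbe43_sample)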
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#Dissolved oxygen on this platform is served through the co-located CTD optode
#stream, so the CTDPF dataset is requested in place of
#'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'.
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #this pressure record is shared with the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
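#Each uframe_dataset_name below follows the reference designator pattern
#SITE/NODE/PORT-INSTRUMENT/METHOD/STREAM. A hedged illustration of pulling the
#pieces apart (variable names are illustrative only):
#    site, node, sensor, method, stream = \
#        'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'.split('/')
#Note that on CP01CNSM the METBK1 package is on node SBD11 and METBK2 on SBD12.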
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
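#Scalar wind speed can be derived from the magnetically corrected wind
#components once the arrays are populated (sketch reuses the module's np
#import):
#    wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)   #m/s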
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS (Surface Wave Spectra) Data Streams
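#Five WAVSS dataset families are defined below: bulk statistics (WAVSS_Stats),
#mean directional spectra (WAVSS_MeanDir), non-directional spectra
#(WAVSS_NonDir), buoy motion time series (WAVSS_Motion), and Fourier
#coefficients (WAVSS_Fourier), each in Telemetered and RecoveredHost forms.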
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
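#For a single non-directional record, the frequency axis and significant wave
#height can be reconstructed from the spectral metadata (hedged sketch; i is an
#illustrative record index and the arrays are assumed populated):
#    n  = int(var_list[1].data[i])
#    f  = var_list[2].data[i] + np.arange(n) * var_list[3].data[i]   #Hz
#    m0 = np.trapz(var_list[4].data[i], f)                           #zeroth spectral moment, m2
#    hs = 4.0 * np.sqrt(m0)                                          #significant wave height, m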
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (surface seawater and air pCO2) - Telemetered
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
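#The air-sea pCO2 difference that drives pco2_co2flux can be checked directly
#once the arrays are populated (positive values indicate the surface ocean is
#supersaturated relative to the atmosphere):
#    delta_pco2 = var_list[1].data - var_list[2].data   #uatm, ssw minus atm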
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A - RecoveredHost
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP (Direct Covariance Flux Package); only the time coordinate is predeclared for these streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
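#met_rainrte is an hourly-averaged rain rate; a rough accumulation sketch
#(this assumes one sample per hour with no gaps, which should be verified
#against met_timeflx before use):
#    total_rain_mm = np.nansum(var_list[1].data) * 1.0   #mm/hr times 1 hr per sample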
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
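#CTD Data Streams follow. NSIF refers to each mooring's Near-Surface Instrument
#Frame and MFN to its seafloor Multi-Function Node.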
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
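#The reported density can be cross-checked with the TEOS-10 gsw package
#(an optional dependency; lon and lat below are placeholders for the mooring
#position):
#    import gsw
#    SA  = gsw.SA_from_SP(var_list[2].data, var_list[4].data, lon, lat)
#    CT  = gsw.CT_from_t(SA, var_list[1].data, var_list[4].data)
#    rho = gsw.rho(SA, CT, var_list[4].data)   #kg/m3, compare to var_list[3].data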
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
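#Note the naming difference: RecoveredInst CTD streams use instrument-prefixed
#variable names (ctdbp_seawater_*), while the DCL-logged Telemetered and
#RecoveredHost streams use the generic names (temp, pressure, conductivity).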
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
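# CTD branches for the seafloor multi-function nodes (MFN) follow. The
# NSIF branches above use the RID27 frame; the MFN branches below use
# the MFD37 frame, with the same six-variable CTDBP layout.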
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
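# OPTAA (optical absorption and attenuation) branches. Only the time
# stamp is registered for these streams; the wavelength-dimensioned
# absorption/attenuation channels are not enumerated in var_list,
# presumably because they do not fit the flat scalar layout used here.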
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
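# VELPT (single-point velocity meter) branches for the NSIF. Each
# branch registers three velocity components in m/s plus raw attitude,
# temperature, and pressure counts. A sketch of converting the counts
# to conventional units, with scale factors inferred from the unit
# strings below (not stated elsewhere in this file):
#   heading_deg = var_list[4].data * 0.1    # deci-degrees -> degrees
#   temp_degC   = var_list[7].data * 0.01   # 0.01degC     -> degC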
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
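# FLORT (three-channel fluorometer) branches: seawater scattering
# coefficient, chlorophyll-a, CDOM, total volume scattering, and
# optical backscatter from the NSIF on each mooring.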
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
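# SPKIR (spectral irradiance) branches. The downwelling irradiance is
# a multi-wavelength vector, so only time and the vector variable are
# registered, in uW cm-2 nm-1.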
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
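# DOSTA (dissolved-oxygen optode) branches. Note the mixed units:
# dissolved_oxygen is reported in umol/kg, while
# estimated_oxygen_concentration and the tc_oxygen variable are in
# umol/L, with the optode thermistor temperature in degC.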
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
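# PHSEN (seawater pH) branches for the NSIF (RID26) frames: thermistor
# temperature in degC and pH reported as unitless.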
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
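# The same PHSEN layout repeats for the seafloor MFN (MFD35) frames.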
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
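# PCO2W (seawater pCO2) branches, MFN only: thermistor temperature in
# degC and pco2_seawater in uatm.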
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
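# PRESF (seafloor pressure) branches. The recovered_inst streams use
# the presf_tide_* variable names, while the telemetered and
# recovered_host streams expose abs_seafloor_pressure and
# seawater_temperature; the units are identical either way.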
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
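# VELPT branches for the MFN (MFD35) frames, using the same
# nine-variable layout as the NSIF VELPT branches above.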
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
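#MFN DOSTA dissolved oxygen optodes (MFD37/04-DOSTAD000): processed
#dissolved oxygen in umol/kg plus the raw estimated concentration, optode
#temperature, and temperature-corrected oxygen in umol/L.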
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
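#MFN ZPLSC bioacoustic sonars: only the time coordinate is populated for
#these streams in this script.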
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
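#MFN ADCPs (adcp_velocity_earth streams): earth-referenced velocity
#profiles with bin depths and instrument attitude.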
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
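#A minimal usage sketch (the enclosing function's name and signature are not
#shown in this file, so the call below is an assumption based on the inputs
#this chain consumes):
#    vars = request_vars('CP04OSPM', 'PROFILER', 'CTD', 'RecoveredWFP')
#would match the ctdpf_ckl_wfp_instrument_recovered branch below and yield
#var_list seeded with names, units, and empty np.array([]) placeholders
#ready to be filled from the uFrame response.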
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
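#CP01CNPM (Central Profiler Mooring): same MOPAK/FLORT/DOSTA/VEL3D/CTD/PARAD
#suite as CP04OSPM above.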
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
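#CP02PMCI (Central Inshore Profiler Mooring): same WFP instrument suite.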
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
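#CP02PMCO (Central Offshore Profiler Mooring): same WFP instrument suite.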
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
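#CP02PMUI (Upstream Inshore Profiler Mooring): same WFP instrument suite.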
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
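    # each platform/node/instrument/method branch in this chain follows the same
    # pattern: the four-tuple selects one UFrame dataset path, and var_list is
    # seeded with variable names, empty numpy arrays, and display units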
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
        var_list[6].data = np.array([])
# V-REP as tethered robotics simulation environment
# Python Wrapper
# <NAME> 20170410
# import the vrep library
from vrepper.lib import vrep
from vrepper.lib.vrepConst import sim_handle_all, simx_headeroffset_server_state, \
sim_scripttype_childscript
from inspect import getfullargspec
import types
import numpy as np
import socket
from contextlib import closing
from vrepper.utils import check_ret, blocking, oneshot, instance, deprecated
from vrepper.vrep_object import vrepobject
class vrepper(object):
""" class holding a v-rep simulation environment and
allowing to call all the V-Rep remote functions ("simx..")
"""
def __init__(self, port_num=None, dir_vrep='', headless=False, suppress_output=True):
if port_num is None:
port_num = self.find_free_port_to_use()
self.port_num = port_num
if dir_vrep == '':
print('(vrepper) trying to find V-REP executable in your PATH')
import distutils.spawn as dsp
path_vrep = dsp.find_executable('vrep.sh') # fix for linux
            if path_vrep is None:
path_vrep = dsp.find_executable('vrep')
else:
path_vrep = dir_vrep + 'vrep'
print('(vrepper) path to your V-REP executable is:', path_vrep)
if path_vrep is None:
raise Exception("Sorry I couldn't find V-Rep binary. "
"Please make sure it's in the PATH environmental variable")
# start V-REP in a sub process
# vrep.exe -gREMOTEAPISERVERSERVICE_PORT_DEBUG_PREENABLESYNC
# where PORT -> 19997, DEBUG -> FALSE, PREENABLESYNC -> TRUE
# by default the server will start at 19997,
# use the -g argument if you want to start the server on a different port.
args = [path_vrep, '-gREMOTEAPISERVERSERVICE_' + str(self.port_num) + '_FALSE_TRUE']
if headless:
args.append('-h')
# instance created but not started.
self.instance = instance(args, suppress_output)
self.cid = -1
# clientID of the instance when connected to server,
# to differentiate between instances in the driver
self.started = False
# is the simulation currently running (as far as we know)
self.sim_running = False
# assign every API function call from vrep to self
vrep_methods = [a for a in dir(vrep) if
not a.startswith('__') and isinstance(getattr(vrep, a), types.FunctionType)]
def assign_from_vrep_to_self(name):
wrapee = getattr(vrep, name)
            arg0 = getfullargspec(wrapee)[0][0]
if arg0 == 'clientID':
def func(*args, **kwargs):
return wrapee(self.cid, *args, **kwargs)
else:
def func(*args, **kwargs):
return wrapee(*args, **kwargs)
setattr(self, name, func)
for name in vrep_methods:
assign_from_vrep_to_self(name)
def find_free_port_to_use(
self): # https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
# start everything
def start(self):
        if self.started:
raise RuntimeError('you should not call start() more than once')
print('(vrepper) starting an instance of V-REP...')
self.instance.start()
# try to connect to V-REP instance via socket
retries = 0
while True:
print('(vrepper) trying to connect to server on port', self.port_num, 'retry:', retries)
# vrep.simxFinish(-1) # just in case, close all opened connections
self.cid = self.simxStart(
'127.0.0.1', self.port_num,
waitUntilConnected=True,
doNotReconnectOnceDisconnected=True,
timeOutInMs=1000,
commThreadCycleInMs=0) # Connect to V-REP
if self.cid != -1:
print('(vrepper) Connected to remote API server!')
break
else:
retries += 1
if retries > 15:
self.end()
raise RuntimeError('(vrepper) Unable to connect to V-REP after 15 retries.')
# Now try to retrieve data in a blocking fashion (i.e. a service call):
objs, = check_ret(self.simxGetObjects(
sim_handle_all,
blocking))
print('(vrepper) Number of objects in the scene: ', len(objs))
# Now send some data to V-REP in a non-blocking fashion:
self.simxAddStatusbarMessage(
'(vrepper)Hello V-REP!',
oneshot)
# setup a useless signal
self.simxSetIntegerSignal('asdf', 1, blocking)
print('(vrepper) V-REP instance started, remote API connection created. Everything seems to be ready.')
self.started = True
return self
# kill everything, clean up
def end(self):
print('(vrepper) shutting things down...')
# Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
# vrep.simxGetPingTime(clientID)
# Now close the connection to V-REP:
if self.sim_running:
self.stop_simulation()
self.simxFinish()
self.instance.end()
print('(vrepper) everything shut down.')
return self
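    # a minimal lifecycle sketch (the scene path below is hypothetical):
    #   venv = vrepper(headless=True).start()
    #   venv.load_scene('/full/path/to/scene.ttt')
    #   venv.start_blocking_simulation()
    #   ... interact via the simx* calls or vrepobject handles ...
    #   venv.stop_simulation()
    #   venv.end()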
def load_scene(self, fullpathname):
print('(vrepper) loading scene from', fullpathname)
try:
check_ret(self.simxLoadScene(fullpathname,
0, # assume file is at server side
blocking))
except:
print('(vrepper) scene loading failure')
raise
print('(vrepper) scene successfully loaded')
def start_blocking_simulation(self):
self.start_simulation(True)
def start_nonblocking_simulation(self):
self.start_simulation(False)
def start_simulation(self, is_sync):
# IMPORTANT
# you should poll the server state to make sure
# the simulation completely stops before starting a new one
while True:
# poll the useless signal (to receive a message from server)
check_ret(self.simxGetIntegerSignal(
'asdf', blocking))
# check server state (within the received message)
e = self.simxGetInMessageInfo(
simx_headeroffset_server_state)
# check bit0
not_stopped = e[1] & 1
if not not_stopped:
break
# enter sync mode
check_ret(self.simxSynchronous(is_sync))
check_ret(self.simxStartSimulation(blocking))
self.sim_running = True
def make_simulation_synchronous(self, sync):
if not self.sim_running:
print('(vrepper) simulation doesn\'t seem to be running. starting up')
self.start_simulation(sync)
else:
check_ret(self.simxSynchronous(sync))
def stop_simulation(self):
check_ret(self.simxStopSimulation(oneshot), ignore_one=True)
self.sim_running = False
@deprecated('Please use method "stop_simulation" instead.')
def stop_blocking_simulation(self):
self.stop_simulation()
def step_blocking_simulation(self):
check_ret(self.simxSynchronousTrigger())
def get_object_handle(self, name):
handle, = check_ret(self.simxGetObjectHandle(name, blocking))
return handle
def get_object_by_handle(self, handle, is_joint=True):
"""
Get the vrep object for a given handle
:param int handle: handle code
:param bool is_joint: True if the object is a joint that can be moved
:returns: vrepobject
"""
return vrepobject(self, handle, is_joint)
def get_object_by_name(self, name, is_joint=True):
"""
Get the vrep object for a given name
:param str name: name of the object
:param bool is_joint: True if the object is a joint that can be moved
:returns: vrepobject
"""
return self.get_object_by_handle(self.get_object_handle(name), is_joint)
@staticmethod
def create_params(ints=[], floats=[], strings=[], bytes=''):
if bytes == '':
bytes_in = bytearray()
else:
bytes_in = bytes
return (ints, floats, strings, bytes_in)
def call_script_function(self, function_name, params, script_name="remoteApiCommandServer"):
"""
Calls a function in a script that is mounted as child in the scene
:param str script_name: the name of the script that contains the function
:param str function_name: the name of the function to call
:param tuple params: the parameters to call the function with (must be 4 parameters: list of integers, list of floats, list of string, and bytearray
:returns: tuple (res_ints, res_floats, res_strs, res_bytes)
WHERE
list res_ints is a list of integer results
list res_floats is a list of floating point results
list res_strs is a list of string results
bytearray res_bytes is a bytearray containing the resulting bytes
"""
assert type(params) is tuple
assert len(params) == 4
return check_ret(self.simxCallScriptFunction(
script_name,
sim_scripttype_childscript,
function_name,
params[0], # integers
params[1], # floats
params[2], # strings
params[3], # bytes
blocking
))
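    # example call (the function name and arguments are hypothetical): pack the
    # four parameter lists with create_params, then unpack the four result lists
    #   params = vrepper.create_params(ints=[42], strings=['hello'])
    #   ints_out, floats_out, strs_out, bytes_out = venv.call_script_function(
    #       'displayText', params, script_name='remoteApiCommandServer')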
def get_global_variable(self, name, is_first_time):
if is_first_time:
return vrep.simxGetFloatSignal(self.cid, name, vrep.simx_opmode_streaming)
else:
return vrep.simxGetFloatSignal(self.cid, name, vrep.simx_opmode_buffer)
def _convert_byte_image_to_color(self, res, img):
reds = np.zeros(res[0] * res[1], dtype=np.uint8)
greens = np.zeros(res[0] * res[1], dtype=np.uint8)
blues = np.zeros(res[0] * res[1], dtype=np.uint8)
for i in range(0, len(img), 3):
reds[int(i / 3)] = img[i] & 255
greens[int(i / 3)] = img[i + 1] & 255
blues[int(i / 3)] = img[i + 2] & 255
img_out = np.zeros((res[0], res[1], 3), dtype=np.uint8)
        img_out[:, :, 0] = np.array(reds)
        img_out[:, :, 1] = np.array(greens)
        img_out[:, :, 2] = np.array(blues)
        return img_out
"""
AI library in python using numpy
Author: <NAME> (<EMAIL>)
https://www.github.com/srirambandi
MIT License
"""
import numpy as np
# the Parameter object: stores weights and derivatives of weights(after backprop)
# of each layer in the model
class Parameter:
def __init__(self, shape=(0, 0), data=None, grad=None, eval_grad=True, node_id=None, graph=None,
init_zeros=False, init_ones=False, constant=1.0,
uniform=False, low=-1.0, high = 1.0,
normal=False, mean=0.0, std=0.01):
# properties
self.shape = shape
self.data = data
self.grad = grad
self.eval_grad = eval_grad # if the parameter is a variable or an input/constant
        # node id - in the BFS-like graph walk during the forward pass, the node number
        # of the path, i.e., the latest backward op of which this parameter was an output
self.node_id = node_id
if graph is not None: # graph object this parameter belongs to
self.graph = graph
else:
self.graph = G
# constant initializations
self.init_zeros = init_zeros
self.init_ones = init_ones
self.constant = constant
# initializing from distributions
self.uniform = uniform
self.low = low # high and low of uniform
self.high = high # distribution to initialize the parameter
self.mean = mean # mean and variance of the gaussian
self.std = std # distribution to initialize the parameter
# creating weight and gradient tensors
self.init_params()
def init_params(self):
if self.data is not None:
# initiating weights with passed data object of kind list/numpy-ndarray
if not isinstance(self.data, np.ndarray):
self.data = np.array(self.data)
self.shape = self.data.shape # resolving conflict with passed shape and data shape
elif self.init_zeros:
# initiating with zeros of given shape
self.data = np.zeros(self.shape)
elif self.init_ones:
# initiating with ones(or a constant) of given shape
self.data = np.ones(self.shape) * self.constant
elif self.uniform:
# random initiation with uniform distribution
self.data = np.random.uniform(self.low, self.high, self.shape)
else:
# random initiation with gaussian distribution
self.normal = True
self.data = np.random.normal(self.mean, self.std, self.shape)
# setting gradient of parameter wrt some scalar, as zeros
if self.grad is None:
self.grad = np.zeros(self.shape)
else:
if not isinstance(self.grad, np.ndarray):
self.grad = np.array(self.grad)
assert self.data.shape == self.grad.shape, 'data and grad should be of same shape'
def __repr__(self):
parameter_schema = 'Parameter(shape={}, eval_grad={}) containing:\n'.format(self.shape, self.eval_grad)
parameter_schema += 'Data: {}'.format(self.data)
return parameter_schema
# this function computes the gradients of the parameters, by executing
# the backprop ops in reverse order to the forward propagation with chain rule
def backward(self, grad=None, to=None):
# assign gradient
if self.node_id is None:
return
if grad is not None:
if not isinstance(grad, np.ndarray):
self.grad = np.array(grad)
if to is None:
to_node_id = 0 # execute backward all the way to start
else:
to_node_id = to.node_id + 1 # execute backward to just before this node
for node in reversed(self.graph.nodes[to_node_id:int(self.node_id) + 1]):
node['backprop_op']() # executing the back-propagation operation
def __getitem__(self, key):
axis = []
return_scalar = True
for _ in range(len(key)):
if isinstance(key[_], int):
axis.append(_)
if isinstance(key[_], slice):
return_scalar = False
axis = tuple(axis)
if return_scalar:
return self.data[key]
else:
return Parameter(data=np.expand_dims(self.data[key], axis=axis),
grad=np.expand_dims(self.grad[key], axis=axis))
def __add__(self, other):
if not isinstance(other, Parameter):
other = Parameter(data=other, eval_grad=False, graph=self.graph)
assert self.shape == other.shape, ('Objects not of same shape. Use G.add() with axis argument', self.shape, other.shape)
return self.graph.add(self, other)
def __sub__(self, other):
if not isinstance(other, Parameter):
other = Parameter(data=other, eval_grad=False, graph=self.graph)
assert self.shape == other.shape, ('Objects not of same shape. Use G.subtract() with axis argument', self.shape, other.shape)
return self.graph.subtract(self, other)
def __mul__(self, other):
if not isinstance(other, Parameter):
other = Parameter(data=other, eval_grad=False, graph=self.graph)
assert self.shape == other.shape, ('Objects not of same shape. Use G.multiply() with axis argument', self.shape, other.shape)
return self.graph.multiply(self, other)
def __matmul__(self, other):
if not isinstance(other, Parameter):
other = Parameter(data=other, eval_grad=False, graph=self.graph)
return self.graph.dot(self, other)
def __truediv__(self, other):
if not isinstance(other, Parameter):
other = Parameter(data=other, eval_grad=False, graph=self.graph)
assert self.shape == other.shape, 'Objects not of same shape. Use G.divide() with axis argument'
return self.graph.divide(self, other)
def __pow__(self, other):
return self.graph.power(self, other)
# transpose
def T(self):
self.data = self.data.T
self.grad = self.grad.T
self.shape = tuple(reversed(self.shape))
return self
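# a minimal end-to-end sketch of the autodiff flow (assumes the module-level
# default graph G = ComputationalGraph(), created elsewhere in this file):
#   a = Parameter((2, 2), uniform=True)    # leaf that accumulates gradients
#   b = Parameter((2, 2), init_ones=True)  # another leaf
#   c = a * b                              # records a multiply node on G
#   s = G.sum(c)                           # scalar output of shape (1, 1)
#   s.backward(np.ones((1, 1)))            # chain rule fills a.grad and b.grad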
# Computational Graph wannabe: stores the backward operation for every
# forward operation during forward-propagation, in a breadth-first manner
class ComputationalGraph:
def __init__(self, grad_mode=True):
self.grad_mode = grad_mode
self.nodes = list()
# functions required for deep learning models and their respective backward operations
def dot(self, W, x): # dot product of vectors and matrices
assert W.shape[1] == x.shape[0], 'shape mismatch in dot() operation - W: {}, x: {}'.format(W.shape, x.shape)
out = Parameter(data=np.dot(W.data, x.data), graph=self)
if self.grad_mode:
def backward():
# useful: http://cs231n.stanford.edu/slides/2018/cs231n_2018_ds02.pdf
# print('dot')
if W.eval_grad:
W.grad += np.dot(out.grad, x.data.T)
if x.eval_grad:
x.grad += np.dot(out.grad.T, W.data).T
# return (x.grad, W.grad)
node = {'func': '@', 'inputs': [W, x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def add(self, x, y, axis=()): # element wise addition
# bias should be passed in position of y
out = Parameter(data=np.add(x.data, y.data), graph=self)
if self.grad_mode:
def backward():
# print('add')
if x.eval_grad:
x.grad += out.grad
if y.eval_grad:
y.grad += np.sum(out.grad, axis = axis).reshape(y.shape) # in case of unequal sizes of inputs
# return (x.grad, y.grad)
node = {'func': '+', 'inputs': [x, y], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def subtract(self, x, y, axis=()): # element wise subtraction
out = Parameter(data=np.subtract(x.data, y.data), graph=self)
if self.grad_mode:
def backward():
# print('subtract')
if x.eval_grad:
x.grad += out.grad
if y.eval_grad:
y.grad -= np.sum(out.grad, axis=axis).reshape(y.shape) # in case of unequal sizes of inputs
# return (x.grad, y.grad)
node = {'func': '-', 'inputs': [x, y], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def multiply(self, x, y, axis=()): # element wise vector multiplication
out = Parameter(data=np.multiply(x.data, y.data), graph=self)
if self.grad_mode:
def backward():
# print('multiply')
if x.eval_grad:
x.grad += np.multiply(out.grad, y.data)
if y.eval_grad:
y.grad += np.sum(np.multiply(out.grad, x.data), axis=axis).reshape(y.shape) # in case of unequal sizes of inputs
# return (x.grad, y.grad)
node = {'func': '*', 'inputs': [x, y], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def divide(self, x, y, axis=()): # element wise vector division
out = Parameter(data= np.divide(x.data, y.data + 1e-8), graph=self)
if self.grad_mode:
def backward():
# print('divide')
if x.eval_grad:
x.grad += np.multiply(out.grad, np.divide(1.0, y.data + 1e-8))
if y.eval_grad:
y.grad += np.sum(np.multiply(out.grad, np.multiply(out.data, np.divide(-1.0, y.data + 1e-8))), axis=axis).reshape(y.shape) # in case of unequal sizes of inputs
# return (x.grad, y.grad)
node = {'func': '/', 'inputs': [x, y], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def sum(self, h, axis=None): # sum of all elements in the matrix
if axis == None:
res = np.sum(h.data).reshape(1, 1)
else:
res = np.sum(h.data, axis=axis, keepdims=True)
out = Parameter(data=res, graph=self)
if self.grad_mode:
def backward():
# print('sum')
if h.eval_grad:
h.grad += out.grad
# return h.grad
node = {'func': 'sum', 'inputs': [h], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def power(self, h, exp): # element wise power
out = Parameter(h.shape, init_zeros=True, graph=self)
out.data = np.power(h.data, exp) if exp >= 0 else np.power(h.data + 1e-8, exp) # numerical stability for -ve power
if self.grad_mode:
def backward():
# print('power')
if h.eval_grad:
if exp >= 0:
h.grad += np.multiply(out.grad, exp * np.power(h.data, exp - 1))
else:
h.grad += np.multiply(out.grad, exp * np.power(h.data + 1e-8, exp - 1))
# return h.grad
node = {'func': '^{}'.format(exp), 'inputs': [h], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def log(self, h): # element wise logarithm
out = Parameter(data=np.log(h.data + 1e-8), graph=self) # numerical stability for values ~0
if self.grad_mode:
def backward():
# print('log')
if h.eval_grad:
h.grad += np.multiply(out.grad, np.divide(1.0, h.data + 1e-8))
# return h.grad
node = {'func': 'log', 'inputs': [h], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
# layers functions
def conv1d(self, x, K, s=(1,), p=(0,)):
# faster 1d convolution operation
if not isinstance(s, tuple):
s = (s,)
if not isinstance(p, tuple):
p = (p,)
C = K.shape[1] # number of input channels
F = K.shape[0] # number of output filters
i = x.shape[1:-1] # input channel shape
k = K.shape[2:] # kernel filter shape
N = x.shape[-1] # Batch size
# Figure out output dimensions
o = tuple(map(lambda i, k, s, p: int((i + 2*p - k)/s + 1), i, k, s, p))
pad_i = tuple(map(lambda i, p: i + 2*p, i, p))
# padding the input
pad_x = np.pad(x.data, ((0, 0), (*p, *p), (0, 0)), mode='constant')
# get strided view of padded input by picking appropriate strides
shape = (C, *k, *o, N)
strides = (pad_i[0] * N, N, s[0] * N, 1)
strides = pad_x.itemsize * np.array(strides)
stride_x = np.lib.stride_tricks.as_strided(pad_x, shape=shape, strides=strides)
x_cols = np.ascontiguousarray(stride_x)
x_cols = x_cols.reshape(C * k[0], o[0] * N)
# convolution operation - matrix multiplication of strided array with kernel
out = K.data.reshape(F, -1).dot(x_cols)
# Reshape the output
out = out.reshape(F, *o, N)
out = np.ascontiguousarray(out)
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('conv1d')
if K.eval_grad:
K.grad += np.ascontiguousarray(out.grad.reshape(F, -1).dot(x_cols.T).reshape(K.shape))
if x.eval_grad:
pad_x_grad = np.zeros(pad_x.shape)
for r in range(out.shape[1]):
# solving gradient for input feature map that caused the elements in r position of every output filter
                        # in every batch; similar to kernel gradient method, but the matrix collapses along filters dimension using sum
_ = out.grad[:, r, :].reshape(F, 1, 1, N)
pad_x_grad[:, r*s[0]:r*s[0] + k[0], :] += np.sum(np.multiply(_, K.data.reshape(*K.shape, 1)), axis=0)
# cutting the padded portion from the input-feature-map's gradient
# and updating the gradient of actual input feature map(non-padded) - unpadding and updating
x.grad += pad_x_grad[:, p[0]:pad_x_grad.shape[1]-p[0], :]
# return (K.grad, x.grad)
node = {'func': 'conv1d', 'inputs': [x, K], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def conv2d_old(self, x, K, s=(1, 1), p=(0, 0)):
# useful: https://arxiv.org/pdf/1603.07285.pdf
# 2d convolution operation - simple but inefficient implementation
        # the Conv2d layer uses the faster conv2d implementation below
if not isinstance(s, tuple):
s = (s, s)
if not isinstance(p, tuple):
p = (p, p)
F = K.shape[0] # number of output filters
C = K.shape[1] # number of input channels
k = K.shape[2:] # don't confuse b/w K(big) - the kernel set and k(small) - a single kernel's shape, of some cth-channel in a kth-filter
i = x.shape[1:-1] # input shape of any channel of the input feature map before padding
N = x.shape[-1] # batch size of the input
o = tuple(map(lambda i, k, s, p: int((i + 2*p - k)/s + 1), i, k, s, p))
pad_i = tuple(map(lambda i, p: i + 2*p, i, p))
out = np.zeros((F, *o, N)) # output feature maps
pad_x = np.pad(x.data, ((0, 0), p, p, (0, 0)), mode='constant')
pad_x = pad_x.reshape(1, *pad_x.shape)
# convolution function computing cross-correlation instead of actual convolution - otherwise have to use
        # flipped kernels which doesn't affect learning
kernel = K.data.reshape(*K.shape, 1)
for r in range(out.shape[1]): # convolving operation here
            for c in range(out.shape[2]): # traversing rows and columns of feature map
# multiplying traversed grid portions of padded input feature maps with kernel grids element-wise
# and summing the resulting matrix to produce elements of output maps, over all filters and batches
out[:, r, c, :] += np.sum(np.multiply(pad_x[:, :, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :], kernel), axis=(1, 2, 3))
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('conv2d')
if K.eval_grad:
for r in range(out.shape[1]):
for c in range(out.shape[2]):
# solving gradient for each kernel filter that caused the elements in r, c position of every output filter
                            # in every batch; sketch and think, with the input stacked F times to make computation fast
_ = out.grad[:, r, c, :].reshape(F, 1, 1, 1, N)
# updating the kernel filter set gradient - there will be RxC such updates
K.grad += np.sum(np.multiply(_, pad_x[:, :, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :]), axis = -1)
if x.eval_grad:
pad_x_grad = np.zeros((C, *pad_i, N))
for r in range(out.shape[1]):
for c in range(out.shape[2]):
# solving gradient for input feature map that caused the elements in r, c position of every output filter
                            # in every batch; similar to kernel gradient method, but the matrix collapses along filters dimension using sum
_ = out.grad[:, r, c, :].reshape(F, 1, 1, 1, N)
pad_x_grad[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] += np.sum(np.multiply(_, kernel), axis=0)
# cutting the padded portion from the input-feature-map's gradient
# and updating the gradient of actual input feature map(non-padded) - unpadding and updating
x.grad += pad_x_grad[:, p[0]:pad_x_grad.shape[1]-p[0], p[1]:pad_x_grad.shape[2]-p[1], :]
# return (K.grad, x.grad)
node = {'func': 'conv2d', 'inputs': [x, K], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def conv2d(self, x, K, s=(1, 1), p=(0, 0)):
# faster 2d convolution operation
if not isinstance(s, tuple):
s = (s, s)
if not isinstance(p, tuple):
p = (p, p)
C = K.shape[1] # number of input channels
F = K.shape[0] # number of output filters
i = x.shape[1:-1] # input channel shape
k = K.shape[2:] # kernel filter shape
N = x.shape[-1] # Batch size
# Figure out output dimensions
o = tuple(map(lambda i, k, s, p: int((i + 2*p - k)/s + 1), i, k, s, p))
pad_i = tuple(map(lambda i, p: i + 2*p, i, p))
# padding the input
pad_x = np.pad(x.data, ((0, 0), p, p, (0, 0)), mode='constant')
# get strided view of padded input by picking appropriate strides
shape = (C, *k, *o, N)
strides = (pad_i[0] * pad_i[1] * N, pad_i[1] * N, N, s[0] * pad_i[1] * N, s[1] * N, 1)
strides = pad_x.itemsize * np.array(strides)
stride_x = np.lib.stride_tricks.as_strided(pad_x, shape=shape, strides=strides)
x_cols = np.ascontiguousarray(stride_x)
x_cols = x_cols.reshape(C * k[0] * k[1], o[0] * o[1] * N)
# convolution operation - matrix multiplication of strided array with kernel
out = K.data.reshape(F, -1).dot(x_cols)
# Reshape the output
out = out.reshape(F, *o, N)
out = np.ascontiguousarray(out)
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('conv2d')
if K.eval_grad:
K.grad += np.ascontiguousarray(out.grad.reshape(F, -1).dot(x_cols.T).reshape(K.shape))
if x.eval_grad:
pad_x_grad = np.zeros(pad_x.shape)
for r in range(out.shape[1]):
for c in range(out.shape[2]):
# solving gradient for input feature map that caused the elements in r, c position of every output filter
                            # in every batch; similar to kernel gradient method, but the matrix collapses along filters dimension using sum
_ = out.grad[:, r, c, :].reshape(F, 1, 1, 1, N)
pad_x_grad[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] += np.sum(np.multiply(_, K.data.reshape(*K.shape, 1)), axis=0)
# cutting the padded portion from the input-feature-map's gradient
# and updating the gradient of actual input feature map(non-padded) - unpadding and updating
x.grad += pad_x_grad[:, p[0]:pad_x_grad.shape[1]-p[0], p[1]:pad_x_grad.shape[2]-p[1], :]
# return (K.grad, x.grad)
node = {'func': 'conv2d', 'inputs': [x, K], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
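    # shape sanity check for the strided view above (illustrative numbers only):
    # with C=3, k=(3, 3), i=(32, 32), s=(1, 1), p=(1, 1) and N=16, pad_x has
    # shape (3, 34, 34, 16), o=(32, 32), stride_x has shape (3, 3, 3, 32, 32, 16)
    # and x_cols collapses to (27, 16384), so K reshaped to (F, 27) performs the
    # whole convolution as a single matrix multiplication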
def conv_transpose2d_old(self, x, K, s=(1, 1), p=(0, 0), a=(0, 0)):
# useful: https://arxiv.org/pdf/1603.07285.pdf
# 2d convolutional transpose operation - simple but inefficient implementation
        # the ConvTranspose2d layer uses the faster conv_transpose2d implementation below
if not isinstance(s, tuple):
s = (s, s)
if not isinstance(p, tuple):
p = (p, p)
if not isinstance(a, tuple):
a = (a, a)
F = K.shape[0] # number of filters - here number of feature input planes
C = K.shape[1] # number of input channels - here number of image output planes
k = K.shape[2:] # don't confuse b/w K(big) - the kernel set and k(small) - a single kernel's shape, of some cth-channel in a kth-filter
i = x.shape[1:-1] # input shape of any channel of the input feature map before padding
N = x.shape[-1] # batch size of the input
o = tuple((map(lambda i, k, s, p, a: int((i - 1)*s + a + k - 2*p), i, k, s, p, a)))
pad_o = tuple(map(lambda o, p: o + 2*p, o, p))
pad_out = np.zeros((C, *pad_o, N)) # output feature maps
# convolution function computing cross-correlation instead of actual convolution like conv2d
kernel = K.data.reshape(*K.shape, 1)
for r in range(x.shape[1]):
for c in range(x.shape[2]):
# computing output image feature map by convolving across each element of input feature map with kernel
_ = x.data[:, r, c, :].reshape(F, 1, 1, 1, N)
pad_out[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] += np.sum(np.multiply(_, kernel), axis=0)
# cutting the padded portion from the input-feature-map's gradient
# and updating the gradient of actual input feature map(non-padded) - unpadding and updating
out = pad_out[:, p[0]:pad_out.shape[1]-p[0], p[1]:pad_out.shape[2]-p[1], :]
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('conv_transpose2d')
pad_out_grad = np.pad(out.grad, ((0, 0), p, p, (0, 0)), mode='constant')
pad_out_grad = pad_out_grad.reshape(1, *pad_out_grad.shape)
if K.eval_grad:
for r in range(x.shape[1]):
for c in range(x.shape[2]):
# solving gradient for each kernel filter
_ = x.data[:, r, c, :].reshape(F, 1, 1, 1, N)
# updating the kernel filter set gradient - there will be RxC such updates
K.grad += np.sum(np.multiply(_, pad_out_grad[:, :, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :]), axis = -1)
if x.eval_grad:
for r in range(x.shape[1]):
for c in range(x.shape[2]):
# solving gradient for input feature map
x.grad[:, r, c, :] += np.sum(np.multiply(pad_out_grad[:, :, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :], kernel), axis=(1, 2, 3))
# return (K.grad, x.grad)
node = {'func': 'conv_transpose2d', 'inputs': [x, K], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def conv_transpose2d(self, x, K, s=(1, 1), p=(0, 0), a=(0, 0)):
# faster 2d convolution operation
if not isinstance(s, tuple):
s = (s, s)
if not isinstance(p, tuple):
p = (p, p)
if not isinstance(a, tuple):
a = (a, a)
F = K.shape[0] # number of input filters
C = K.shape[1] # number of output channels
i = x.shape[1:-1] # input channel shape
k = K.shape[2:] # kernel filter shape
N = x.shape[-1] # Batch size
o = tuple((map(lambda i, k, s, p, a: int((i - 1)*s + a + k - 2*p), i, k, s, p, a)))
pad_o = tuple(map(lambda o, p: o + 2*p, o, p))
pad_out = np.zeros((C, *pad_o, N))
for r in range(x.shape[1]):
for c in range(x.shape[2]):
# computing output image feature map by convolving across each element of input feature map with kernel
_ = x.data[:, r, c, :].reshape(F, 1, 1, 1, N)
pad_out[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] += np.sum(np.multiply(_, K.data.reshape(*K.shape, 1)), axis=0)
# cutting the padded portion from the input-feature-map's gradient
# and updating the gradient of actual input feature map(non-padded) - unpadding and updating
out = pad_out[:, p[0]:pad_out.shape[1]-p[0], p[1]:pad_out.shape[2]-p[1], :]
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('conv_transpose2d')
# padding the output gradient
pad_out_grad = np.pad(out.grad, ((0, 0), p, p, (0, 0)), mode='constant')
# get strided view of padded output gradient by picking appropriate strides
shape = (C, *k, *i, N)
strides = (pad_o[0] * pad_o[1] * N, pad_o[1] * N, N, s[0] * pad_o[1] * N, s[1] * N, 1)
strides = pad_out_grad.itemsize * np.array(strides)
stride_out_grad = np.lib.stride_tricks.as_strided(pad_out_grad, shape=shape, strides=strides)
out_grad_cols = np.ascontiguousarray(stride_out_grad)
out_grad_cols = out_grad_cols.reshape(C * k[0] * k[1], i[0] * i[1] * N)
if K.eval_grad:
K.grad += np.ascontiguousarray(x.data.reshape(F, -1).dot(out_grad_cols.T).reshape(K.shape))
if x.eval_grad:
x_grad = K.data.reshape(F, -1).dot(out_grad_cols)
# Reshape the gradient
x_grad = x_grad.reshape(F, *i, N)
x.grad += np.ascontiguousarray(x_grad)
# return (K.grad, x.grad)
node = {'func': 'conv_transpose2d', 'inputs': [x, K], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
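    # output-size example for the transposed convolution (illustrative only):
    # i=(4, 4), k=(3, 3), s=(2, 2), p=(1, 1), a=(1, 1) gives
    # o = (4 - 1)*2 + 1 + 3 - 2*1 = 8, i.e. a (C, 8, 8, N) output - the familiar
    # 2x upsampling step of DCGAN-style generators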
def max_pool2d(self, x, k=None, s=None, p=(0, 0)): # maxpool layer(no params), used generally after Conv2d - simple but inefficient implementation
# useful: https://arxiv.org/pdf/1603.07285.pdf
if s is None:
s = k
if not isinstance(k, tuple):
k = (k, k)
if not isinstance(s, tuple):
s = (s, s)
F = x.shape[0] # number of input filter planes
i = x.shape[1:-1] # input shape of any channel of the input feature map before padding
N = x.shape[-1] # Batch size
o = tuple(map(lambda i, k, s, p: int((i + 2*p - k)/s + 1), i, k, s, p))
pad_i = tuple(map(lambda i, p: i + 2*p, i, p))
out = np.zeros((F, *o, N))
pad_x = np.pad(x.data, ((0, 0), p, p, (0, 0)), mode='constant')
for r in range(out.shape[1]): # convolving operation here(kinda)
            for c in range(out.shape[2]): # traversing rows and columns of feature map
# Selecting max element in the current position where kernel sits on feature map
# The kernel moves in a convolution manner similar to conv2d
_ = pad_x[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :]
out[:, r, c, :] = np.max(_, axis=(1, 2))
if self.grad_mode: # seems inefficient; will improve this whole maxpool op later
# Also storing value 1 at locations in the input that caused the output values(max locations); makes life easy during backprop
# if multiple 0s occur and max is 0 then it shouldn't count. weeding out such cases by assigning
# NaN and later zeroing out their gradient locations too; this was a bug which is fixed now :)
out[:, r, c, :][out[:, r, c, :] == 0] = np.nan
_ -= out[:, r, c, :].reshape(F, 1, 1, N)
_[np.isnan(_)] = -1 # removing all zeros locations
# can't use '_' object from above for the below assignment, so using the entire notation :(
pad_x[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] = np.where(pad_x[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] < 0, 0, 1.0)
out[:, r, c, :][np.isnan(out[:, r, c, :])] = 0
out = Parameter(data=out, graph=self)
if self.grad_mode:
def backward():
# print('maxpool2d')
if x.eval_grad:
for r in range(out.shape[1]):
for c in range(out.shape[2]):
# multiplying each 'mask' like volume(single 1s in the volumes along all batches) with the gradient
# at region whose value was caused by the mask region's input
pad_x[:, r*s[0]:r*s[0] + k[0], c*s[1]:c*s[1] + k[1], :] *= out.grad[:, r, c, :].reshape(F, 1, 1, N)
# cutting the padded portion from the input gradient
# and updating the gradient of actual input(non-padded) - unpadding and updating
x.grad += pad_x[:, p[0]:pad_x.shape[1]-p[0], p[1]:pad_x.shape[2]-p[1], :]
# return (x.grad)
node = {'func': 'maxpool', 'inputs': [x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def max_pool2d_faster(self, x, k=(2, 2), s=(2,2), p=(0, 0)): # maxpool layer(no params)
# useful: https://arxiv.org/pdf/1603.07285.pdf
F = x.shape[0] # number of input filter planes
i = x.shape[1:-1] # input shape of any channel of the input feature map before padding
N = x.shape[-1] # Batch size
# Figure out output dimensions
o = tuple(map(lambda i, k, s, p: int((i + 2*p - k)/s + 1), i, k, s, p))
pad_i = tuple(map(lambda i, p: i + 2*p, i, p))
# padding the input
pad_x = np.pad(x.data, ((0, 0), p, p, (0, 0)), mode='constant')
# get strided view of padded input by picking appropriate strides
shape = (F, *k, *o, N)
strides = (pad_i[0] * pad_i[1] * N, pad_i[1] * N, N, s[0] * pad_i[1] * N, s[1] * N, 1)
strides = pad_x.itemsize * np.array(strides)
stride_x = np.lib.stride_tricks.as_strided(pad_x, shape=shape, strides=strides)
x_cols = np.ascontiguousarray(stride_x)
x_cols = x_cols.reshape(F, k[0] * k[1], *o, N)
# store indices of the max location of each patch
max_indices = np.argmax(x_cols, axis=1)
out = np.max(x_cols, axis=1)
out = Parameter(data=out, graph=self)
if self.grad_mode:
            def backward():
                # print('maxpool2d')
                if x.eval_grad:
                    # the slow version's in-place masking of pad_x doesn't apply
                    # here (pad_x was never turned into a mask), so instead route
                    # each output gradient to the argmax location of its
                    # k[0] x k[1] window using the stored max_indices
                    pad_x_grad = np.zeros(pad_x.shape)
                    f_idx = np.arange(F).reshape(F, 1)
                    n_idx = np.arange(N).reshape(1, N)
                    for r in range(out.shape[1]):
                        for c in range(out.shape[2]):
                            kr, kc = np.unravel_index(max_indices[:, r, c, :], k)
                            pad_x_grad[f_idx, r*s[0] + kr, c*s[1] + kc, n_idx] += out.grad[:, r, c, :]
                    # cutting the padded portion from the input gradient
                    # and updating the gradient of actual input(non-padded) - unpadding and updating
                    x.grad += pad_x_grad[:, p[0]:pad_x_grad.shape[1]-p[0], p[1]:pad_x_grad.shape[2]-p[1], :]
# return (x.grad)
node = {'func': 'maxpool', 'inputs': [x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def dropout(self, x, p=0.5): # dropout regularization layer!
# useful: https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
if self.grad_mode:
# drop activation units randomly during training
# a unit is present with probability p
dropout_mask = np.random.binomial(np.ones(x.shape, dtype='int64'), p)
else:
# scale activations of units by p during testing
# units are always present
dropout_mask = p
        # drop/scale
out = Parameter(data=dropout_mask*x.data, graph=self)
if self.grad_mode:
def backward():
# print('dropout')
if x.eval_grad:
x.grad += out.grad*dropout_mask # only activated units get gradients
# return x.grad
node = {'func': 'dropout', 'inputs': [x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
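    # note: this is the classic (non-inverted) dropout of the paper linked above -
    # units are kept with probability p at train time and activations are scaled
    # by p at test time, so expected activations match between the two modes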
# hidden and output units activations
def relu(self, z): # element wise ReLU activations
out = Parameter(data=np.maximum(z.data, 0), graph=self)
if self.grad_mode:
def backward():
# print('relu')
if z.eval_grad:
z.grad += out.grad.copy()
z.grad[z.data < 0] = 0
# return z.grad
node = {'func': 'relu', 'inputs': [z], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def lrelu(self, z, alpha=1e-2): # element wise Leaky ReLU activations
out = Parameter(data=np.maximum(z.data, alpha * z.data), graph=self)
if self.grad_mode:
def backward():
# print('lrelu')
if z.eval_grad:
z.grad += out.grad.copy()
z.grad[z.data < 0] *= alpha
# return z.grad
node = {'func': 'lrelu', 'inputs': [z], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def sigmoid(self, z): # element wise sigmoid activations
shape = z.shape
out = Parameter(shape, init_zeros=True, graph=self)
out.data = 1.0/(1.0 + np.exp(-1.0*z.data))
if self.grad_mode:
def backward():
# print('sigmoid')
if z.eval_grad:
z.grad += np.multiply(np.multiply(out.data, 1.0 - out.data), out.grad)
# return z.grad
node = {'func': 'sigmoid', 'inputs': [z], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def softmax(self, z): # calculates probs for the unnormalized log probabilities of previous layer
shape = z.shape
out = Parameter(shape, init_zeros=True, graph=self)
out.data = np.exp(z.data - np.max(z.data)) / np.sum(np.exp(z.data - np.max(z.data)), axis=0).reshape(1, -1)
if self.grad_mode:
def backward():
# print('softmax')
if z.eval_grad:
# directly coding the end result instead of formula - easy this way
z.grad += out.data - np.where(out.grad == 0, 0, 1.0)
# return z.grad
node = {'func': 'softmax', 'inputs': [z], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def tanh(self, z): # element wise tanh activations
out = Parameter(data=np.tanh(z.data), graph=self)
if self.grad_mode:
def backward():
# print('tanh')
if z.eval_grad:
z.grad += np.multiply(1 - np.multiply(out.data, out.data), out.grad)
# return z.grad
node = {'func': 'tanh', 'inputs': [z], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
# data manipulation/view functions
def split(self, W, sections=1, axis=0):
outs = np.split(W.data, sections, axis=axis)
outs_list = list()
for e in outs:
o = Parameter(data=e, graph=self)
outs_list.append(o)
if self.grad_mode:
def backward():
# print('split')
outs_grads = [o.grad for o in outs_list]
if W.eval_grad:
W.grad += np.concatenate(outs_grads, axis=axis)
# return W.grad
node = {'func': 'split', 'inputs': [W], 'outputs': outs_list, 'backprop_op': lambda: backward()}
for out in outs_list:
out.node_id = len(self.nodes)
self.nodes.append(node)
return outs_list
def cat(self, inputs_list, axis=0):
indices = [input.shape[axis] for input in inputs_list]
indices = [sum(indices[:_+1]) for _ in range(len(indices))]
        out = Parameter(data=np.concatenate([inp.data for inp in inputs_list], axis=axis), graph=self)
if self.grad_mode:
def backward():
# print('cat')
input_grads = np.split(out.grad, indices, axis=axis)
for _ in range(len(inputs_list)):
if inputs_list[_].eval_grad:
inputs_list[_].grad += input_grads[_]
# return *[input.grad for input in inputs_list]
node = {'func': 'cat', 'inputs': [inputs_list], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
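    # e.g. G.cat(G.split(W, sections=4, axis=0), axis=0) reproduces W's data, and the
    # two backward rules above are exact inverses of one another (added commentary).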
def T(self, x): # transpose
out = Parameter(data=x.data.T, graph=self)
if self.grad_mode:
def backward():
# print('T')
if x.eval_grad:
x.grad += out.grad.T
# return x.grad
node = {'func': 'transpose', 'inputs': [x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
def reshape(self, x, new_shape=None):
old_shape = x.shape
batch_size = old_shape[-1]
if new_shape == None: # flatten
new_shape = x.data.reshape(-1, batch_size).shape
else:
new_shape = (*new_shape, batch_size)
out = Parameter(new_shape, init_zeros=True, graph=self)
out.data = x.data.reshape(new_shape)
if self.grad_mode:
def backward():
# print('reshape')
if x.eval_grad:
x.grad += out.grad.reshape(old_shape)
# return x.grad
node = {'func': 'reshape', 'inputs': [x], 'outputs': [out], 'backprop_op': lambda: backward()}
out.node_id = len(self.nodes)
self.nodes.append(node)
return out
G = ComputationalGraph()
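# A minimal smoke-test sketch (added example, not original code). It assumes the
# Parameter constructor signatures used elsewhere in this file and only exercises
# the forward pass; the backward traversal that consumes G.nodes is defined
# elsewhere in this file.
def _demo_graph_forward():
    x = Parameter((4, 2), init_zeros=True, graph=G)   # (features, batch) convention
    x.data = np.random.randn(4, 2)
    h = G.relu(x)
    y = G.sigmoid(h)
    assert y.shape == (4, 2)
    return y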
# generic module class to add useful features like save/load model from files, get parameters etc.
class Module(object):
def __init__(self):
pass
def __repr__(self):
module_schema = str(self.__class__.__name__) + '(\n'
for name, layer in self.get_module_layers().items():
module_schema += ' ' + str(name) + ': ' + str(layer) + '\n'
module_schema += ')'
return module_schema
def save(self, file=None): # model.save() - saves the state of the network
print('saving model...')
save_dict = dict()
module_layers = self.get_module_layers()
for layer_name, layer in module_layers.items():
layer_params = layer.get_module_params()
for param_name, param in layer_params.items():
layer_params[param_name] = param.data
module_layers[layer_name] = layer_params
module_params = self.get_module_params()
for param_name, param in module_params.items():
module_params[param_name] = param.data
save_dict['module_layers'] = module_layers
save_dict['module_params'] = module_params
if file == None:
file = self.__class__.__name__+'.npy'
np.save(file, save_dict)
print('Successfully saved model in {}'.format(file))
def load(self, file=None): # model.load() - loads the state of net from a file
print('loading model...')
if file == None:
file = self.__class__.__name__+'.npy'
load_dict = np.load(file, allow_pickle=True).item()
module_layers_stored = load_dict['module_layers']
module_params_stored = load_dict['module_params']
module_layers_actual = self.get_module_layers()
module_params_actual = self.get_module_params()
for layer_name, layer_stored in module_layers_stored.items():
if layer_name in module_layers_actual:
for param_name, param in layer_stored.items():
layer_actual = module_layers_actual[layer_name]
setattr(layer_actual, str(param_name), Parameter(data=param))
for param_name, param in module_params_stored.items():
if param_name in module_params_actual:
setattr(self,str(param_name), Parameter(data=param))
print('Successfully loaded model from {}'.format(file))
def get_module_layers(self): # returns a dictionary of parametrized layers in the module
attributes = self.__dict__
layers = ['Linear', 'Conv2d', 'ConvTranspose2d', 'LSTM', 'RNN', 'BatchNorm', 'Maxpool2d', 'Dropout']
module_layers = dict()
for name in attributes:
if attributes[name].__class__.__name__ in layers:
module_layers[name] = attributes[name]
return module_layers
def get_module_params(self): # returns a dictionary of parameters in the module
attributes = self.__dict__
module_params = dict()
for name in attributes:
if attributes[name].__class__.__name__ in ['Parameter']:
if attributes[name].eval_grad:
module_params[name] = attributes[name]
return module_params
def parameters(self): # access parameters of the module with this function
all_params = list()
for layer in list(self.get_module_layers().values()):
all_params.extend(layer.parameters())
all_params.extend(list(self.get_module_params().values()))
return all_params
# linear affine transformation: y = Wx + b
# the general feed-forward network
class Linear(Module):
def __init__(self, input_features=0, output_features=0, bias=True, graph=G):
super(Linear, self).__init__()
self.input_features = input_features # previous layer units
self.output_features = output_features # next layer units
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
root_k = np.sqrt(1. / self.input_features)
self.W = Parameter((self.output_features, self.input_features), uniform=True, low=-root_k, high=root_k, graph=self.graph) # weight volume
self.b = Parameter((self.output_features, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph) # bias vector
def __repr__(self):
return('Linear(input_features={}, output_features={}, bias={})'.format(
self.input_features, self.output_features, self.bias))
def __call__(self, x): # easy callable
return self.forward(x)
def forward(self, x):
# making the input compatible with graph operations
if not isinstance(x, Parameter):
x = Parameter(data=x, eval_grad=False, graph=self.graph)
# flatten the input if it came from layers like Conv2d
if len(x.shape) > 2:
x = self.graph.reshape(x)
# y = Wx + b
out = self.graph.dot(self.W, x) # matmul
if self.bias: # adding bias
out = self.graph.add(out, self.b, axis=(-1,))
return out
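# A hypothetical usage sketch (added example, not original code): subclassing Module
# gives save/load and parameter collection for free, because get_module_layers()
# discovers Linear attributes by class name.
class _TinyNet(Module):
    def __init__(self):
        super(_TinyNet, self).__init__()
        self.fc1 = Linear(8, 16)
        self.fc2 = Linear(16, 2)
    def forward(self, x):
        return self.fc2(G.relu(self.fc1(x)))
# e.g. y = _TinyNet().forward(np.random.randn(8, 5)) has shape (2, 5)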
# 1D convolutional neural network
class Conv1d(Module):
def __init__(self, input_channels=None, output_channels=None, kernel_size=None, stride=(1,), padding=(0,), bias=True, graph=G):
super(Conv1d, self).__init__()
self.input_channels = input_channels
self.output_channels = output_channels
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size,)
if not isinstance(stride, tuple):
stride = (stride,)
if not isinstance(padding, tuple):
padding = (padding,)
self.kernel_size = kernel_size
self.filter_size = (self.input_channels, *(self.kernel_size))
self.stride = stride
self.padding = padding
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
root_k = np.sqrt(1. / (self.input_channels * self.kernel_size[0]))
self.K = Parameter((self.output_channels, *self.filter_size), uniform=True, low=-root_k, high=root_k, graph=self.graph)
self.b = Parameter((self.output_channels, 1, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph)
def __repr__(self):
return('Conv1d({}, {}, kernel_size={}, stride={}, padding={}, bias={})'.format(
self.input_channels, self.output_channels, self.kernel_size, self.stride, self.padding, self.bias))
def __call__(self, x): # easy callable
return self.forward(x)
def forward(self, x):
if not isinstance(x, Parameter):
x = Parameter(data=x, eval_grad=False, graph=self.graph)
# convolution operation
out = self.graph.conv1d(x, self.K, self.stride, self.padding)
if self.bias: # adding bias
out = self.graph.add(out, self.b, axis=(-2, -1))
return out
# 2D convolutional neural network
class Conv2d(Module):
def __init__(self, input_channels=None, output_channels=None, kernel_size=None, stride=(1, 1), padding=(0, 0), bias=True, graph=G):
super(Conv2d, self).__init__()
self.input_channels = input_channels
self.output_channels = output_channels
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size, kernel_size)
if not isinstance(stride, tuple):
stride = (stride, stride)
if not isinstance(padding, tuple):
padding = (padding, padding)
self.kernel_size = kernel_size
self.filter_size = (self.input_channels, *(self.kernel_size))
self.stride = stride
self.padding = padding
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
root_k = np.sqrt(1. / (self.input_channels * self.kernel_size[0] * self.kernel_size[1]))
self.K = Parameter((self.output_channels, *self.filter_size), uniform=True, low=-root_k, high=root_k, graph=self.graph)
self.b = Parameter((self.output_channels, 1, 1, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph)
def __repr__(self):
return('Conv2d({}, {}, kernel_size={}, stride={}, padding={}, bias={})'.format(
self.input_channels, self.output_channels, self.kernel_size, self.stride, self.padding, self.bias))
def __call__(self, x): # easy callable
return self.forward(x)
def forward(self, x):
if not isinstance(x, Parameter):
x = Parameter(data=x, eval_grad=False, graph=self.graph)
# convolution operation
out = self.graph.conv2d(x, self.K, self.stride, self.padding)
if self.bias: # adding bias
out = self.graph.add(out, self.b, axis=(-3, -2, -1))
return out
# 2d transposed convolutional neural network
class ConvTranspose2d(Module):
def __init__(self, input_channels=None, output_channels=None, kernel_size=None, stride=(1, 1), padding=(0, 0), a=(0, 0), bias=True, graph=G):
super(ConvTranspose2d, self).__init__()
self.input_channels = input_channels
self.output_channels = output_channels
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size, kernel_size)
if not isinstance(stride, tuple):
stride = (stride, stride)
if not isinstance(padding, tuple):
padding = (padding, padding)
if not isinstance(a, tuple):
a = (a, a)
self.kernel_size = kernel_size
self.filter_size = (self.output_channels, *(self.kernel_size))
self.stride = stride
self.padding = padding
self.a = a # for fixing a single output shape over many possible
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
root_k = np.sqrt(1. / (self.output_channels * self.kernel_size[0] * self.kernel_size[1]))
self.K = Parameter((self.input_channels, *self.filter_size), uniform=True, low=-root_k, high=root_k, graph=self.graph)
self.b = Parameter((self.output_channels, 1, 1, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph)
def __repr__(self):
return('ConvTranspose2d({}, {}, kernel_size={}, stride={}, padding={}, a={}, bias={})'.format(
self.input_channels, self.output_channels, self.kernel_size, self.stride, self.padding, self.a, self.bias))
def __call__(self, x): # easy callable
return self.forward(x)
def forward(self, x):
if not isinstance(x, Parameter):
x = Parameter(data=x, eval_grad=False, graph=self.graph)
# convolution transpose operation
out = self.graph.conv_transpose2d(x, self.K, self.stride, self.padding, self.a)
if self.bias: # adding bias
out = self.graph.add(out, self.b, axis=(-3, -2, -1))
return out
# sequence models: LSTM cell
class LSTM(Module):
def __init__(self, input_size, hidden_size, bias=True, graph=G):
super(LSTM, self).__init__()
self.input_size = input_size # size of the input at each recurrent tick
self.hidden_size = hidden_size # size of hidden units h and c
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
root_k = np.sqrt(1. / self.hidden_size)
self.W_ih = Parameter((4*self.hidden_size, self.input_size), uniform=True, low=-root_k, high=root_k, graph=self.graph) # input to hidden weight volume
self.W_hh = Parameter((4*self.hidden_size, self.hidden_size), uniform=True, low=-root_k, high=root_k, graph=self.graph) # hidden to hidden weight volume
self.b_ih = Parameter((4*self.hidden_size, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph) # input to hidden bias vector
self.b_hh = Parameter((4*self.hidden_size, 1), uniform=True, low=-root_k, high=root_k, graph=self.graph) # hidden to hidden bias vector
def __repr__(self):
return('LSTM(input_size={}, hidden_size={}, bias={})'.format(
self.input_size, self.hidden_size, self.bias))
def __call__(self, x, hidden): # easy callable
return self.forward(x, hidden)
def forward(self, x, hidden):
h, c = hidden
if not isinstance(x, Parameter):
x = Parameter(data=x, eval_grad=False, graph=self.graph)
i_h = self.graph.dot(self.W_ih, x)
if self.bias:
i_h = self.graph.add(i_h, self.b_ih, axis=(-1,))
h_h = self.graph.dot(self.W_hh, h)
if self.bias:
h_h = self.graph.add(h_h, self.b_hh, axis=(-1,))
gates = self.graph.add(i_h, h_h)
# forget, input, gate(also called cell gate - different from cell state), output gates of the lstm cell
# useful: http://colah.github.io/posts/2015-08-Understanding-LSTMs/
f, i, g, o = self.graph.split(gates, sections=4, axis=0)
f = self.graph.sigmoid(f)
i = self.graph.sigmoid(i)
g = self.graph.tanh(g)
o = self.graph.sigmoid(o)
c = self.graph.add(self.graph.multiply(f, c), self.graph.multiply(i, g))
h = self.graph.multiply(o, self.graph.tanh(c))
return (h, c)
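# A hypothetical unrolling sketch (added example, not original code): one LSTM cell
# applied across time steps, with states following the (features, batch) convention
# used throughout this file.
def _demo_lstm_unroll(sequence, input_size, hidden_size=8):
    cell = LSTM(input_size, hidden_size)
    h = Parameter((hidden_size, 1), init_zeros=True, graph=G)
    c = Parameter((hidden_size, 1), init_zeros=True, graph=G)
    for x_t in sequence:   # each x_t: (input_size, 1) array or Parameter
        h, c = cell(x_t, (h, c))
    return h, c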
# sequence models: RNN cell
class RNN(Module):
def __init__(self, input_size, hidden_size, bias=True, graph=G):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.graph = graph
self.init_params()
def init_params(self):
        root_k = np.sqrt(1. / self.hidden_size)
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""
import logging
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from parse import parse
# Some tricks to make rasterio faster when using vsicurl
# see https://github.com/pangeo-data/cog-best-practices
RASTERIO_BEST_PRACTICES = dict(
CURL_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt",
GDAL_DISABLE_READDIR_ON_OPEN="EMPTY_DIR",
AWS_NO_SIGN_REQUEST="YES",
GDAL_MAX_RAW_BLOCK_CACHE_SIZE="200000000",
GDAL_SWATH_SIZE="200000000",
VSI_CURL_CACHE_SIZE="200000000",
)
NAIP_BLOB_ROOT = "https://naipblobs.blob.core.windows.net/naip"
LOGGER = logging.getLogger("main")
# Filter method
def filter_polygon(**kwargs):
"""Decides whether a predicted polygon is valid based on the range of the aspect
ratio and area stats from the Delmarava dataset.
"""
return all(
[
(
kwargs["distance_to_nearest_road"] is None
or kwargs["distance_to_nearest_road"] > 0
),
kwargs["rectangle_aspect_ratio"] > 3.4,
kwargs["rectangle_aspect_ratio"] < 20.49,
kwargs["rectangle_area"] > 525.69,
kwargs["rectangle_area"] < 8106.53,
]
)
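# A hypothetical call sketch (added example; the stat values are invented):
def _demo_filter_polygon():
    # inside all of the Delmarva ranges above and far enough from a road -> True
    return filter_polygon(
        distance_to_nearest_road=12.0,
        rectangle_aspect_ratio=5.1,
        rectangle_area=2400.0,
    )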
# Helper function for parsing results
def convert_results_to_series(results):
series = {key: [] for key in results[0].keys()}
for epoch in results:
for k, v in epoch.items():
series[k].append(v)
return series
def parse_fn_parts(run):
parsed_run = None
if "_rotation" in run:
parsed_run = parse(
"{training_set}_{model}_{negative_sample_probability:f}_{lr:f}_rotation",
run,
).named
parsed_run["rotation"] = True
else:
parsed_run = parse(
"{training_set}_{model}_{negative_sample_probability:f}_{lr:f}", run
).named
parsed_run["rotation"] = False
return parsed_run
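# A hypothetical example (added; the run name is invented): parse() recovers the
# hyperparameters that were baked into the file name, e.g.
# parse_fn_parts("delmarva_unet_0.5_0.001_rotation") ->
#   {'training_set': 'delmarva', 'model': 'unet',
#    'negative_sample_probability': 0.5, 'lr': 0.001, 'rotation': True}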
# Method for creating torch tensor from input chip
def chip_transformer(img):
img = img / 255.0
    img = np.rollaxis(img, 2, 0)
import os
import ssl
from argparse import ArgumentParser
from keras.applications import ResNet50, VGG19, VGG16, InceptionV3, \
MobileNetV2, InceptionResNetV2, Xception, DenseNet201, MobileNet, \
NASNetMobile, NASNetLarge
from keras.applications.imagenet_utils import preprocess_input
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import load_model
ssl._create_default_https_context = ssl._create_unverified_context
MODELS = {
'DenseNet201': {'IMG_SIZE': 224, 'PROCESSING': 'torch', 'TRANSFER_LEARNING': DenseNet201},
'MobileNetV2': {'IMG_SIZE': 224, 'PROCESSING': 'tf', 'TRANSFER_LEARNING': MobileNetV2},
'VGG19': {'IMG_SIZE': 224, 'PROCESSING': 'caffe', 'TRANSFER_LEARNING': VGG19},
'NASNetMobile': {'IMG_SIZE': 224, 'PROCESSING': 'tf', 'TRANSFER_LEARNING': NASNetMobile},
'InceptionResNetV2': {'IMG_SIZE': 299, 'PROCESSING': 'tf', 'TRANSFER_LEARNING': InceptionResNetV2},
'InceptionV3': {'IMG_SIZE': 299, 'PROCESSING': 'tf', 'TRANSFER_LEARNING': InceptionV3},
'ResNet50': {'IMG_SIZE': 224, 'PROCESSING': 'caffe', 'TRANSFER_LEARNING': ResNet50},
'Xception': {'IMG_SIZE': 299, 'PROCESSING': 'tf', 'TRANSFER_LEARNING': Xception},
}
def reshape_data(data, pretrained_model):
return np.reshape(data, (
len(data), pretrained_model.output_shape[1] *
pretrained_model.output_shape[2] *
pretrained_model.output_shape[3]))
def get_feat_count(output_shape):
count = 1
for i in range(1, len(output_shape)):
count = count * output_shape[i]
return count
def get_pretrained_model(model_name, photos_type, model_dir=False, weights_dir=False):
model_dir = model_dir if model_dir else os.path.join(os.path.curdir, 'models')
weights_dir = os.path.join(os.path.curdir, weights_dir) if weights_dir else os.path.join(os.path.curdir, 'weights')
print(weights_dir)
for model in os.listdir(weights_dir):
print(model)
if model_name.lower() in model.lower() and photos_type in model.lower():
print(os.path.join(model_dir, model))
model_path = '{}.h5'.format(model.replace('/weights/', '/models/').replace('_weights_', '_model_').split('_score')[0])
print(model_path)
pretrained_model = load_model(os.path.join(os.path.join(model_dir, model_path)))
pretrained_model.load_weights(os.path.join(os.path.join(weights_dir, model)))
print('Loaded model: {}'.format(model_name))
return pretrained_model
raise Exception('no model found')
def eval_model(train_dir, model_name, weights, evaluation=False):
print(model_name)
model = MODELS[model_name]
img_size = model['IMG_SIZE']
orig_model = model['TRANSFER_LEARNING'](
weights='imagenet', include_top=False, input_shape=(img_size, img_size, 3))
datagen = ImageDataGenerator()
generator = datagen.flow_from_directory(
train_dir,
target_size=(model['IMG_SIZE'], model['IMG_SIZE']),
batch_size=1,
class_mode='binary',
shuffle=False)
if evaluation:
return generator.labels[0:len(generator.labels)+1]
photos_type = 'photoshop' if 'photoshop' in train_dir else 'flipped' if 'flipped' in train_dir else 'gan'
print(photos_type)
pretrained_model = get_pretrained_model(model_name, photos_type, weights_dir=weights)
predictions = []
score = 0
for inputs_batch, labels_batch in generator:
features_batch = orig_model.predict(
preprocess_input(inputs_batch, mode=model['PROCESSING']))
op_shape = features_batch.shape
features_batch = np.reshape(features_batch, (
inputs_batch.shape[0], op_shape[-3] * op_shape[-2] * op_shape[-1]))
prediction = pretrained_model.predict(features_batch, verbose=0)
if np.round(prediction[0], 2) >= 0.5:
predictions.append(1)
else:
predictions.append(0)
scores = pretrained_model.evaluate(features_batch, labels_batch, verbose=0)
score += scores[1] * 100
if len(predictions) >= len(generator.labels):
break
print('Total score for: {} - {}%'.format(model_name, round(score / len(predictions), 2)))
return predictions
def main():
parser = ArgumentParser(__doc__)
parser.add_argument("--eval", required=False,
help="folder containing two directories for evaluation")
parser.add_argument("--pic", required=False,
help="picture for prediction")
parser.add_argument("--predict", required=False,
help="directory for predictions")
parser.add_argument("--weights", required=True, default='./weights/',
help="directory for weights")
args = parser.parse_args()
if args.eval:
eval_dir = os.path.relpath(args.eval)
any_model = list(MODELS.keys())[0]
truth = eval_model(eval_dir, any_model, evaluation=True, weights=args.weights)
print('Truth: {}'.format(truth))
votes = []
combined_votes = []
for key, _ in MODELS.items():
single_model_vote = eval_model(eval_dir, key, weights=args.weights)
print('{}: predictions {}'.format(key, single_model_vote))
votes.append(single_model_vote)
item = 0
for element in single_model_vote:
try:
combined_votes[item] = (combined_votes[item] + element) / 2
except IndexError:
combined_votes.append(element)
item += 1
print('{}: combined {}'.format(key, combined_votes))
# average_votes = np.average(np.array(votes), axis=0)
# print(average_votes)
        final_predictions = np.where(np.array(combined_votes) >= 0.5, 1, 0)  # 0.5 vote threshold inferred from the per-model rule above
from abc import ABCMeta
from abc import abstractmethod
from ..np_utils import ops
import numpy as np
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results, use_matmul_gather=False):
    if match_results.ndim != 1:
      raise ValueError('match_results should have rank 1')
    if match_results.dtype != np.int32:
      raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
# TODO
# if use_matmul_gather:
# self._gather_op = ops.matmul_gather_on_zeroth_axis
@property
def match_results(self):
return self._match_results
def matched_column_indices(self):
return self._reshape_and_cast(np.where(np.greater(self._match_results, -1)))
def matched_column_indicator(self):
return np.greater_equal(self._match_results, 0)
def num_matched_columns(self):
return np.size(self.matched_column_indices())
def unmatched_column_indices(self):
return self._reshape_and_cast(np.where(np.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
    return np.equal(self._match_results, -1)
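# A hypothetical usage sketch of the numpy port above (added example, not original
# code): -1 marks an unmatched column, values >= 0 store the matched row index.
# m = Match(np.array([2, -1, 0, -1], dtype=np.int32))
# m.matched_column_indicator() -> array([ True, False,  True, False])
# m.num_matched_columns() -> 2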
# Python program to create
# Image Classifier using CNN
# Importing the required libraries
import cv2
import os
import numpy as np
from random import shuffle
from tqdm import tqdm
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
'''Setting up the env'''
TRAIN_DIR = "v_data\\train"
TEST_DIR = "v_data\\test"
IMG_SIZE = 100
LR = 1e-3
'''Setting up the model which will help with tensorflow models'''
MODEL_NAME = 'smollan-{}-{}.model'.format(LR, '6conv-basic')
def get_label_name(path):
dirs = os.listdir(path)
return np.identity(len(dirs),dtype=int), dirs
def create_train_data():
i = 0
training_data = []
labels,dirs = get_label_name(TRAIN_DIR)
images = []
j = 0
label_dirs = get_all_image_names(TRAIN_DIR)
for _img in label_dirs:
img = cv2.imread(_img, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
if(_img.find("cars") != -1):
            training_data.append([np.array(img), labels[dirs.index('cars')]])  # label lookup inferred from the 'cars' check above
"""
A module for a fast looping through a dataset (using python's multiprocessing)
Author: <NAME> (<EMAIL>) and <NAME>
Last update: March 9, 2014
"""
import multiprocessing
import numpy as np
import h5py, os, sys, time
import utils
__all__ = ['FastLoop']
def _worker_call(worker, pipe):
work_package = pipe.recv()
while work_package != "fika":
pipe.send(worker(work_package))
work_package = pipe.recv()
class FastLoop(object):
def __init__(self, inputfile, outputfile, Nprocesses, Njobs, loopfunc, updaterate, *args, **kwargs):
self.inputfile = inputfile
self.outputfile = outputfile
self.index = 0
self.Nprocesses = Nprocesses
self.Njobs = Njobs
self.framerate_update = updaterate
self.loopfunc = loopfunc
self.args = args
self.kwargs = kwargs
self.load()
def load(self):
with h5py.File(self.inputfile, 'r') as datafile: self.input = {item:datafile[item][:] for item in datafile.keys()}
with h5py.File(self.outputfile, 'r') as datafile: self.output = {item:datafile[item][:] for item in datafile.keys()}
#pass
#@profile
def next(self):
self.index += 1
#with h5py.File(self.inputfile, 'r') as datafile: data = datafile['data'][self.index-1]
data = {item:self.input[item][self.index-1] for item in self.input}
input = {'i':self.index-1, 'data':data, 'args':self.args, 'kwargs':self.kwargs}
return input
#@profile
def save(self, res):
for k in res:
#with h5py.File(self.outputfile, 'a') as file:
# if not k == 'i':
# file[k][res['i']] = res[k]
if not k == 'i':
self.output[k][res['i']] = res[k]
#if res[k].ndim == 0:
# self.output[k][res['i']] = res[k]
#else:
# self.output[k][res['i']][...] = res[k]
#@profile
def write(self, progress = 1.):
utils.progressbar(progress, 'Saving results to file')
with h5py.File(self.outputfile, 'a') as datafile:
for item in self.output: datafile[item][...] = self.output[item]
#@profile
def start(self):
# Decide how many parallel processes to use
available_cpus = multiprocessing.cpu_count()
if self.Nprocesses == 'max': self.Nprocesses = available_cpus
elif self.Nprocesses > available_cpus: self.Nprocesses = available_cpus
# Initialize lists keeping track of read/write pipes
pipes_end_host = list(np.zeros(self.Nprocesses))
pipes_end_worker = list(np.zeros(self.Nprocesses))
processes = list(np.zeros(self.Nprocesses))
# Start multiple processes
for i in range(self.Nprocesses):
pipes_end_host[i], pipes_end_worker[i] = multiprocessing.Pipe()
processes[i] = multiprocessing.Process(target=_worker_call, args=(self.loopfunc, pipes_end_worker[i],) )
processes[i].start()
# Variables to keep track of jobs started/done
Njobs_done = 0
Njobs_started = 0
# Send initial jobs
for r in pipes_end_host:
r.send(self.next())
Njobs_started += 1
# Some parameters for diagnostics
t_start = time.time()
message = 'Datarate %.2f Hz; job %i/%i; process'
# This is the main loop, it waits for new jobs and sends the input
while Njobs_done < self.Njobs:
for r in pipes_end_host:
if r.poll():
result = r.recv()
if Njobs_started < self.Njobs:
r.send(self.next())
Njobs_started += 1
self.save(result)
Njobs_done += 1
# Give some feedback to the command line once in a while
if ((Njobs_done + 1) % self.framerate_update) == 0:
progress = float(Njobs_done) / self.Njobs
datarate = (Njobs_done + 1) / (time.time() - t_start)
utils.progressbar(progress, message %(datarate, Njobs_done + 1, self.Njobs), t_start)
# Close all processes
for i in range(self.Nprocesses):
pipes_end_host[i].send('fika')
processes[i].join()
pipes_end_host[i].close()
# ===================================
# Some code for testing/profiling
# ===================================
if __name__ == '__main__':
from fitting import fit_photon_histograms
import logging
logging.captureWarnings(True)
histfile = h5py.File('testing/histogram.h5', 'r')
Hmap = histfile['data/histogram']
Hbinsize = histfile['data/histogramBinsize'][0]
Hcount = histfile['data/histogramCount'][0]
Hmin = histfile['data/histogramMin'][0]
Hnbins = histfile['data/histogramNbins'][0]
    Hbins = np.linspace(Hmin, Hnbins + Hmin - 1, Hnbins)
import os
import sys
import numpy as np
import _pickle as pkl
import matplotlib.pyplot as plt
import plotly.express as px
from rulefit import RuleFit
from sklearn.model_selection import train_test_split
import itertools
import pandas as pd
from colorsys import hsv_to_rgb
from tqdm import tqdm
from sklearn.tree import _tree
from utils import *
from scipy.stats import random_correlation
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
from sklearn.base import clone
import plotly.graph_objects as go
class FeatureVec(object):
"Feature-vector class."
def __init__(
self, mode, max_depth=3, feature_names=None, max_sentences=20000,
exp_rand_tree_size=True, tree_generator=None,
):
'''
mode: 'classify' or 'regress'
max_depth: maximum depth of trained trees
feature_names: names of features
max_sentences: maximum number of extracted sentences
        exp_rand_tree_size: grow trees with randomly varying sizes
tree_generator: Tree generator model (overwrites above features)
'''
self.feature_names = feature_names
self.mode = mode
max_leafs = 2 ** max_depth
num_trees = max_sentences // max_leafs
if tree_generator is None:
tree_generator = RandomForestClassifier(num_trees, max_depth=max_depth)
self.exp_rand_tree_size = exp_rand_tree_size
self.rf = RuleFit(
rfmode=mode, tree_size=max_leafs, max_rules=max_sentences, tree_generator=tree_generator,
            exp_rand_tree_size=exp_rand_tree_size, fit_lasso=False, Cs=10.**np.arange(-4, 1), cv=3)
def fit(self, X, y, restart=True, bagging=0):
'''Fit the tree model.
X: inputs
y: outputs (integer class label or real value)
restart: To train from scratch tree generator model
bagging: If >0 applies bagging on trees to compute confidence intervals
'''
if not bagging:
bagging = 0
dimred = TruncatedSVD(2)
self.rf.fit(X, y, restart=restart)
rules = self.rf.get_rules()['rule'].values
cm = cooccurance_matrix(rules, X.shape[-1])
vectors = dimred.fit_transform(cm)
vectors = normalize_angles(vectors)
self.norms = np.clip(np.linalg.norm(vectors, axis=-1, keepdims=True), 1e-12, None)
vectors /= np.max(self.norms)
self.vectors = vectors
        self.importance = np.linalg.norm(self.vectors, axis=-1)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""single dataloader"""
import os.path
from PIL import Image
import numpy as np
from src.data.base_dataloader import getSoft, getfeats
def soft_border_process(opt, mask, center, rws, rhs):
"""soft_border_process"""
imgsize = opt.fineSize
maskn = mask[0].asnumpy()
masks = [np.ones([imgsize, imgsize]), np.ones([imgsize, imgsize]), np.ones([imgsize, imgsize]),
np.ones([imgsize, imgsize])]
masks[0][1:] = maskn[:-1]
masks[1][:-1] = maskn[1:]
masks[2][:, 1:] = maskn[:, :-1]
masks[3][:, :-1] = maskn[:, 1:]
masks2 = [maskn - e for e in masks]
bound = np.minimum.reduce(masks2)
bound = -bound
xb = []
yb = []
for i in range(4):
xbi = [center[i, 0] - rws[i] / 2, center[i, 0] + rws[i] / 2 - 1]
ybi = [center[i, 1] - rhs[i] / 2, center[i, 1] + rhs[i] / 2 - 1]
for j in range(2):
maskx = bound[:, xbi[j]]
masky = bound[ybi[j], :]
xb += [(1 - maskx) * 10000 + maskx * xbi[j]]
yb += [(1 - masky) * 10000 + masky * ybi[j]]
soft = 1 - getSoft([imgsize, imgsize], xb, yb)
    soft = np.expand_dims(soft, axis=0)
# -*- coding: utf-8 -*-
# _realizeNTF_ct.py
# Module providing the realizeNTF_ct function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the realizeNTF_ct() function
"""
from __future__ import division, print_function
from warnings import warn
import numpy as np
import numpy.linalg as linalg
from scipy.signal import dimpulse, ss2zpk
from ._evalTFP import evalTFP
from ._impL1 import impL1
from ._padb import padb
from ._pulse import pulse
from ._utils import _get_zpk, carray, eps
def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
ABCDc=None, method='LOOP'):
"""Realize an NTF with a continuous-time loop filter.
**Parameters:**
ntf : object
A noise transfer function (NTF).
form : str, optional
A string specifying the topology of the loop filter.
* 'FB': Feedback form,
* 'FF': Feedforward form
For the FB structure, the elements of ``Bc`` are calculated
so that the sampled pulse response matches the L1 impulse
response. For the FF structure, ``Cc`` is calculated.
tdac : sequence, optional
The timing for the feedback DAC(s). If ``tdac[0] >= 1``,
direct feedback terms are added to the quantizer.
Multiple timings (one or more per integrator) for the FB
topology can be specified by making tdac a list of lists,
e.g. ``tdac = [[1, 2], [1, 2], [[0.5, 1], [1, 1.5]], []]``
In this example, the first two integrators have
DACs with ``[1, 2]`` timing, the third has a pair of
DACs, one with ``[0.5, 1]`` timing and the other with
``[1, 1.5]`` timing, and there is no direct feedback
DAC to the quantizer.
ordering : sequence, optional
A vector specifying which NTF zero-pair to use in each resonator
Default is for the zero-pairs to be used in the order specified
in the NTF.
bp : sequence, optional
A vector specifying which resonator sections are bandpass.
The default (``zeros(...)``) is for all sections to be lowpass.
ABCDc : ndarray, optional
The loop filter structure, in state-space form.
If this argument is omitted, ABCDc is constructed according
to "form."
method : str, optional
The default fitting method is ``'LOOP'``, which means that
the DT and CT loop responses will be matched.
Alternatively, it is possible to set the method to ``'NTF'``,
which will result in the NTF responses to be matched.
See :ref:`discrete-time-to-continuous-time-mapping` for a
more in-depth discussion.
**Returns:**
ABCDc : ndarray
A state-space description of the CT loop filter
tdac2 : ndarray
A matrix with the DAC timings, including ones
that were automatically added.
**Example:**
Realize the NTF :math:`(1 - z^{-1})^2` with a CT system (cf with the
example at :func:`mapCtoD`).::
from deltasigma import *
ntf = ([1, 1], [0, 0], 1)
ABCDc, tdac2 = realizeNTF_ct(ntf, 'FB')
Returns:
ABCDc::
[[ 0. 0. 1. -1. ]
[ 1. 0. 0. -1.49999999]
[ 0. 1. 0. 0. ]]
tdac2::
[[-1. -1.]
[ 0. 1.]]
"""
ntf_z, ntf_p, _ = _get_zpk(ntf)
ntf_z = carray(ntf_z)
ntf_p = carray(ntf_p)
order = max(ntf_p.shape)
order2 = int(np.floor(order/2.))
odd = order - 2*order2
# compensate for limited accuracy of zero calculation
ntf_z[np.abs(ntf_z - 1) < eps**(1./(1. + order))] = 1.
method = method.upper()
if method not in ('LOOP', 'NTF'):
raise ValueError('Unimplemented matching method %s.' % method)
# check if multiple timings mode
if (type(tdac) == list or type(tdac) == tuple) and len(tdac) and \
(type(tdac[0]) == list or type(tdac[0]) == tuple):
if len(tdac) != order + 1:
msg = 'For multi-timing tdac, len(tdac) ' + \
' must be order+1.'
raise ValueError(msg)
if form != 'FB':
msg = "Currently only supporting form='FB' " + \
'for multi-timing tdac'
raise ValueError(msg)
multi_timing = True
else: # single timing
tdac = carray(tdac)
if np.prod(tdac.shape) != 2:
msg = 'For single-timing tdac, len(tdac) must be 2.'
raise ValueError(msg)
tdac.reshape((2,))
multi_timing = False
if ordering is None:
ordering = np.arange(order2)
if bp is None:
bp = np.zeros((order2,))
if not multi_timing:
# Need direct terms for every interval of memory in the DAC
n_direct = np.ceil(tdac[1]) - 1
if tdac[0] > 0 and tdac[0] < 1 and tdac[1] > 1 and tdac[1] < 2:
n_extra = n_direct - 1 # tdac pulse spans a sample point
else:
n_extra = n_direct
tdac2 = np.vstack(
(np.array((-1, -1)),
np.array(tdac).reshape((1, 2)),
0.5*np.dot(np.ones((n_extra, 1)), np.array([[-1, 1]]))
+ np.cumsum(np.ones((n_extra, 2)), 0) + (n_direct - n_extra)
))
else:
n_direct = 0
n_extra = 0
if ABCDc is None:
ABCDc = np.zeros((order + 1, order + 2))
# Stuff the A portion
if odd:
ABCDc[0, 0] = np.real(np.log(ntf_z[0]))
ABCDc[1, 0] = 1
dline = np.array([0, 1, 2])
for i in range(order2):
n = bp[i]
i1 = 2*i + odd
zi = 2*ordering[i] + odd
w = np.abs(np.angle(ntf_z[zi]))
ABCDc[i1 + dline, i1] = np.array([0, 1, n])
ABCDc[i1 + dline, i1 + 1] = np.array([-w**2, 0, 1 - n])
ABCDc[0, order] = 1
# 2006.10.02 Changed to -1 to make FF STF have +ve gain at DC
ABCDc[0, order + 1] = -1
Ac = ABCDc[:order, :order]
if form == 'FB':
Cc = ABCDc[order, :order].reshape((1, -1))
if not multi_timing:
Bc = np.hstack((np.eye(order), np.zeros((order, 1))))
Dc = np.hstack((np.zeros((1, order)), np.array([[1]])))
tp = np.tile(np.array(tdac).reshape((1, 2)), (order + 1, 1))
else: #Assemble tdac2, Bc and Dc
tdac2 = np.array([[-1, -1]])
Bc = None
Dc = None
Bci = np.hstack((np.eye(order), np.zeros((order, 1))))
            Dci = np.hstack((np.zeros((1, order)), np.array([[1]])))
import math
import numpy as np
from onireader._onireader import Device as _Device
from onireader._onireader import ANY_DEVICE, PixelFormat
class Intrinsics:
def __init__(self, fx, fy, cx, cy):
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
def __str__(self):
return "Intrinsics: fx={}, fy={}, cx={}, cy={}".format(
self.fx, self.fy, self.cx, self.cy)
def __repr__(self):
return str(self)
class Device(_Device):
def __init__(self):
super(Device, self).__init__()
def open(self, device_uri=None):
if device_uri is None:
device_uri = ""
return super(Device, self).open(device_uri)
def find_best_fit_modes(self, width, height,
depth_format=PixelFormat.DEPTH_1_MM,
rgb_format=PixelFormat.RGB888):
depth_vmodes = self.get_depth_video_modes()
rgb_vmodes = self.get_color_video_modes()
        target_res = np.array([width, height])
import numpy as np
import torch
from torch.backends import cudnn
cudnn.enabled = True
import voc12.data
import scipy.misc
import importlib
from torch.utils.data import DataLoader
import torchvision
from tool import imutils, pyutils
import argparse
from PIL import Image
import torch.nn.functional as F
import os.path
import matplotlib.pyplot as plt
import imageio
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--weights", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\voc12\vgg_cls.pth", type=str),
parser.add_argument("--network", default="network.vgg16_cls", type=str),
parser.add_argument("--infer_list", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\voc12\val.txt", type=str)
parser.add_argument("--num_workers", default=1, type=int)
parser.add_argument("--voc12_root", default = r"C:\Users\johny\Desktop\Transformer-Explainability-main\ours\VOCdevkit\VOC2012", required=False, type=str)
parser.add_argument("--low_alpha", default=4, type=int)
parser.add_argument("--high_alpha", default=32, type=int)
parser.add_argument("--out_cam", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\out_cam", type=str)
parser.add_argument("--out_la_crf", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\out_la_crf", type=str)
parser.add_argument("--out_ha_crf", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\out_ha_crf", type=str)
parser.add_argument("--out_cam_pred", default=r"C:\Users\johny\Desktop\Transformer-Explainability-main\psa-master\out_cam_pred", type=str)
args = parser.parse_args()
model = getattr(importlib.import_module(args.network), 'Net')()
model.load_state_dict(torch.load(args.weights))
model.eval()
model.cuda()
infer_dataset = voc12.data.VOC12ClsDatasetMSF(args.infer_list, voc12_root=args.voc12_root,
scales=(1, 0.5, 1.5, 2.0),
inter_transform=torchvision.transforms.Compose(
[np.asarray,
model.normalize,
imutils.HWC_to_CHW]))
infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=args.num_workers, pin_memory=True)
n_gpus = torch.cuda.device_count()
model_replicas = torch.nn.parallel.replicate(model, list(range(n_gpus)))
for iter, (img_name, img_list, label) in enumerate(infer_data_loader):
img_name = img_name[0]; label = label[0]
img_path = voc12.data.get_img_path(img_name, args.voc12_root)
orig_img = np.asarray(Image.open(img_path))
orig_img_size = orig_img.shape[:2]
def _work(i, img):
with torch.no_grad():
with torch.cuda.device(i%n_gpus):
cam = model_replicas[i%n_gpus].forward_cam(img.cuda())
cam = F.upsample(cam, orig_img_size, mode='bilinear', align_corners=False)[0]
cam = cam.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
if i % 2 == 1:
cam = np.flip(cam, axis=-1)
return cam
thread_pool = pyutils.BatchThreader(_work, list(enumerate(img_list)),
batch_size=12, prefetch_size=0, processes=args.num_workers)
cam_list = thread_pool.pop_results()
# cam_list = [np.asarray(cam_list)]
sum_cam = np.sum(cam_list, axis=0)
norm_cam = sum_cam / (np.max(sum_cam, (1, 2), keepdims=True) + 1e-5)
cam_dict = {}
for i in range(20):
if label[i] > 1e-5:
cam_dict[i] = norm_cam[i]
if args.out_cam is not None:
            np.save(args.out_cam + "/" + img_name + '.npy', cam_dict)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 14:40:17 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
def fo_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
g, h = np.zeros(n), np.zeros(n)
for i in range(n):
h[i] = eps
g[i] = (f(x+h, *args) - f(x, *args)) / eps
h[i] = 0
return g
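# A quick sanity-check sketch (added example, not original code): the forward
# difference above should match an analytic gradient to a small absolute tolerance.
def _demo_fo_fc_fd():
    f = lambda x: np.sum(x ** 2)          # analytic gradient: 2x
    x0 = np.array([1.0, -2.0, 3.0])
    return np.allclose(fo_fc_fd(f, x0), 2.0 * x0, atol=1e-4)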
def so_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
    H, hi, hj = np.zeros((n, n)), np.zeros(n), np.zeros(n)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import init_ops
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, use_gpu):
"""Tests if two initializations are identical to within tiny tolerances.
Args:
tc: An instance of TensorFlowTestCase.
init1: An Initializer that generates a tensor of a given shape
init2: An Initializer that generates a tensor of a given shape
use_gpu: Use gpu if true.
Returns:
True or False as determined by test.
"""
num = 100
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
t1 = init1([num]).eval()
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
t2 = init2([num]).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, use_gpu, graph_seed):
"""Tests duplicated random initializer within the same graph.
This test generates two random kernels from the same initializer to the same
  graph, and checks if the results are close enough. Even given the same global
seed, two different instances of random kernels should generate different
results.
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
use_gpu: Use gpu if true.
graph_seed: A graph-level seed to use.
Returns:
True or False as determined by test.
"""
num = 100
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
random_seed.set_random_seed(graph_seed)
t1 = init([num]).eval()
t2 = init([num]).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num, use_gpu):
"""Returns a func to generate a random tensor of shape [num].
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
num: Size of 1D tensor to create.
use_gpu: Use gpu if true.
Returns:
Function to generate a random tensor.
"""
def func():
with tc.test_session(use_gpu=use_gpu):
return init([num]).eval()
return func
class ConstantInitializersTest(tf.test.TestCase):
def testZerosInitializer(self):
with self.test_session():
shape = [2, 3]
x = tf.get_variable("x", shape=shape, initializer=tf.zeros_initializer)
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testOnesInitializer(self):
with self.test_session():
shape = [2, 3]
x = tf.get_variable("x", shape=shape, initializer=tf.ones_initializer)
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
def testConstantZeroInitializer(self):
with self.test_session():
shape = [2, 3]
x = tf.get_variable("x", shape=shape,
initializer=tf.constant_initializer(0.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testConstantOneInitializer(self):
with self.test_session():
shape = [2, 3]
x = tf.get_variable("x", shape=shape,
initializer=tf.constant_initializer(1.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
class RandomNormalInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.random_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
tf.random_normal_initializer, 0.0, 1.0, dtype=tf.string)
class TruncatedNormalInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.truncated_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
tf.truncated_normal_initializer, 0.0, 1.0, dtype=tf.string)
class RandomUniformInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = tf.random_uniform_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.random_uniform_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
tf.random_uniform_initializer, 0.0, 1.0, dtype=tf.string)
class UniformUnitScalingInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init3, init4, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
for dtype in [tf.float32, tf.float64]:
init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = tf.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, use_gpu))
self.assertFalse(identicaltest(self, init1, init3, use_gpu))
self.assertFalse(identicaltest(self, init2, init3, use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
tf.uniform_unit_scaling_initializer, dtype=tf.string)
class RandomWalkShapeTest(tf.test.TestCase):
def testRandomWalk(self):
# Fully known shape.
rnd1 = init_ops._random_walk([1, 2], tf.nn.relu)
self.assertEqual([1, 2], rnd1.get_shape())
# TODO(vrv): move to sequence_ops_test?
class RangeTest(tf.test.TestCase):
def _Range(self, start, limit, delta):
with self.test_session():
tf_ans = tf.range(start, limit, delta, name="range")
self.assertEqual([len(range(start, limit, delta))], tf_ans.get_shape())
return tf_ans.eval()
def testBasic(self):
self.assertTrue(np.array_equal(
self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
self.assertTrue(np.array_equal(
self._Range(0, 5, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(
self._Range(0, 6, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(
self._Range(13, 32, 7), np.array([13, 20, 27])))
self.assertTrue(np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
self.assertEqual(tf.range(0, 5, 1).dtype, tf.int32)
def testLimitOnly(self):
with self.test_session():
self.assertAllEqual(np.arange(5), tf.range(5).eval())
def testEmpty(self):
for start in 0, 5:
self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(tf.test.TestCase):
def _gpu_modes(self):
if tf.test.is_gpu_available():
return [False, True]
else:
return [False]
def _LinSpace(self, start, stop, num):
# NOTE(touts): Needs to pass a graph to get a new session each time.
with tf.Graph().as_default() as graph:
with self.test_session(graph=graph, force_gpu=self.force_gpu):
tf_ans = tf.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return tf_ans.eval()
def testPositive(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 3),
np.array([1., 3., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 4),
np.array([1., 7. / 3., 11. / 3., 5.]), 1e-5)
def testNegative(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., -5., 2),
                           np.array([-1., -5.]), 1e-5)
from interface.terminal.cmd_info import error
import pdb
import inspect
import csv, yaml
import glob, os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from csv import writer
from analysis.mesure import *
def zero_to_nan(values):
"""Replace every 0 with 'nan' and return a copy."""
return [float('nan') if x==0 else x for x in values]
def divergence_aux(f):
"""
Computes the divergence of the vector field f, corresponding to dFx/dx + dFy/dy + ...
:param f: List of ndarrays, where every item of the list is one dimension of the vector field
:return: Single ndarray of the same shape as each of the items in f, which corresponds to a scalar field
"""
num_dims = len(f)
return np.ufunc.reduce(np.add, [np.gradient(f[i], axis=i) for i in range(num_dims)])
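# A small sanity check (added example, not original code): for the linear field
# f = (x, y), dFx/dx + dFy/dy = 2 everywhere on a uniform unit grid.
def _demo_divergence_aux():
    x, y = np.meshgrid(np.arange(8.0), np.arange(8.0), indexing='ij')
    return np.allclose(divergence_aux([x, y]), 2.0)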
def precision_indicators(Cd,Cl,save_field_x_ite, noise=True):
if noise:
Cd_max = np.max(Cd[int(len(Cd)*(3/4))::save_field_x_ite])
Cd_min = np.min(Cd[int(len(Cd)*(3/4))::save_field_x_ite])
Cl_max = np.max(Cl[int(len(Cl)*(3/4))::save_field_x_ite])
Cl_min = np.min(Cl[int(len(Cl)*(3/4))::save_field_x_ite])
else:
Cd_max = np.max(Cd)
Cd_min = np.min(Cd)
Cl_max = np.max(Cl)
Cl_min = np.min(Cl)
Cl = zero_to_nan(Cl)
Cd = zero_to_nan(Cd)
Cl_mean = np.nanmean(Cl[save_field_x_ite:])
Cd_mean = np.nanmean(Cd[save_field_x_ite:])
DCd = (Cd_max-Cd_min)
DCl = (Cl_max-Cl_min)
center = [Cd_min + DCd/2 , Cl_min + DCl/2]
return Cd_max, Cd_min, Cd_mean, Cl_max, Cl_min, Cl_mean, center, DCd, DCl
def calculate_coeficients(in_dir, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly, xD, D, distance=0.2):
DOMAIN = Domain(x=Nx, y=Ny, boundaries=OPEN, bounds=Box[0:Lx, 0:Ly])
CYLINDER_2 = HardGeometryMask(Sphere([xD, Ly/2], radius=D/2 + distance )) >> DOMAIN.scalar_grid()
p_field = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_pressure_field.npy')
ite = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_iteration_field.npy')
vforce=np.zeros(Nt)
hforce=np.zeros(Nt)
for z, itera in enumerate(ite):
hforce[itera], vforce[itera] = calculate_forces(p_field[z,0], CYLINDER_2, dx, dy)
t, t_null_vel, _, _, _ = detect_stationary(vforce, Nt, dt)
if t_null_vel <= Nt/3:
t_null_vel = int(Nt/2)
    # convert forces to dimensionless coefficients
density=1
V0=1
Cl = (vforce*2)/(density*V0*V0*D)
Cd = (hforce*2)/(density*V0*V0*D)
return t, t_null_vel, Cl, Cd
def calculate_performance_coeficients(in_dir, alpha, Re, Nx, Ny):
with open(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_performance_results.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
poisson_time = 0
ite_time = 0
unet_inference_time = 0
cg_inference_time = 0
for row in csv_reader:
if row[1] == "run_init":
time_init = float(row[2])
if row[1] == "run_end":
total_time = float(row[2]) - time_init
if "init_iteration_" in row[1]:
ite_init = float(row[2])
if "_>init_post" in row[1]:
ite_time = ite_time + (float(row[2]) - ite_init)
if "init_poisson__" in row[1]:
poisson_init = float(row[2])
if "end_poisson__" in row[1]:
poisson_time = poisson_time + (float(row[2])-poisson_init)
if "_>UNET_inference_interval" in row[1]:
unet_inference_time += float(row[3])
if "_>CG_inference_interval" in row[1]:
cg_inference_time += float(row[3])
return total_time, ite_time, poisson_time, unet_inference_time, cg_inference_time
####################
def divergence_time(in_dir, out_dir, meta_out_dir, ax, fig, last_ite, dir_name, alpha, Re, Nx, Ny, y_lim=[], meta=False, meta_within_case=False, meta_within_alpha=False):
if not meta and not meta_within_case and not meta_within_alpha:
fig, ax = plt.subplots()
#1.Import the velocity fields
u_field = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_velocity_x_field.npy')
v_field = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_velocity_y_field.npy')
ite = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_iteration_field.npy')
#2.Calculate the divergence field
dive = []
for j, _ in enumerate(ite):
dive.append( np.sum( divergence_aux([u_field[j,0],v_field[j,0]]) ) / (Nx*Ny) )
#3.Plot
saux = r"$\alpha$="
newline = "\n"
if meta:
mlabel = f'{dir_name} {saux}{alpha}'
elif meta_within_alpha:
mlabel = f'{dir_name}'
elif meta_within_case:
mlabel = f'{saux}{alpha}'
else:
mlabel = []
ax.plot(ite[1:], dive[1:], '--o', label=mlabel if mlabel else "")
if last_ite: # or (not meta and not meta_within_case and not meta_within_alpha):
ax.set(xlabel='iteration', ylabel='Average Divergence', title=f'{f"{dir_name + newline}" if not meta and not meta_within_alpha else ""}' + f'[' + f'{ f"Alpha={alpha}, " if not meta and not meta_within_case else ""}' +f'Re={Re}, N=[{Nx}x{Ny}] ]\n')
if mlabel:
ax.legend()
if not y_lim:
pass
else:
ax.set_ylim([y_lim[0], y_lim[1]])
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)
ax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)
fig.savefig(f"{meta_out_dir if (meta or meta_within_alpha) else out_dir}A_{alpha if not meta_within_case and not meta else 'XX'}_RE_{Re}_dx_{Nx}_{Ny}_Divergence_Average_Residual.png")
plt.close()
def divergence_timestep(in_dir, out_dir, dir_name, alpha, Re, Nx, Ny, Lx, Ly, xD, D, dx, dy, time_step=20, zoom=True):
zoom_position=[xD-D,xD+D,Ly/2-D,Ly/2+D]
if zoom:
[x1,x2,y1,y2] = zoom_position
x1 = int(x1/dx)
x2 = int(x2/dx)
y1 = int(y1/dy)
y2 = int(y2/dy)
else:
x = np.arange(0, Lx +dx/2, dx)
y = np.arange(0, Ly +dy/2, dy)
x1 = 0
x2 = len(x)
y1 = 0
y2 = len(y)
u_field = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_velocity_x_field.npy')
v_field = np.load(f'{in_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_velocity_y_field.npy')
fig, ax = plt.subplots(nrows=1, figsize=(6, 5.4))
    div_snap = divergence_aux([u_field[time_step,0][x1:x2,y1:y2], v_field[time_step,0][x1:x2,y1:y2]])
    vmax = np.max(np.abs(div_snap))  # symmetric color limits around zero
    im = ax.imshow(div_snap, vmin=-vmax, vmax=vmax,
                   interpolation='bilinear', cmap=plt.get_cmap('seismic'))
saux = r"$\alpha$"
newline = "\n"
ax.set_title(f'{dir_name+newline}Divergence @ iteration={time_step} [{saux}={alpha} Re={Re}, N=[{Nx}x{Ny}] ]')
cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax)
cbar.set_label('divergence')
fig.savefig(f"{out_dir}A_{alpha}_RE_{Re}_dx_{Nx}_{Ny}_Divergence_snap.png")
def forces_time(in_dir, out_dir, meta_out_dir, ax, fig, last_ite, save_field_x_ite, dir_name, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly, xD, D, y_lim=[], distance=0.2, meta=False, meta_within_case=False, meta_within_alpha=False):
if not meta and not meta_within_case and not meta_within_alpha:
fig, ax = plt.subplots()
    #1. Calculate the force coefficients
t, t_null_vel, Cl, Cd = calculate_coeficients(in_dir, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly, xD, D, distance)
    #2.1. Plot the coefficients over time
saux = r"$\alpha$="
newline = "\n"
if meta:
mlabel = f'{dir_name} {saux}{alpha}'
elif meta_within_alpha:
mlabel = f'{dir_name}'
elif meta_within_case:
mlabel = f'{saux}{alpha}'
else:
mlabel = []
ax.plot(t[::save_field_x_ite], Cl[::save_field_x_ite], label='Cl ' + mlabel if mlabel else "Cl")
ax.plot(t[::save_field_x_ite], Cd[::save_field_x_ite], label='Cd ' + mlabel if mlabel else "Cd")
#2.2.Figure labels and saving
if last_ite:
ax.set(xlabel='time [s]', ylabel='Coefficient', title=f'{f"{dir_name + newline}" if not meta and not meta_within_alpha else ""}' + f'Force probe [' + f'{ f"Alpha={alpha}, " if not meta and not meta_within_case else ""}' +f'Re={Re}, N=[{Nx}x{Ny}] ]')
ax.legend()
if not y_lim:
pass
else:
ax.set_ylim([y_lim[0], y_lim[1]])
ax.xaxis.set_major_locator(MultipleLocator((Nt*dt)/10))
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)
ax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)
fig.savefig(f"{meta_out_dir if (meta or meta_within_alpha) else out_dir}A_{alpha if not meta_within_case and not meta else 'XX'}_RE_{Re}_dx_{Nx}_{Ny}_d_{distance}_Forces_time.png")
plt.close()
def forces_phase_diagram(in_dir, in_ref, out_dir, meta_out_dir, ax, fig, last_ite, save_field_x_ite, dir_name, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly,xD, D, distance=0.2, precision=False, meta=False, meta_within_case=False, meta_within_alpha=False):
if not meta and not meta_within_case and not meta_within_alpha:
fig, ax = plt.subplots()
_, _, Cl, Cd = calculate_coeficients(in_dir, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly, xD, D, distance)
#3.Plot
saux = r"$\alpha$="
newline = "\n"
if meta:
mlabel = f'{dir_name} {saux}{alpha}'
elif meta_within_alpha:
mlabel = f'{dir_name}'
elif meta_within_case:
mlabel = f'{saux}{alpha}'
else:
mlabel = []
ax.plot(Cd[int(len(Cd)*(3/4))::save_field_x_ite], Cl[int(len(Cl)*(3/4))::save_field_x_ite], 'x-', label=mlabel if mlabel else "")
if precision:
Cd_max, Cd_min, Cd_mean, Cl_max, Cl_min, Cl_mean, center, DCd, DCl = precision_indicators(Cd,Cl,save_field_x_ite)
ax.plot([Cd_min, Cd_max, Cd_max, Cd_min, Cd_min],[Cl_min, Cl_min, Cl_max, Cl_max, Cl_min],'--r')
ax.plot(center[0], center[1], 'ro')
ax.plot(Cd_mean, Cl_mean, 'go')
ax.text(center[0]+0.01, center[1]+0.01, f'C=[{np.round(center[0],2)},{np.round(center[1],2)}]\n'+r'$\Delta Cd$ ' + f'={np.round(DCd,2)}\n'+r'$\Delta Cl$'+f'={np.round(DCl,2)}')
if in_ref:
Cl_kang = np.load(f'{in_ref}A_{alpha}_RE_{Re}_Cl_Kang1999.npy')
Cd_kang = np.load(f'{in_ref}A_{alpha}_RE_{Re}_Cd_Kang1999.npy')
ax.plot(Cd_kang, Cl_kang, 'k--', label=f'(Kang 1999) {saux}{alpha}')
if precision:
Cd_max, Cd_min, Cd_mean, Cl_max, Cl_min, Cl_mean, center, DCd, DCl = precision_indicators(Cd_kang,Cl_kang,1, noise=False)
ax.plot([Cd_min, Cd_max, Cd_max, Cd_min, Cd_min],[Cl_min, Cl_min, Cl_max, Cl_max, Cl_min],'--k')
ax.plot(center[0], center[1], 'ko')
ax.plot(Cd_mean, Cl_mean, 'go')
ax.text(center[0]+0.01, center[1]+0.01, f'C=[{np.round(center[0],2)},{np.round(center[1],2)}]\n'+r'$\Delta Cd$ ' + f'={np.round(DCd,2)}\n'+r'$\Delta Cl$'+f'={np.round(DCl,2)}')
if last_ite:
ax.set(xlabel='Cd [·]', ylabel='Cl [·]', title=f'{f"{dir_name + newline}" if not meta and not meta_within_alpha else ""}' + f'Phase Diagram [' + f'{ f"Alpha={alpha}, " if not meta and not meta_within_case else ""}' +f'Re={Re}, N=[{Nx}x{Ny}] ]')
if mlabel:
ax.legend()
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)
ax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)
if not precision:
fig.savefig(f"{meta_out_dir if (meta or meta_within_alpha) else out_dir}A_{alpha if not meta_within_case and not meta else 'XX'}_RE_{Re}_dx_{Nx}_{Ny}_d_{distance}_Forces_ss.png")
else:
fig.savefig(f"{meta_out_dir if (meta or meta_within_alpha) else out_dir}A_{alpha if not meta_within_case and not meta else 'XX'}_RE_{Re}_dx_{Nx}_{Ny}_d_{distance}_Precision_ss.png")
plt.close()
def evaluate_precision(in_dir, in_ref, out_dir, meta_out_dir, ax, fig, last_ite, save_field_x_ite, dir_name, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly,xD, D, meta=False, meta_within_case=False, meta_within_alpha=False):
if not meta and not meta_within_case and not meta_within_alpha:
fig, ax = plt.subplots()
    #1. Calculate precision coefficients
    # Reference values
if 'reference' in in_ref:
Cl = np.load(f'{in_ref}A_{alpha}_RE_{Re}_Cl_Kang1999.npy')
Cd = np.load(f'{in_ref}A_{alpha}_RE_{Re}_Cd_Kang1999.npy')
_, _, _, _, _, _, center_ref, DCd_ref, DCl_ref = precision_indicators(Cd,Cl,save_field_x_ite, noise=False)
else:
_, _, Cl, Cd = calculate_coeficients(in_ref, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly,xD, D, distance=0.2)
_, _, _, _, _, _, center_ref, DCd_ref, DCl_ref = precision_indicators(Cd,Cl,save_field_x_ite, noise=True)
    # Compare values against the reference
_, _, Cl, Cd = calculate_coeficients(in_dir, alpha, Re, Nx, Ny, Nt, dx, dy, dt, Lx, Ly,xD, D, distance=0.2)
_, _, _, _, _, _, center, DCd, DCl = precision_indicators(Cd,Cl,save_field_x_ite, noise=True)
    #2. Calculate error with respect to the reference
error_center = np.round(0.5*( 100*((center[0] - center_ref[0])/center_ref[0]) + 100*((center[1] - center_ref[1])/center_ref[1]) ),2)
error_box = np.round(0.5*( 100*((DCl - DCl_ref)/DCl_ref) + 100*((DCd - DCd_ref)/DCd_ref) ),2)
    #3. Save results to file
with open(f"{meta_out_dir if (meta or meta_within_alpha) else out_dir}A_{alpha if not meta_within_case and not meta else 'XX'}_RE_{Re}_dx_{Nx}_{Ny}_Precision_Evaluation.csv", 'a', newline='') as f_object:
writer_object = writer(f_object)
        writer_object.writerow([dir_name, alpha, np.round(center,2), np.round(DCd,2), np.round(DCl,2),
                                np.round(center_ref,2), np.round(DCd_ref,2), np.round(DCl_ref,2),
                                error_center, error_box])  # error columns are an assumed reconstruction; the source breaks off mid-call
import sys
import re
import numpy as np
from scipy.optimize import minimize,LinearConstraint
from numpy import savetxt,loadtxt
from scipy.stats import chi2
import os
import time
version="QCv1.1"
def read_files_for_P(file, quartets, gnum, GENE_NUM):
topologies = []
genes_pp = {}
NN= GENE_NUM
gnums = []
# quartets = {}
with open(os.path.expanduser(file)) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
#12 Acunomia_m,Afronomia_ci,Aust,Die,| Acunomia_m Aust | Afronomia_circumnitens Dieunomia_heteropoda
for k,line in enumerate(lines):
if not line:
continue
x = line.split("|")[0]
qtree = x.split()[1]
freq = int(x.split()[0])
tree = line.split("|")[1:]
tree = [" ".join(sorted(st.strip().split(" "))) for st in tree]
tree.sort()
tree = "|".join(tree)
# print(tree)
# genes_pp[tree] = [0]*GENE_NUM
if "- -" in tree:
continue
# if qtree=='47,62,66,84,':
# print(tree,(qtree in quartets.keys()),freq)
if qtree in quartets.keys():
if not tree in quartets[qtree]:
quartets[qtree][tree] = [0]*GENE_NUM
else:
quartets[qtree] = {}
quartets[qtree][tree] = [0]*GENE_NUM
quartets[qtree][tree][gnum] = freq
if k%1000 == 0:
print(".",end="")
print(file)
# prev, l = -1, -1
# for k,line in enumerate(lines):
# # [&W 000 1.000000] ((A,C),B,D);
# parts = line.split()
# tree = parts[-1]
# pp = float(parts[2][:-1])
# gnum = int(parts[1])-1
# if gnum != prev:
# l += 1
# prev = gnum
# if tree in genes_pp.keys():
# genes_pp[tree][l] = pp
# else:
# #genes_pp[tree] = [0]*GENE_NUM
# #genes_pp[tree] = [0]*GENE_NUM
# genes_pp[tree] = [0]*NN
# genes_pp[tree][l] = pp
# # trees.append((tree,pp))
# # if trees:
# # maps.append(max(trees,key=lambda x:x[1]))
# # else:
# # maps.append(('',-1))
return quartets
def convert_quartet_to_newick(qstr):
parts = qstr.split("|")
newick = "(("+",".join(parts[0].split())+"),("+",".join(parts[1].split())+"));"
return newick
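# Example (illustrative only):
#     >>> convert_quartet_to_newick("A B | C D")
#     '((A,B),(C,D));'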
def print_tofile(quartets, files):
nfiles = len(files)
nq = len(quartets)
eachf = nq/nfiles + 1
filestr = ""
i = 0
plist = []
for q,qdict in quartets.items():
topologies = list(qdict.keys())
print(topologies)
P = convert_to_array(qdict, topologies, GENE_NUM)
P += 10 ** -8
P = P/P.sum(axis=0,keepdims=1)
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=sys.maxsize)
#pstr = np.array2string(P, precision=2, separator=',')
filestr += " ".join([convert_quartet_to_newick(qstr) for qstr in topologies])+'\n'
plist.append(P)
i += 1
if i % int(nq/nfiles) == 0 :
with open(files[int(i/eachf)]+".top",'w') as f:
f.write(filestr)
np.savez(files[int(i/eachf)], *plist)
plist = []
filestr = ""
def print_toafile(quartets, file):
# nfiles = len(files)
# nq = len(quartets)
# eachf = nq/nfiles + 1
filestr = ""
i = 0
plist = []
for q,qdict in quartets.items():
topologies = list(qdict.keys())
P = convert_to_array(qdict, topologies, GENE_NUM)
P += 10 ** -8
P = P/P.sum(axis=0,keepdims=1)
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=sys.maxsize)
#pstr = np.array2string(P, precision=2, separator=',')
filestr += " ".join(topologies)+'\n'
#filestr += " ".join([convert_quartet_to_newick(qstr) for qstr in topologies])+'\n'
plist.append(P)
with open(file+".top",'w') as f:
f.write(filestr)
np.savez(file, *plist)
def convert_to_array(genes_pp,topologies,GENE_NUM):
# topologies = list(genes_pp.keys())
P = np.zeros((3, GENE_NUM))
for i,top in enumerate(topologies):
P[i,] = np.array(genes_pp[top])
# for j in range(GENE_NUM):
# if P[:,j].sum() < 0.99:
# print(j, P[:,j].sum())
return P
def f1(d, i, P):
return -np.log(P[i,]+np.exp(-d)*(1/3.0 - P[i,])).sum()
def f2(d, i, P):
return -np.log((1-np.exp(-d))*P[i,]+np.exp(-d-np.log(3.0))).sum()
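# Note: f2 is algebraically identical to f1, since
# (1 - exp(-d)) * P + exp(-d - log(3)) = P + exp(-d) * (1/3 - P).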
def jacobian(d, i, P):
return -((3*P[i,]-1)/(1+3*P[i,]*(np.exp(d)-1))).sum()
def hessian(d, i, P):
return -(( 3 * np.exp(d) * P[i,] * (1 - 3 * P[i,]) )/(1+3*P[i,]*(np.exp(d)-1))**2).sum()
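# Sanity-check sketch (not in the original): the analytic jacobian can be compared
# against a central finite difference, e.g. with eps = 1e-6 and a column-stochastic P:
#     P = np.array([[0.6, 0.7], [0.3, 0.2], [0.1, 0.1]])
#     (f1(1.0 + eps, 0, P) - f1(1.0 - eps, 0, P)) / (2 * eps)  # ~= jacobian(1.0, 0, P)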
def find_genetrees(P, best_ind, d, topologies):
P[best_ind,] *= (1 - 2/3*np.exp(-d))
# print(list(range(3)),best_ind, list(range(3)).remove(best_ind))
lst = list(range(3))
lst.remove(best_ind)
for i in lst:
P[i,] *= (1/3*np.exp(-d))
gene_indices = np.argmax(P,axis=0)
genetrees = [topologies[i] for i in gene_indices]
return genetrees
if __name__ == "__main__":
start_time = time.time()
file = sys.argv[1]
print(version)
N = 3
genes = []
with open(file) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
GENE_NUM = len(lines)
quartets = {}
for l in lines:
fo = l.split(" ")[1]
#print(l)
#print(l.split('\t'))
gnum = int(l.split()[0])
quartets = read_files_for_P(fo, quartets, gnum,GENE_NUM)
#print(topologies)
print(len(quartets))
# files = [sys.argv[2]+str(j)+".tre" for j in range(int(sys.argv[3])) ]
# print_toafile(quartets,sys.argv[2])
# exit()
# print(quartets)
printstr = ""
for q,qdict in quartets.items():
# print(q+":")
topologies = list(qdict.keys())
print(topologies)
xx = 3 - len(topologies)
for i in range(xx):
qdict['- - | - -'+str(i)] = [0]*GENE_NUM
topologies = list(qdict.keys())
# topologies = [convert_quartet_to_newick(qstr) for qstr in list(qdict.keys())]
# print(str(topologies)+":")
P = convert_to_array(qdict, topologies, GENE_NUM)
# print(P)
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=sys.maxsize)
# print(np.transpose(P))
# print("All sums to 1:", end=" ")
print((P.sum(axis=0) > 0.99).all())
print(P)
P += 10 ** -8
P = P/P.sum(axis=0,keepdims=1)
print(P)
results = []
for i in range(3):
res = minimize(f1, [0.01], method='trust-constr', jac=jacobian, hess=hessian,bounds=[(0,np.inf)],args=(i,P))
results.append(res)
topologies = [convert_quartet_to_newick(qstr) for qstr in list(qdict.keys())]
        best_ind = np.argmin([r.fun for r in results])
# -*- coding:utf-8 -*-
import os
import random
import numpy as np
from skimage import io, transform
label_map = {'calling_images': 0, 'smoking_images': 1, 'normal_images': 2}
def load_min_batch(data_list, label_list, height=240, width=320):
assert len(data_list) == len(label_list)
images = []
labels = []
for index in range(len(data_list)):
image = io.imread(data_list[index])
image = transform.resize(image, (height, width))
images.append(image)
labels.append(label_map[label_list[index]])
images = np.asarray(images, np.float32)
labels = np.asarray(labels, np.int32)
return images, labels
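# For RGB inputs, load_min_batch returns images of shape
# (len(data_list), height, width, 3) as float32 and labels of shape (len(data_list),) as int32.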
def load_file(dir_path):
datas = []
labels = []
for dir_name in os.listdir(dir_path):
dir = os.path.join(dir_path, dir_name)
if os.path.isdir(dir):
for image in os.listdir(dir):
datas.append(os.path.join(dir, image))
labels.append(dir_name)
elif os.path.isfile(dir_name):
print("This is a normal file")
continue
else:
print("This is a special file")
continue
return datas, labels
def shuffle_data(datas, labels):
num_data = len(datas)
arr = np.arange(num_data)
    np.random.shuffle(arr)
    # Reorder both lists with the shuffled indices and return them
    # (the tail below is an assumed reconstruction; the source breaks off here)
    datas = [datas[i] for i in arr]
    labels = [labels[i] for i in arr]
    return datas, labels
import numpy as np
from Bio import SeqIO, Seq
import cPickle as pickle
import pandas as pd
from util import *
fs=16
alpha="ACGT-N"
save_as='.fasta.gz'
def hypermutation_statistics():
'''
use pre-calculated files on mutation prevalence in different reads types to
produce analysis figures
'''
mut_files = glob.glob('data/*_mutation_statistics.pkl.gz')
hyper_mut_stats = {}
for fname in mut_files:
outprefix, pat, region, sdate = get_outprefix(fname)
with myopen(fname) as ifile:
hyper_mut_stats[outprefix] = pickle.load(ifile)
#reformat data structure
from itertools import product
mutations = [a+'->'+d for a,d in product(alpha, alpha) if a!=d]
all_muts = {'good':defaultdict(list), 'hyper':defaultdict(list)}
for sample in hyper_mut_stats:
for mtype in all_muts:
for mi,mut in enumerate(mutations):
if len(hyper_mut_stats[sample][mtype]):
all_muts[mtype][mut].extend(hyper_mut_stats[sample][mtype][:,mi])
else:
print("no data on",mtype,sample)
    # plot distributions of different transitions in sequences classified as good and hypermutant
plt.figure()
tmp = []
for mtype,muts in all_muts.iteritems():
for mut in ['G->A', 'A->G', 'C->T', 'T->C']:
tmp.extend([(mut, mtype, x) for x in muts[mut]])
data = pd.DataFrame(tmp, columns=['mutation', 'type', 'count'])
data.loc[data.loc[:,'type']=='hyper', 'type'] = 'hypermutants'
data.loc[data.loc[:,'type']=='good', 'type'] = 'other reads'
sns.violinplot(x='mutation', y='count', hue='type',data=data, inner=None,
split=True, scale='width', bw=0.5, cut=0)
plt.legend(title=None, fontsize=fs*0.8)
plt.tick_params(labelsize = fs*0.8)
plt.ylabel('number of mutations', fontsize=fs)
plt.xlabel('', fontsize=fs)
plt.savefig('figures/mutation_count.pdf')
    # plot the different types of mutations in different sequence subsets
for seqtype in ['good', 'hyper', 'suspicious']:
plt.figure()
for sample in hyper_mut_stats:
plt.plot(hyper_mut_stats[sample][seqtype].mean(axis=0), label=sample+str(len(hyper_mut_stats[sample][seqtype])))
plt.ylabel('mean number of mutations')
plt.legend(loc=1, fontsize=8)
plt.xticks(np.arange(30), mutations, rotation=60, fontsize=8)
plt.savefig('figures/mean_number_of_mutations_'+seqtype+'.pdf')
def hyper_mut_ratio(latex=True):
'''
    calculate the fraction of reads that are hypermutated, the number of stop codons in good and
    hypermutated reads, as well as the fraction of reads that are obviously impaired
'''
good_files = filter(lambda x:'RNA' not in x, glob.glob('data/*_DNA_clustered_good'+save_as))
hyper_files = filter(lambda x:'RNA' not in x, glob.glob('data/*_DNA_hyper'+save_as))
read_count = defaultdict(dict)
for fnames, seqtype in [(good_files, 'good'),(hyper_files, 'hyper')]:
for fname in fnames:
outprefix, pat, region, sdate = get_outprefix(fname)
print(outprefix)
if region!='p17':
continue
read_count[outprefix][seqtype] = 0
stop_distribution = []
with myopen(fname) as ifile:
for si, seq in enumerate(SeqIO.parse(ifile, 'fasta')):
nread = get_DNA_read_count(seq.name)
read_count[outprefix][seqtype] += nread
seq_str = str(seq.seq)
nstops = Seq.translate(seq_str.replace('-','')[20:]).count('*') # p17 starts at position 20 in the amplicon
stop_distribution.append((nread, nstops))
            stop_distribution = np.array(stop_distribution, dtype=float)
"""Collection of functions to process mini batches."""
import numpy as np
from sklearn.preprocessing import OneHotEncoder
def invert_full_matrix_np(full_adjacency):
full_adjacency = np.squeeze(full_adjacency)
n_nodes = full_adjacency.shape[1]
full_adjacency = np.append(np.zeros([1, n_nodes]), full_adjacency, axis=0)
full_adjacency[0, 0] = 1
    adjacency = np.eye(n_nodes) - np.linalg.inv(full_adjacency)
    return adjacency  # return value assumed; the source breaks off here
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table import Table, Column, QTable, table_helpers, NdarrayMixin, unique
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyUserWarning
from astropy import time
from astropy import units as u
from astropy import coordinates
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = Table(T1, masked=masked)
t1a = t1['a'].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1['a'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1['a', 'b'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1['a', 'b'].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by('a')
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg['a'].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3']
assert tg.meta['ta'] == 1
assert tg['c'].meta['a'] == 1
assert tg['c'].description == 'column c'
# Group by a table column
tg2 = t1.group_by(t1['a'])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (['a', 'b'], ('a', 'b')):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 1 b 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 c 7.0 0']
# Group by a Table
tg2 = t1.group_by(t1['a', 'b'])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1['a', 'b'].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 c 7.0 0',
' 2 b 6.0 2',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 2 a 4.0 3',
' 1 b 3.0 5',
' 0 a 0.0 4']
def test_groups_keys(T1):
tg = T1.group_by('a')
keys = tg.groups.keys
assert keys.dtype.names == ('a',)
assert np.all(keys['a'] == np.array([0, 1, 2]))
tg = T1.group_by(['a', 'b'])
keys = tg.groups.keys
assert keys.dtype.names == ('a', 'b')
assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c']))
# Grouping by Column ignores column name
tg = T1.group_by(T1['b'])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by('a')
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group['a'][0] == tg['a'][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
tg = t1.group_by('a')
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
        assert np.all(tgc.groups.keys == tg.groups.keys)
#!/usr/bin/python
# coding: UTF-8
#
# Author: <NAME>
# Contact: <EMAIL>
#
# Last update: 12/03/2017
#
# Feel free to contact for any information.
#
# You can cite this code by referencing:
# <NAME>, "Python implementation of Kuramoto systems," 2017-,
# [Online] Available: http://www.laszukdawid.com/codes
#
# LICENCE:
# This program is free software on GNU General Public Licence version 3.
# For details of the copyright please see: http://www.gnu.org/licenses/.
from __future__ import print_function
import numpy as np
from scipy.integrate import ode
__version__ = '0.3'
__author__ = '<NAME>'
class Kuramoto(object):
"""
Implementation of Kuramoto coupling model [1] with harmonic terms
and possible perturbation.
It uses NumPy and Scipy's implementation of Runge-Kutta 4(5)
for numerical integration.
Usage example:
>>> kuramoto = Kuramoto(initial_values)
>>> phase = kuramoto.solve(X)
[1] Kuramoto, Y. (1984). Chemical Oscillations, Waves, and Turbulence
(Vol. 19). doi: doi.org/10.1007/978-3-642-69689-3
"""
_noises = { 'logistic': np.random.logistic,
'normal': np.random.normal,
'uniform': np.random.uniform,
'custom': None
}
noise_types = _noises.keys()
def __init__(self, init_values, noise=None):
"""
Passed arguments should be a dictionary with NumPy arrays
        for initial phase (Y0), intrinsic frequencies (W) and coupling
matrix (K).
"""
self.dtype = np.float32
self.dt = 1.
self.init_phase = np.array(init_values['Y0'])
self.W = np.array(init_values['W'])
        self.K = np.array(init_values['K'])
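        # Illustrative init_values for three all-to-all coupled oscillators
        # (shapes are an assumption following the docstring, not taken from the original):
        #     init_values = {'Y0': np.array([0.0, 1.0, 2.0]),
        #                    'W':  np.array([0.5, 1.0, 1.5]),
        #                    'K':  np.ones((3, 3))}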
import os
import pandas as pd
import numpy as np
import datetime
from logging import getLogger
from libcity.data.dataset import AbstractDataset
from libcity.data.utils import generate_dataloader
from libcity.utils import StandardScaler, NormalScaler, NoneScaler, \
MinMax01Scaler, MinMax11Scaler, LogScaler, ensure_dir
class TrafficStateDataset(AbstractDataset):
"""
    Base class for traffic-state prediction datasets.
    By default, `input_window` steps of data are used to predict the data of the following
    `output_window` steps, i.e. one X and one y.
    External data is usually fused into X so prediction is done jointly, hence the data is [X, y].
    By default, `train_rate` and `eval_rate` split the train/test/validation sets directly
    along the sample dimension (num_samples).
"""
def __init__(self, config):
self.config = config
self.dataset = self.config.get('dataset', '')
self.batch_size = self.config.get('batch_size', 64)
self.cache_dataset = self.config.get('cache_dataset', True)
self.num_workers = self.config.get('num_workers', 0)
self.pad_with_last_sample = self.config.get('pad_with_last_sample', True)
self.train_rate = self.config.get('train_rate', 0.7)
self.eval_rate = self.config.get('eval_rate', 0.1)
self.scaler_type = self.config.get('scaler', 'none')
self.ext_scaler_type = self.config.get('ext_scaler', 'none')
self.load_external = self.config.get('load_external', False)
self.normal_external = self.config.get('normal_external', False)
self.add_time_in_day = self.config.get('add_time_in_day', False)
self.add_day_in_week = self.config.get('add_day_in_week', False)
self.input_window = self.config.get('input_window', 12)
self.output_window = self.config.get('output_window', 12)
self.parameters_str = \
str(self.dataset) + '_' + str(self.input_window) + '_' + str(self.output_window) + '_' \
+ str(self.train_rate) + '_' + str(self.eval_rate) + '_' + str(self.scaler_type) + '_' \
+ str(self.batch_size) + '_' + str(self.load_external) + '_' + str(self.add_time_in_day) + '_' \
+ str(self.add_day_in_week) + '_' + str(self.pad_with_last_sample)
self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/',
'traffic_state_{}.npz'.format(self.parameters_str))
self.cache_file_folder = './libcity/cache/dataset_cache/'
ensure_dir(self.cache_file_folder)
self.data_path = './raw_data/' + self.dataset + '/'
if not os.path.exists(self.data_path):
raise ValueError("Dataset {} not exist! Please ensure the path "
"'./raw_data/{}/' exist!".format(self.dataset, self.dataset))
        # Load settings from the dataset's config.json
self.weight_col = self.config.get('weight_col', '')
self.data_col = self.config.get('data_col', '')
self.ext_col = self.config.get('ext_col', '')
self.geo_file = self.config.get('geo_file', self.dataset)
self.rel_file = self.config.get('rel_file', self.dataset)
self.data_files = self.config.get('data_files', self.dataset)
self.ext_file = self.config.get('ext_file', self.dataset)
self.output_dim = self.config.get('output_dim', 1)
self.time_intervals = self.config.get('time_intervals', 300) # s
self.init_weight_inf_or_zero = self.config.get('init_weight_inf_or_zero', 'inf')
self.set_weight_link_or_dist = self.config.get('set_weight_link_or_dist', 'dist')
self.calculate_weight_adj = self.config.get('calculate_weight_adj', False)
self.weight_adj_epsilon = self.config.get('weight_adj_epsilon', 0.1)
        # Initialization
self.data = None
self.feature_name = {'X': 'float', 'y': 'float'} # 此类的输入只有X和y
self.adj_mx = None
self.scaler = None
self.ext_scaler = None
self.feature_dim = 0
self.ext_dim = 0
self.num_nodes = 0
self.num_batches = 0
self._logger = getLogger()
if os.path.exists(self.data_path + self.geo_file + '.geo'):
self._load_geo()
else:
raise ValueError('Not found .geo file!')
if os.path.exists(self.data_path + self.rel_file + '.rel'): # .rel file is not necessary
self._load_rel()
else:
self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32)
def _load_geo(self):
"""
        Load the .geo file, with format [geo_id, type, coordinates, properties (several columns)]
"""
geofile = pd.read_csv(self.data_path + self.geo_file + '.geo')
self.geo_ids = list(geofile['geo_id'])
self.num_nodes = len(self.geo_ids)
self.geo_to_ind = {}
self.ind_to_geo = {}
for index, idx in enumerate(self.geo_ids):
self.geo_to_ind[idx] = index
self.ind_to_geo[index] = idx
self._logger.info("Loaded file " + self.geo_file + '.geo' + ', num_nodes=' + str(len(self.geo_ids)))
def _load_grid_geo(self):
"""
        Load the .geo file, with format [geo_id, type, coordinates, row_id, column_id, properties (several columns)]
"""
geofile = pd.read_csv(self.data_path + self.geo_file + '.geo')
self.geo_ids = list(geofile['geo_id'])
self.num_nodes = len(self.geo_ids)
self.geo_to_ind = {}
self.geo_to_rc = {}
for index, idx in enumerate(self.geo_ids):
self.geo_to_ind[idx] = index
for i in range(geofile.shape[0]):
self.geo_to_rc[geofile['geo_id'][i]] = [geofile['row_id'][i], geofile['column_id'][i]]
self.len_row = max(list(geofile['row_id'])) + 1
self.len_column = max(list(geofile['column_id'])) + 1
self._logger.info("Loaded file " + self.geo_file + '.geo' + ', num_grids=' + str(len(self.geo_ids))
+ ', grid_size=' + str((self.len_row, self.len_column)))
def _load_rel(self):
"""
        Load the .rel file, with format [rel_id, type, origin_id, destination_id, properties (several columns)],
        and build an N*N matrix. The column holding the weights is selected by the global
        parameter `weight_col`. The global parameter `calculate_weight_adj` indicates whether the
        default weights loaded from .rel need further processing; if so, self._calculate_adjacency_matrix() is called.
        Returns:
            np.ndarray: self.adj_mx, the N*N adjacency matrix
"""
relfile = pd.read_csv(self.data_path + self.rel_file + '.rel')
self._logger.info('set_weight_link_or_dist: {}'.format(self.set_weight_link_or_dist))
self._logger.info('init_weight_inf_or_zero: {}'.format(self.init_weight_inf_or_zero))
        if self.weight_col != '':  # determine the weight column from weight_col
if isinstance(self.weight_col, list):
if len(self.weight_col) != 1:
raise ValueError('`weight_col` parameter must be only one column!')
self.weight_col = self.weight_col[0]
self.distance_df = relfile[~relfile[self.weight_col].isna()][[
'origin_id', 'destination_id', self.weight_col]]
else:
            if len(relfile.columns) > 5 or len(relfile.columns) < 4:  # more than one properties column but no weight_col given: raise
raise ValueError("Don't know which column to be loaded! Please set `weight_col` parameter!")
            elif len(relfile.columns) == 4:  # 4 columns means no properties column: pairs present in the .rel file are adjacent, all others are not
self.calculate_weight_adj = False
self.set_weight_link_or_dist = 'link'
self.init_weight_inf_or_zero = 'zero'
self.distance_df = relfile[['origin_id', 'destination_id']]
            else:  # len(relfile.columns) == 5: exactly one properties column, treated as the weight column by default
self.weight_col = relfile.columns[-1]
self.distance_df = relfile[~relfile[self.weight_col].isna()][[
'origin_id', 'destination_id', self.weight_col]]
        # Convert the data into matrix form
self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32)
if self.init_weight_inf_or_zero.lower() == 'inf' and self.set_weight_link_or_dist.lower() != 'link':
self.adj_mx[:] = np.inf
for row in self.distance_df.values:
if row[0] not in self.geo_to_ind or row[1] not in self.geo_to_ind:
continue
            if self.set_weight_link_or_dist.lower() == 'dist':  # keep the original distance values
self.adj_mx[self.geo_to_ind[row[0]], self.geo_to_ind[row[1]]] = row[2]
            else:  # self.set_weight_link_or_dist.lower() == 'link': keep only 0/1 adjacency
self.adj_mx[self.geo_to_ind[row[0]], self.geo_to_ind[row[1]]] = 1
self._logger.info("Loaded file " + self.rel_file + '.rel, shape=' + str(self.adj_mx.shape))
        # Compute the weights
if self.calculate_weight_adj:
self._calculate_adjacency_matrix()
def _load_grid_rel(self):
"""
        Build the adjacency matrix from the grid structure; each cell is adjacent to its 8 surrounding cells.
        Returns:
            np.ndarray: self.adj_mx, the N*N adjacency matrix
"""
self.adj_mx = np.zeros((len(self.geo_ids), len(self.geo_ids)), dtype=np.float32)
dirs = [[0, 1], [1, 0], [-1, 0], [0, -1], [1, 1], [1, -1], [-1, 1], [-1, -1]]
for i in range(self.len_row):
for j in range(self.len_column):
index = i * self.len_column + j # grid_id
for d in dirs:
nei_i = i + d[0]
nei_j = j + d[1]
if nei_i >= 0 and nei_i < self.len_row and nei_j >= 0 and nei_j < self.len_column:
nei_index = nei_i * self.len_column + nei_j # neighbor_grid_id
self.adj_mx[index][nei_index] = 1
self.adj_mx[nei_index][index] = 1
self._logger.info("Generate grid rel file, shape=" + str(self.adj_mx.shape))
def _calculate_adjacency_matrix(self):
"""
        Compute the adjacency-matrix weights with a thresholded Gaussian kernel; override this
        method to use a different scheme. The formula is
        $ w_{ij} = \exp \left(- \\frac{d_{ij}^{2}}{\sigma^{2}} \\right) $, where $\sigma$ is the standard deviation,
        and values below the threshold `weight_adj_epsilon` are set to 0: $ w_{ij}[w_{ij}<\epsilon]=0 $
        Returns:
            np.ndarray: self.adj_mx, the N*N adjacency matrix
"""
self._logger.info("Start Calculate the weight by Gauss kernel!")
distances = self.adj_mx[~np.isinf(self.adj_mx)].flatten()
std = distances.std()
self.adj_mx = np.exp(-np.square(self.adj_mx / std))
self.adj_mx[self.adj_mx < self.weight_adj_epsilon] = 0
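    # Standalone illustration of the thresholded Gaussian kernel above
    # (simplified sketch with made-up distances, not part of the class):
    #     dist = np.array([[0., 2., 8.], [2., 0., 4.], [8., 4., 0.]])
    #     w = np.exp(-np.square(dist / dist.std()))
    #     w[w < 0.1] = 0   # weights of distant pairs drop to zero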
def _load_dyna(self, filename):
"""
        Load the data file (.dyna/.grid/.od/.gridod). Subclasses must implement this method to
        specify how the data file is loaded, returning the corresponding multi-dimensional data.
        Five ready-made loaders for the file types above produce arrays of different shapes:
        `_load_dyna_3d`/`_load_grid_3d`/`_load_grid_4d`/`_load_grid_od_4d`/`_load_grid_od_6d`
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array
"""
raise NotImplementedError('Please implement the function `_load_dyna()`.')
def _load_dyna_3d(self, filename):
"""
        Load the .dyna file, with format [dyna_id, type, time, entity_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 3d-array: (len_time, num_nodes, feature_dim)
"""
        # Load the dataset
self._logger.info("Loading file " + filename + '.dyna')
dynafile = pd.read_csv(self.data_path + filename + '.dyna')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'entity_id')
dynafile = dynafile[data_col]
        else:  # otherwise load all columns
            dynafile = dynafile[dynafile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(dynafile['time'][:int(dynafile.shape[0] / len(self.geo_ids))])
self.idx_of_timesolts = dict()
        if not dynafile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
        # Convert to a 3-d array
feature_dim = len(dynafile.columns) - 2
df = dynafile[dynafile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = []
for i in range(0, df.shape[0], len_time):
data.append(df[i:i+len_time].values)
        data = np.array(data, dtype=float)  # (len(self.geo_ids), len_time, feature_dim)
data = data.swapaxes(0, 1) # (len_time, len(self.geo_ids), feature_dim)
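        # Shape illustration: e.g. 2 entities recorded at 3 timestamps arrive as
        # 6 consecutive rows and leave this block as an array of shape (3, 2, feature_dim)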
self._logger.info("Loaded file " + filename + '.dyna' + ', shape=' + str(data.shape))
return data
def _load_grid_3d(self, filename):
"""
        Load the .grid file, with format [dyna_id, type, time, row_id, column_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 3d-array: (len_time, num_grids, feature_dim)
"""
        # Load the dataset
self._logger.info("Loading file " + filename + '.grid')
gridfile = pd.read_csv(self.data_path + filename + '.grid')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'row_id')
data_col.insert(2, 'column_id')
gridfile = gridfile[data_col]
        else:  # otherwise load all columns
            gridfile = gridfile[gridfile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(gridfile['time'][:int(gridfile.shape[0] / len(self.geo_ids))])
self.idx_of_timesolts = dict()
        if not gridfile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
        # Convert to a 3-d array
feature_dim = len(gridfile.columns) - 3
df = gridfile[gridfile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = []
for i in range(0, df.shape[0], len_time):
data.append(df[i:i + len_time].values)
        data = np.array(data, dtype=float)  # (len(self.geo_ids), len_time, feature_dim)
data = data.swapaxes(0, 1) # (len_time, len(self.geo_ids), feature_dim)
self._logger.info("Loaded file " + filename + '.grid' + ', shape=' + str(data.shape))
return data
def _load_grid_4d(self, filename):
"""
        Load the .grid file, with format [dyna_id, type, time, row_id, column_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 4d-array: (len_time, len_row, len_column, feature_dim)
"""
        # Load the dataset
self._logger.info("Loading file " + filename + '.grid')
gridfile = pd.read_csv(self.data_path + filename + '.grid')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'row_id')
data_col.insert(2, 'column_id')
gridfile = gridfile[data_col]
        else:  # otherwise load all columns
            gridfile = gridfile[gridfile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(gridfile['time'][:int(gridfile.shape[0] / len(self.geo_ids))])
self.idx_of_timesolts = dict()
        if not gridfile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
        # Convert to a 4-d array
feature_dim = len(gridfile.columns) - 3
df = gridfile[gridfile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = []
for i in range(self.len_row):
tmp = []
for j in range(self.len_column):
index = (i * self.len_column + j) * len_time
tmp.append(df[index:index + len_time].values)
data.append(tmp)
        data = np.array(data, dtype=float)  # (len_row, len_column, len_time, feature_dim)
data = data.swapaxes(2, 0).swapaxes(1, 2) # (len_time, len_row, len_column, feature_dim)
self._logger.info("Loaded file " + filename + '.grid' + ', shape=' + str(data.shape))
return data
def _load_od_4d(self, filename):
"""
        Load the .od file, with format [dyna_id, type, time, origin_id, destination_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 4d-array: (len_time, num_nodes, num_nodes, feature_dim)
"""
self._logger.info("Loading file " + filename + '.od')
odfile = pd.read_csv(self.data_path + filename + '.od')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'origin_id')
data_col.insert(2, 'destination_id')
odfile = odfile[data_col]
        else:  # otherwise load all columns
            odfile = odfile[odfile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(odfile['time'][:int(odfile.shape[0] / self.num_nodes / self.num_nodes)])
self.idx_of_timesolts = dict()
        if not odfile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
feature_dim = len(odfile.columns) - 3
df = odfile[odfile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = np.zeros((self.num_nodes, self.num_nodes, len_time, feature_dim))
for i in range(self.num_nodes):
            origin_index = i * len_time * self.num_nodes  # each origin occupies len_time * num_nodes rows
for j in range(self.num_nodes):
                destination_index = j * len_time  # each destination occupies len_time rows
index = origin_index + destination_index
data[i][j] = df[index:index + len_time].values
data = data.transpose((2, 0, 1, 3)) # (len_time, num_nodes, num_nodes, feature_dim)
self._logger.info("Loaded file " + filename + '.od' + ', shape=' + str(data.shape))
return data
def _load_grid_od_4d(self, filename):
"""
        Load the .gridod file, with format [dyna_id, type, time, origin_row_id, origin_column_id,
        destination_row_id, destination_column_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 4d-array: (len_time, num_grids, num_grids, feature_dim)
"""
        # Load the dataset
self._logger.info("Loading file " + filename + '.gridod')
gridodfile = pd.read_csv(self.data_path + filename + '.gridod')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'origin_row_id')
data_col.insert(2, 'origin_column_id')
data_col.insert(3, 'destination_row_id')
data_col.insert(4, 'destination_column_id')
gridodfile = gridodfile[data_col]
        else:  # otherwise load all columns
            gridodfile = gridodfile[gridodfile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(gridodfile['time'][:int(gridodfile.shape[0] / len(self.geo_ids) / len(self.geo_ids))])
self.idx_of_timesolts = dict()
        if not gridodfile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
        # Convert to a 4-d array
feature_dim = len(gridodfile.columns) - 5
df = gridodfile[gridodfile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = np.zeros((len(self.geo_ids), len(self.geo_ids), len_time, feature_dim))
for oi in range(self.len_row):
for oj in range(self.len_column):
                origin_index = (oi * self.len_column + oj) * len_time * len(self.geo_ids)  # each origin occupies len_time * n rows
for di in range(self.len_row):
for dj in range(self.len_column):
                        destination_index = (di * self.len_column + dj) * len_time  # each destination occupies len_time rows
index = origin_index + destination_index
# print(index, index + len_time)
# print((oi, oj), (di, dj))
# print(oi * self.len_column + oj, di * self.len_column + dj)
data[oi * self.len_column + oj][di * self.len_column + dj] = df[index:index + len_time].values
data = data.transpose((2, 0, 1, 3)) # (len_time, num_grids, num_grids, feature_dim)
self._logger.info("Loaded file " + filename + '.gridod' + ', shape=' + str(data.shape))
return data
def _load_grid_od_6d(self, filename):
"""
        Load the .gridod file, with format [dyna_id, type, time, origin_row_id, origin_column_id,
        destination_row_id, destination_column_id, properties (several columns)].
        The id order in the .geo file should match the order in the .dyna file.
        The global parameter `data_col` selects which data columns to load; if unset, all columns are loaded.
        Args:
            filename(str): name of the data file, without extension
        Returns:
            np.ndarray: the data array, 6d-array: (len_time, len_row, len_column, len_row, len_column, feature_dim)
"""
        # Load the dataset
self._logger.info("Loading file " + filename + '.gridod')
gridodfile = pd.read_csv(self.data_path + filename + '.gridod')
        if self.data_col != '':  # load only the specified columns
if isinstance(self.data_col, list):
data_col = self.data_col.copy()
else: # str
data_col = [self.data_col].copy()
data_col.insert(0, 'time')
data_col.insert(1, 'origin_row_id')
data_col.insert(2, 'origin_column_id')
data_col.insert(3, 'destination_row_id')
data_col.insert(4, 'destination_column_id')
gridodfile = gridodfile[data_col]
        else:  # otherwise load all columns
            gridodfile = gridodfile[gridodfile.columns[2:]]  # all columns from 'time' onward
        # Build the time series
self.timesolts = list(gridodfile['time'][:int(gridodfile.shape[0] / len(self.geo_ids) / len(self.geo_ids))])
self.idx_of_timesolts = dict()
        if not gridodfile['time'].isna().any():  # no missing timestamps
self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))
self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.timesolts):
self.idx_of_timesolts[_ts] = idx
        # Convert to a 6-d array
feature_dim = len(gridodfile.columns) - 5
df = gridodfile[gridodfile.columns[-feature_dim:]]
len_time = len(self.timesolts)
data = np.zeros((self.len_row, self.len_column, self.len_row, self.len_column, len_time, feature_dim))
for oi in range(self.len_row):
for oj in range(self.len_column):
                origin_index = (oi * self.len_column + oj) * len_time * len(self.geo_ids)  # each origin occupies len_time * n rows
for di in range(self.len_row):
for dj in range(self.len_column):
                        destination_index = (di * self.len_column + dj) * len_time  # each destination occupies len_time rows
index = origin_index + destination_index
# print(index, index + len_time)
data[oi][oj][di][dj] = df[index:index + len_time].values
data = data.transpose((4, 0, 1, 2, 3, 5)) # (len_time, len_row, len_column, len_row, len_column, feature_dim)
self._logger.info("Loaded file " + filename + '.gridod' + ', shape=' + str(data.shape))
return data
def _load_ext(self):
"""
        Load the .ext file, with format [ext_id, time, properties (several columns)].
        The global parameter `ext_col` selects which data columns to load; if unset, all columns are loaded.
        Returns:
            np.ndarray: the external data array, shape: (timeslots, ext_dim)
"""
        # Load the dataset
extfile = pd.read_csv(self.data_path + self.ext_file + '.ext')
        if self.ext_col != '':  # load only the specified columns
if isinstance(self.ext_col, list):
ext_col = self.ext_col.copy()
else: # str
ext_col = [self.ext_col].copy()
ext_col.insert(0, 'time')
extfile = extfile[ext_col]
        else:  # otherwise load all columns
            extfile = extfile[extfile.columns[1:]]  # all columns from 'time' onward
        # Build the time series
self.ext_timesolts = extfile['time']
self.idx_of_ext_timesolts = dict()
        if not extfile['time'].isna().any():  # no missing timestamps
self.ext_timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.ext_timesolts))
self.ext_timesolts = np.array(self.ext_timesolts, dtype='datetime64[ns]')
for idx, _ts in enumerate(self.ext_timesolts):
self.idx_of_ext_timesolts[_ts] = idx
        # Build the external feature array
feature_dim = len(extfile.columns) - 1
df = extfile[extfile.columns[-feature_dim:]].values
self._logger.info("Loaded file " + self.ext_file + '.ext' + ', shape=' + str(df.shape))
return df
def _add_external_information(self, df, ext_data=None):
"""
        Combine the external data and the raw traffic-state data into one high-dimensional array.
        Subclasses must implement this method to specify how the two are fused; if no external
        data is wanted, the traffic-state data `df` can simply be returned unchanged.
        Three ready-made implementations cover traffic-state data of different shapes:
        `_add_external_information_3d`/`_add_external_information_4d`/`_add_external_information_6d`
        Args:
            df(np.ndarray): multi-dimensional traffic-state data array
            ext_data(np.ndarray): external data
        Returns:
            np.ndarray: the fused external and traffic-state data
"""
raise NotImplementedError('Please implement the function `_add_external_information()`.')
def _add_external_information_3d(self, df, ext_data=None):
"""
        Add external information (day of week, time of day, external data).
        Args:
            df(np.ndarray): multi-dimensional traffic-state data array, (len_time, num_nodes, feature_dim)
            ext_data(np.ndarray): external data
        Returns:
            np.ndarray: the fused external and traffic-state data, (len_time, num_nodes, feature_dim_plus)
"""
num_samples, num_nodes, feature_dim = df.shape
is_time_nan = np.isnan(self.timesolts).any()
data_list = [df]
if self.add_time_in_day and not is_time_nan:
time_ind = (self.timesolts - self.timesolts.astype("datetime64[D]")) / np.timedelta64(1, "D")
time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(time_in_day)
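            # e.g. a timestamp of 06:00 gives time_ind == 0.25 (a quarter of a day),
            # tiled across the nodes so every node carries the same extra feature channel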
# Todo
if self.add_day_in_week and not is_time_nan:
dayofweek = []
for day in self.timesolts.astype("datetime64[D]"):
dayofweek.append(datetime.datetime.strptime(str(day), '%Y-%m-%d').weekday())
day_in_week = np.zeros(shape=(num_samples, num_nodes, 7))
day_in_week[np.arange(num_samples), :, dayofweek] = 1
data_list.append(day_in_week)
        # External dataset
if ext_data is not None:
if not is_time_nan:
indexs = []
for ts in self.timesolts:
ts_index = self.idx_of_ext_timesolts[ts]
indexs.append(ts_index)
                select_data = ext_data[indexs]  # T * ext_dim: select the required time steps
for i in range(select_data.shape[1]):
data_ind = select_data[:, i]
data_ind = np.tile(data_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(data_ind)
            else:  # no explicit timestamps: external data can only be aligned if it is as long as df
if ext_data.shape[0] == df.shape[0]:
select_data = ext_data # T * ext_dim
for i in range(select_data.shape[1]):
data_ind = select_data[:, i]
data_ind = np.tile(data_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(data_ind)
data = np.concatenate(data_list, axis=-1)
return data
def _add_external_information_4d(self, df, ext_data=None):
"""
        Add external information (day of week, time of day, external data).
        Args:
            df(np.ndarray): multi-dimensional traffic-state data array, (len_time, len_row, len_column, feature_dim)
            ext_data(np.ndarray): external data
        Returns:
            np.ndarray: the fused external and traffic-state data, (len_time, len_row, len_column, feature_dim_plus)
"""
num_samples, len_row, len_column, feature_dim = df.shape
        is_time_nan = np.isnan(self.timesolts).any()  # `.any()` assumed, mirroring _add_external_information_3d; the source breaks off here
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Initial State Custom """
import unittest
from test.aqua import QiskitAquaTestCase
import numpy as np
from qiskit.aqua import AquaError, aqua_globals
from qiskit.aqua.components.initial_states import Custom
class TestInitialStateCustom(QiskitAquaTestCase):
""" Test Initial State Custom """
def test_qubits_2_zero_vector(self):
""" qubits 2 zero vector test """
custom = Custom(2, state='zero')
cct = custom.construct_circuit('vector')
np.testing.assert_array_equal(cct, [1.0, 0.0, 0.0, 0.0])
def test_qubits_5_zero_vector(self):
""" qubits 5 zero vector test """
custom = Custom(5, state='zero')
cct = custom.construct_circuit('vector')
np.testing.assert_array_equal(cct, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
def test_qubits_2_zero_circuit(self):
""" qubits 2 zero circuit test """
custom = Custom(2, state='zero')
cct = custom.construct_circuit('circuit')
self.assertEqual(cct.qasm(), 'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[2];\n')
def test_qubits_5_zero_circuit(self):
""" qubits 5 zero circuit test """
custom = Custom(5, state='zero')
cct = custom.construct_circuit('circuit')
self.assertEqual(cct.qasm(), 'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[5];\n')
def test_qubits_2_uniform_vector(self):
""" qubits 2 uniform vector test """
custom = Custom(2, state='uniform')
cct = custom.construct_circuit('vector')
np.testing.assert_array_equal(cct, [0.5]*4)
def test_qubits_5_uniform_vector(self):
""" qubits 5 uniform vector test """
custom = Custom(5, state='uniform')
cct = custom.construct_circuit('vector')
np.testing.assert_array_almost_equal(cct, [0.1767767]*32)
def test_qubits_2_uniform_circuit(self):
""" qubits 2 uniform circuit test """
custom = Custom(2, state='uniform')
cct = custom.construct_circuit('circuit')
self.assertEqual(cct.qasm(),
'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[2];\n'
'u2(0.0,3.141592653589793) q[0];\nu2(0.0,3.141592653589793) q[1];\n')
def test_qubits_2_random_vector(self):
""" qubits 2 random vector test """
custom = Custom(2, state='random')
cct = custom.construct_circuit('vector')
prob = np.sqrt(np.sum([x**2 for x in cct]))
self.assertAlmostEqual(prob, 1.0)
def test_qubits_5_random_vector(self):
""" qubits 5 random vector test """
custom = Custom(5, state='random')
cct = custom.construct_circuit('vector')
prob = np.sqrt(np.sum([x**2 for x in cct]))
self.assertAlmostEqual(prob, 1.0)
def test_qubits_2_given_vector(self):
""" qubits 2 given vector test """
custom = Custom(2, state_vector=[0.5]*4)
cct = custom.construct_circuit('vector')
np.testing.assert_array_equal(cct, [0.5]*4)
def test_qubits_5_given_vector(self):
""" qubits 5 given vector test """
custom = Custom(5, state_vector=[1.0]*32)
cct = custom.construct_circuit('vector')
        np.testing.assert_array_almost_equal(cct, [0.1767767]*32)
#Copyright (c) 2017 <NAME>.
#Cura is released under the terms of the LGPLv3 or higher.
import gc
from UM.Job import Job
from UM.Application import Application
from UM.Mesh.MeshData import MeshData
from UM.Preferences import Preferences
from UM.View.GL.OpenGLContext import OpenGLContext
from UM.Message import Message
from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Math.Vector import Vector
from cura.Scene.BuildPlateDecorator import BuildPlateDecorator
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Settings.ExtruderManager import ExtruderManager
from cura import LayerDataBuilder
from cura import LayerDataDecorator
from cura import LayerPolygon
import numpy
from time import time
from cura.Settings.ExtrudersModel import ExtrudersModel
catalog = i18nCatalog("cura")
## Return a 4-tuple with floats 0-1 representing the html color code
#
# \param color_code html color code, i.e. "#FF0000" -> red
def colorCodeToRGBA(color_code):
if color_code is None:
Logger.log("w", "Unable to convert color code, returning default")
return [0, 0, 0, 1]
return [
int(color_code[1:3], 16) / 255,
int(color_code[3:5], 16) / 255,
int(color_code[5:7], 16) / 255,
1.0]
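# Example (illustrative only): colorCodeToRGBA("#FF0000") -> [1.0, 0.0, 0.0, 1.0]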
class ProcessSlicedLayersJob(Job):
def __init__(self, layers):
super().__init__()
self._layers = layers
self._scene = Application.getInstance().getController().getScene()
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, -1)
self._abort_requested = False
self._build_plate_number = None
## Aborts the processing of layers.
#
# This abort is made on a best-effort basis, meaning that the actual
# job thread will check once in a while to see whether an abort is
# requested and then stop processing by itself. There is no guarantee
# that the abort will stop the job any time soon or even at all.
def abort(self):
self._abort_requested = True
def setBuildPlate(self, new_value):
self._build_plate_number = new_value
def getBuildPlate(self):
return self._build_plate_number
def run(self):
Logger.log("d", "Processing new layer for build plate %s..." % self._build_plate_number)
start_time = time()
view = Application.getInstance().getController().getActiveView()
if view.getPluginId() == "SimulationView":
view.resetLayerData()
self._progress_message.show()
Job.yieldThread()
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
Application.getInstance().getController().activeViewChanged.connect(self._onActiveViewChanged)
# The no_setting_override is here because adding the SettingOverrideDecorator will trigger a reslice
new_node = CuraSceneNode(no_setting_override = True)
new_node.addDecorator(BuildPlateDecorator(self._build_plate_number))
# Force garbage collection.
# For some reason, Python has a tendency to keep the layer data
# in memory longer than needed. Forcing the GC to run here makes
# sure any old layer data is really cleaned up before adding new.
gc.collect()
mesh = MeshData()
layer_data = LayerDataBuilder.LayerDataBuilder()
layer_count = len(self._layers)
# Find the minimum layer number
# When using a raft, the raft layers are sent as layers < 0. Instead of allowing layers < 0, we
        # instead simply offset all other layers so the lowest layer is always 0. It could happen that
# the first raft layer has value -8 but there are just 4 raft (negative) layers.
min_layer_number = 0
negative_layers = 0
for layer in self._layers:
if layer.id < min_layer_number:
min_layer_number = layer.id
if layer.id < 0:
negative_layers += 1
current_layer = 0
for layer in self._layers:
# Negative layers are offset by the minimum layer number, but the positive layers are just
# offset by the number of negative layers so there is no layer gap between raft and model
abs_layer_number = layer.id + abs(min_layer_number) if layer.id < 0 else layer.id + negative_layers
layer_data.addLayer(abs_layer_number)
this_layer = layer_data.getLayer(abs_layer_number)
layer_data.setLayerHeight(abs_layer_number, layer.height)
layer_data.setLayerThickness(abs_layer_number, layer.thickness)
for p in range(layer.repeatedMessageCount("path_segment")):
polygon = layer.getRepeatedMessage("path_segment", p)
extruder = polygon.extruder
line_types = numpy.fromstring(polygon.line_type, dtype="u1") # Convert bytearray to numpy array
line_types = line_types.reshape((-1,1))
points = numpy.fromstring(polygon.points, dtype="f4") # Convert bytearray to numpy array
if polygon.point_type == 0: # Point2D
points = points.reshape((-1,2)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
else: # Point3D
points = points.reshape((-1,3))
                line_widths = numpy.fromstring(polygon.line_width, dtype="f4")  # Convert bytearray to numpy array
''' Example that shows the transient planar sensor signal after irradiation.
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
from scarce import silicon, solver, sensor, tools
def transient_irrad():
# For CCE important parameters
fluence = 5e15 # Neq/cm2
V_bias = -1000.
n_eff_0 = 1.7e12
# Calculate effective doping concentration after irradiation
n_eff = silicon.get_eff_acceptor_concentration(fluence=fluence,
n_eff_0=n_eff_0,
is_ntype=True,
is_oxygenated=True)
# Planar sensor parameters
width = 50.
thickness = 200.
temperature = 300.
pitch = 45.
n_pixel = 9
V_readout = 0.
resolution = 200
# Create sensor
pot_w_descr, pot_descr = sensor.planar_sensor(n_eff=n_eff,
V_bias=V_bias,
V_readout=V_readout,
temperature=temperature,
n_pixel=n_pixel,
width=width,
pitch=pitch,
thickness=thickness,
resolution=resolution,
# Might have to be adjusted
# when changing the
# geometry
smoothing=0.01)
# Start parameters of e-h pairs
# Create 10 e-h pairs every 5 um in y
xx, yy = np.meshgrid( | np.linspace(0, width, 1) | numpy.linspace |
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import os
import scipy.stats  # ensures scipy.stats is available for pearsonr below
import imageio
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.colors import ListedColormap
import statsmodels.api as sm
import pandas as pd
from statsmodels.stats.anova import AnovaRM
from helper_code.registration_funcs import model_arena, get_arena_details
from helper_code.processing_funcs import speed_colors
from helper_code.analysis_funcs import *
from important_code.shuffle_test import permutation_test, permutation_correlation
plt.rcParams.update({'font.size': 30})
def plot_traversals(self):
''' plot all traversals across the arena '''
# initialize parameters
sides = ['back', 'front']
# sides = ['back']
types = ['spontaneous'] #, 'evoked']
fast_color = np.array([.5, 1, .5])
slow_color = np.array([1, .9, .9])
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
edge_vector_color = np.array([.98, .9, .6])**4
homing_vector_color = np.array([0, 0, 0])
non_escape_color = np.array([0,0,0])
condition_colors = [[.5,.5,.5], [.3,.5,.8], [0,.7,1]]
time_thresh = 15 #20 for ev comparison
speed_thresh = 2
p = 0
HV_cutoff = .681 # .5 for exploratory analysis
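# escapes/traversals with edginess above HV_cutoff are classified as edge vectors, below as homing vectors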
# initialize figures
fig, fig2, fig3, ax, ax2, ax3 = initialize_figures_traversals(self) #, types = len(types)+1)
# initialize lists for stats
all_data = []
all_conditions = []
edge_vector_time_all = np.array([])
# loop over spontaneous vs evoked
for t, type in enumerate(types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
strategies = [0, 0, 0]
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# initialize edginess
all_traversals_edgy = {}
all_traversals_homy = {}
proportion_edgy = {}
for s in sides:
all_traversals_edgy[s] = []
all_traversals_homy[s] = []
proportion_edgy[s] = []
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse in the experiment
for i, mouse in enumerate(self.analysis[experiment][condition]['back traversal']):
mouse_data = []
print(mouse)
# loop over back and front sides
for s, start in enumerate(sides):
if start == 'front' and type == 'evoked': continue
# find all the paths across the arena
traversal = self.analysis[experiment][condition][start + ' traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
if traversal[t*5]:
x_end_loc = np.array([x_loc[-1] * scaling_factor for x_loc in np.array(traversal[t * 5 + 0])[:, 0]])
if traversal[4] < 10: continue
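# count qualifying traversals (slow enough, edgy enough, within the time window)
# and normalize to a rate per time_thresh minutes of session time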
number_of_edge_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) > HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) ) / min(traversal[4], time_thresh) * time_thresh
# print(traversal[4])
number_of_homing_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) < HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) )/ min(traversal[4], time_thresh) * time_thresh
all_traversals_edgy[start].append( number_of_edge_vectors )
all_traversals_homy[start].append(number_of_homing_vectors)
# print(number_of_edge_vectors)
mouse_data.append(number_of_edge_vectors)
# get the time of edge vectors
if condition == 'obstacle' and 'wall' in experiment:
edge_vector_idx = ( (np.array(traversal[t * 5 + 3]) < speed_thresh) * (np.array(traversal[t * 5 + 2]) > HV_cutoff) )
edge_vector_time = np.array(traversal[t*5+1])[edge_vector_idx] / 30 / 60
edge_vector_time_all = np.concatenate((edge_vector_time_all, edge_vector_time))
# prop_edgy = np.sum((np.array(traversal[t*5 + 3]) < speed_thresh) * \
# (np.array(traversal[t*5 + 2]) > HV_cutoff) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60)) / \
# np.sum((np.array(traversal[t * 5 + 3]) < speed_thresh) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60))
else:
all_traversals_edgy[start].append(0)
all_traversals_homy[start].append(0)
# if np.isnan(prop_edgy): prop_edgy = .5
# prop_edgy = prop_edgy / .35738
# proportion_edgy[start].append(prop_edgy)
traversal_coords = np.array(traversal[t*5+0])
pre_traversal = np.array(traversal[10])
else:
# all_traversals_edginess[start].append(0)
continue
m += .5
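# two sides ('back'/'front') per mouse, so .5 per side sums to one count per mouse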
# loop over all paths
show = False
if show and traversal:
for trial in range(traversal_coords.shape[0]):
# make sure it qualifies
if traversal[t * 5 + 3][trial] > speed_thresh: continue
if traversal[t*5+1][trial] > time_thresh*30*60: continue
if not len(pre_traversal[0][0]): continue
# if abs(traversal_coords[trial][0][-1]*scaling_factor - 50) > 30: continue
# downsample to get even coverage
# if c == 2 and np.random.random() > (59 / 234): continue
# if c == 1 and np.random.random() > (59 / 94): continue
if traversal[t*5+2][trial]> HV_cutoff: plot_color = edge_vector_color
else: plot_color = homing_vector_color
display_traversal(scaling_factor, traversal_coords, pre_traversal, trial, path_ax, plot_color)
if mouse_data:
# all_data.append(mouse_data)
all_conditions.append(c)
# save image
path_fig.savefig(os.path.join(self.summary_plots_folder, self.labels[c] + ' traversals.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot the data
if type == 'spontaneous' and len(sides) > 1:
plot_number_edgy = np.array(all_traversals_edgy['front']).astype(float) + np.array(all_traversals_edgy['back']).astype(float)
plot_number_homy = np.array(all_traversals_homy['front']).astype(float) + np.array(all_traversals_homy['back']).astype(float)
print(np.sum(plot_number_edgy + plot_number_homy))
# plot_proportion_edgy = (np.array(proportion_edgy['front']).astype(float) + np.array(proportion_edgy['back']).astype(float)) / 2
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
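# fraction of qualifying traversals that are edge vectors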
all_data.append(plot_number_edgy)
else:
plot_number_edgy = np.array(all_traversals_edgy[sides[0]]).astype(float)
plot_number_homy = np.array(all_traversals_homy[sides[0]]).astype(float)
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
# plot_proportion_edgy = np.array(proportion_edgy[sides[0]]).astype(float)
for i, (plot_data, ax0) in enumerate(zip([plot_number_edgy, plot_number_homy], [ax, ax3])): #, plot_proportion_edgy , ax2
print(plot_data)
print(np.sum(plot_data))
# plot each trial
# scatter_axis = scatter_the_axis( (p*4/3+.5/3), plot_data)
ax0.scatter(np.ones_like(plot_data)* (p*4/3+.5/3)* 3 - .2, plot_data, color=[0,0,0, .4], edgecolors='none', s=25, zorder=99)
# do kde
# if i==0: bw = .5
# else: bw = .02
bw = .5
kde = fit_kde(plot_data, bw=bw)
plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=.3, color=[.5, .5, .5], violin=False, clip=True)
ax0.plot([4 * p + -.2, 4 * p + -.2], [np.percentile(plot_data, 25), np.percentile(plot_data, 75)], color = [0,0,0])
ax0.plot([4 * p + -.4, 4 * p + -.0], [np.percentile(plot_data, 50), np.percentile(plot_data, 50)], color = [1,1,1], linewidth = 2)
# else:
# # kde = fit_kde(plot_data, bw=.03)
# # plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=True)
# bp = ax0.boxplot([plot_data, [0, 0]], positions=[4 * p + -.2, -10], showfliers=False, zorder=99)
# ax0.set_xlim([-1, 4 * len(self.experiments) - 1])
p+=1
# plot a stacked bar of strategies
# fig3 = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
# make timing hist
plt.figure()
bins = np.arange(0,22.5,2.5)
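# normalize so the histogram reads as edge vectors per minute per mouse (2.5-min bins, m mice)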
plt.hist(edge_vector_time_all, bins = bins, color = [0,0,0], weights = np.ones_like(edge_vector_time_all) / 2.5 / m) #condition_colors[c])
plt.ylim([0,2.1])
plt.show()
# # save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
group_A = [[d] for d in all_data[0]]
group_B = [[d] for d in all_data[2]]
permutation_test(group_A, group_B, iterations = 100000, two_tailed = False)
group_A = [[d] for d in all_data[2]]
group_B = [[d] for d in all_data[1]]
permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
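# For reference, a minimal sketch of a two-sample permutation test on per-mouse
# data, assuming the imported permutation_test pools per-mouse values and shuffles
# group labels -- an illustration under that assumption, not the actual implementation:
def _permutation_test_sketch(group_A, group_B, iterations=10000, two_tailed=False):
    ''' hypothetical stand-in for important_code.shuffle_test.permutation_test '''
    a = np.array([np.mean(mouse) for mouse in group_A])  # one value per mouse
    b = np.array([np.mean(mouse) for mouse in group_B])
    observed = np.mean(a) - np.mean(b)
    pooled = np.concatenate((a, b))
    count = 0
    for _ in range(iterations):
        np.random.shuffle(pooled)
        diff = np.mean(pooled[:len(a)]) - np.mean(pooled[len(a):])
        if two_tailed: count += abs(diff) >= abs(observed)
        else: count += diff >= observed
    return count / iterations  # permutation p-value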
def plot_speed_traces(self, speed = 'absolute'):
''' plot the speed traces '''
max_speed = 60
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
RT, end_idx, scaling_factor, speed_traces, subgoal_speed_traces, time, time_axis, trial_num = \
initialize_variables(number_of_trials, self,sub_experiments)
# create custom colormap
colormap = speed_colormap(scaling_factor, max_speed, n_bins=256, v_min=0, v_max=max_speed)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
for trial in range(len(self.analysis[experiment][condition]['speed'][mouse])):
if trial > 2: continue
trial_num = fill_in_trial_data(RT, condition, end_idx, experiment, mouse, scaling_factor, self,
speed_traces, subgoal_speed_traces, time, trial, trial_num)
# print some useful metrics
print_metrics(RT, end_idx, number_of_mice, number_of_trials)
# put the speed traces on the plot
fig = show_speed_traces(colormap, condition, end_idx, experiment, number_of_trials, speed, speed_traces, subgoal_speed_traces, time_axis, max_speed)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('done')
def plot_escape_paths(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = [np.array([1, .95, .85]), np.array([.98, .9, .6])**4]
homing_vector_color = [ np.array([.725, .725, .725]), np.array([0, 0, 0])]
non_escape_color = np.array([0,0,0])
fps = 30
escape_duration = 18 #6 #9 for food # 18 for U
min_distance_to_shelter = 30
HV_cutoff = 0.681 #.75 #.7
# initialize all data for stats
all_data = [[], [], [], []]
all_conditions = []
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
# more arena stuff for this analysis type
arena_reference = arena_color.copy()
arena_color[arena_reference == 245] = 255
get_arena_details(self, experiment=sub_experiments[0])
shelter_location = [s / scaling_factor / 10 for s in self.shelter_location]
# initialize strategy array
strategies = np.array([0,0,0])
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 9
else:
escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# color based on visual vs tactile obst avoidance
# if mouse == 'CA7190' or mouse == 'CA3210' or mouse == 'CA3155' or mouse == 'CA8100':
# edge_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# homing_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# else:
# edge_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# homing_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# show escape paths
show_escape_paths(HV_cutoff, arena, arena_color, arena_reference, c, condition, edge_vector_color, escape_duration, experiment, fps,
homing_vector_color, min_distance_to_shelter, mouse, non_escape_color, scaling_factor, self, shelter_location, strategies, path_ax,
determine_strategy = False) #('dark' in experiment and condition=='obstacle'))
# save image
# scipy.misc.imsave(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
imageio.imwrite(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot a stacked bar of strategies
fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('escape')
# strategies = np.array([4,5,0])
# fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# plt.show()
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# group_A = [[0],[1],[0,0,0],[0,0],[0,1],[1,0],[0,0,0]]
# group_B = [[1,0,0],[0,0,0,0],[0,0,0],[1,0,0],[0,0,0]]
# permutation_test(group_B, group_A, iterations = 10000, two_tailed = False)
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1]]
# obstacle_exp = [[0,1],[0,0,0,0,1],[0,1],[0]]
open_field = [[1,0,0,0,0],[0,0,0,0,0],[0,0,0,0],[1,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0,0,0]]
# U_shaped = [[0,1],[1,1], [1,1], [0,0,1], [0,0,0], [0], [1], [0], [0,1], [0,1,0,0], [0,0,0]]
# permutation_test(open_field, obstacle, iterations = 10000, two_tailed = False)
# do same edgy homing then stop to both
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0,0],[0,0,0],[1,0,0],[0,0,0],[0,0,1]] #stop at 3 trials
# do same edgy homing then stop to both --> exclude non escapes
obstacle = [[0],[1],[0,0,0],[0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0],[0,0,0],[1,0,0],[0,0,0],[0,1]] #stop at 3 trials
def plot_edginess(self):
# initialize parameters
fps = 30
escape_duration = 12 #9 #6
HV_cutoff = .681 #.681
ETD = 10 #10
traj_loc = 40
edge_vector_color = np.array([.98, .9, .6])**5
edge_vector_color = np.array([.99, .94, .6]) ** 3
# edge_vector_color = np.array([.99, .95, .6]) ** 5
homing_vector_color = np.array([0, 0, 0])
# homing_vector_color = np.array([.85, .65, .8])
# edge_vector_color = np.array([.65, .85, .7])
# colors for diff conditions
colors = [np.array([.7, 0, .3]), np.array([0, .8, .5])]
colors = [np.array([.3,.3,.3]), np.array([1, .2, 0]), np.array([0, .8, .4]), np.array([0, .7, .9])]
colors = [np.array([.3, .3, .3]), np.array([1, .2, 0]), np.array([.7, 0, .7]), np.array([0, .7, .9]), np.array([0,1,0])]
# colors = [np.array([0, 0, 0]), np.array([0, 0, 0]),np.array([0, 0, 0]), np.array([0, 0, 0])]
offset = [0,.2, .2, 0]
# initialize figures
fig, fig2, fig3, fig4, _, ax, ax2, ax3 = initialize_figures(self)
# initialize all data for stats
all_data = [[],[],[],[]]
all_conditions = []
mouse_ID = []; m = 1
dist_data_EV_other_all = []
delta_ICs, delta_x_end = [], []
time_to_shelter, was_escape = [], []
repetitions = 1
for rand_select in range(repetitions):
m = -1
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
num_trials_total = 0
num_trials_escape = 0
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
t_total = 0
# initialize array to fill in with each trial's data
edginess, end_idx, time_since_down, time_to_shelter, time_to_shelter_all, prev_edginess, scaling_factor, time_in_center, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
mouse_ID_trial = edginess.copy()
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 12
else: escape_duration = 12
# elif 'up' in experiment and 'probe' in condition:
# escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
m+=1
# initialize mouse data for stats
mouse_data = [[],[],[],[]]
print(mouse)
skip_mouse = False
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
prev_homings = []
x_edges_used = []
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
# impose conditions
if 'food' in experiment:
if t > 12: continue
if condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
num_trials_total += 1
elif 'void' in experiment:
if t > 5: continue
else:
if t>2: continue
# if trial > 2: continue
num_trials_total += 1
# if trial!=2: continue
# if 'off' in experiment and trial: continue
# if trial < 3 and 'wall down' in experiment: continue
# if condition == 'obstacle' and not 'non' in experiment and \
# self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
# if c == 0 and not (trial > 0): continue
# if c == 1 and not (trial): continue
# if c == 2 and not (trial == 0): continue
# if trial and ('lights on off' in experiment and not 'baseline' in experiment): continue
if 'Square' in experiment:
HV_cutoff = .56
HV_cutoff = 0
y_idx = self.analysis[experiment][condition]['path'][mouse][trial][1]
if y_idx[0] * scaling_factor > 50: continue
else:
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# print(y_start)
# print(x_start)
if y_start > 25: continue
if abs(x_start-50) > 30: continue
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
if np.isnan(end_idx[trial_num]) or (end_idx[trial_num] > escape_duration * fps):
# if not ('up' in experiment and 'probe' in condition and not np.isnan(RT)):
# mouse_data[3].append(0)
continue
''' check for previous edgy homings '''
# if 'dark' in experiment or True:
# num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial)
# # print(num_prev_edge_vectors)
# if num_prev_edge_vectors and c: continue
# if not num_prev_edge_vectors and not c: continue
# if num_prev_edge_vectors < 3 and (c==0): continue
# if num_prev_edge_vectors > 0 and c < 4: continue
# if t>1 and c == 2: continue
# if num_prev_edge_vectors >= 2: print('prev edgy homing'); continue
# if x_edge in x_edges_used: print('prev edgy escape'); continue
#
# print('-----------' + mouse + '--------------')
#
# if self.analysis[experiment][condition]['edginess'][mouse][trial] <= HV_cutoff:
# print(' HV ')
# else:
# print(' EDGY ')
# # edgy trial has occurred
# print('EDGY TRIAL ' + str(trial))
# x_edges_used.append(x_edge)
#
# # select only *with* prev homings
# if not num_prev_edge_vectors:
# if not x_edge in x_edges_used:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] > HV_cutoff:
# x_edges_used.append(x_edge)
# continue
# print(t)
num_trials_escape += 1
# add data
edginess[trial_num] = self.analysis[experiment][condition]['edginess'][mouse][trial]
time_since_down[trial_num] = np.sqrt((x_start - 50)**2 + (y_start - 50)**2 )# self.analysis[experiment][condition]['start angle'][mouse][trial]
print(edginess[trial_num])
if 'Square' in experiment:
if edginess[trial_num] <=-.3: # and False: #.15
edginess[trial_num] = np.nan
continue
# edginess to current edge as opposed to specific edge
if (('moves left' in experiment and condition == 'no obstacle') \
or ('moves right' in experiment and condition== 'obstacle')): # and False:
if edginess[trial_num] <= -0: # and False:
edginess[trial_num] = np.nan
continue
edginess[trial_num] = edginess[trial_num] - 1
# shelter edginess
if False:
y_pos = self.analysis[experiment][condition]['path'][mouse][trial][1][:int(end_idx[trial_num])] * scaling_factor
x_pos = self.analysis[experiment][condition]['path'][mouse][trial][0][:int(end_idx[trial_num])] * scaling_factor
# get the latter phase traj
y_pos_1 = 55
y_pos_2 = 65
x_pos_1 = x_pos[np.argmin(abs(y_pos - y_pos_1))]
x_pos_2 = x_pos[np.argmin(abs(y_pos - y_pos_2))]
#where does it end up
slope = (y_pos_2 - y_pos_1) / (x_pos_2 - x_pos_1)
intercept = y_pos_1 - x_pos_1 * slope
x_pos_proj = (80 - intercept) / slope
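# i.e. extrapolate the segment between y = 55 and y = 65 linearly to y = 80 to see where the path would land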
# compared to
x_pos_shelter_R = 40 #40.5 # defined as mean of null dist
# if 'long' in self.labels[c]:
# x_pos_shelter_R += 18
# compute the metric
shelter_edginess = (x_pos_proj - x_pos_shelter_R) / 18
edginess[trial_num] = -shelter_edginess
# if condition == 'obstacle' and 'left' in experiment:edginess[trial_num] = -edginess[trial_num] # for putting conditions together
# get previous edginess #TEMPORARY COMMENT
# if not t:
# SH_data = self.analysis[experiment][condition]['prev homings'][mouse][-1]
# time_to_shelter.append(np.array(SH_data[2]))
# was_escape.append(np.array(SH_data[4]))
if False: # or True:
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, delta_ICs, delta_x_end)
print(prev_edginess[trial_num])
print(trial + 1)
print('')
# get time in center
# time_in_center[trial_num] = self.analysis[experiment][condition]['time exploring obstacle'][mouse][trial]
# time_in_center[trial_num] = num_PORHVs
# if num_PORHVs <= 1:
# edginess[trial_num] = np.nan
# continue
# if (prev_edginess[trial_num] < HV_cutoff and not t) or skip_mouse:
# edginess[trial_num] = np.nan
# skip_mouse = True
# continue
''' qualify by prev homings '''
# if prev_edginess[trial_num] < .4: # and c:
# edginess[trial_num] = np.nan
# prev_edginess[trial_num] = np.nan
# continue
num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD = 10)
# print(str(num_prev_edge_vectors) + ' EVs')
#
# if not num_prev_edge_vectors >= 1 and c ==0:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if not num_prev_edge_vectors < 1 and c ==1:
# edginess[trial_num] = np.nan
# t+=1
# continue
# print(num_prev_edge_vectors)
# if num_prev_edge_vectors !=0 and c==3:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if num_prev_edge_vectors != 1 and c == 2:
# edginess[trial_num] = np.nan
# t += 1
# continue
# if num_prev_edge_vectors != 2 and num_prev_edge_vectors != 3 and c ==1:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# if num_prev_edge_vectors < 4 and c ==0:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# print(trial + 1)
# print(prev_edginess[trial_num])
# print(edginess[trial_num])
# print('')
# print(t)
# get time since obstacle removal?
# time_since_down[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment]['probe']['start time'][mouse][0]
# add data for stats
mouse_data[0].append(int(edginess[trial_num] > HV_cutoff))
mouse_data[1].append(edginess[trial_num])
mouse_data[2].append(prev_edginess[trial_num])
mouse_data[3].append(self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment][condition]['start time'][mouse][0])
mouse_ID_trial[trial_num] = m
t += 1
t_total += 1
#append data for stats
if mouse_data[0]:
all_data[0].append(mouse_data[0])
all_data[1].append(mouse_data[1])
all_data[2].append(mouse_data[2])
all_data[3].append(mouse_data[3])
all_conditions.append(c)
mouse_ID.append(m); m+= 1
else:
print(mouse)
print('0 trials')
# get prev homings
time_to_shelter_all.append(time_to_shelter)
dist_data_EV_other_all = np.append(dist_data_EV_other_all, dist_to_other_SH[edginess > HV_cutoff])
# print(t_total)
''' plot edginess by condition '''
# get the data
# data = abs(edginess)
data = edginess
plot_data = data[~np.isnan(data)]
# print(np.percentile(plot_data, 25))
# print(np.percentile(plot_data, 50))
# print(np.percentile(plot_data, 75))
# print(np.mean(plot_data > HV_cutoff))
# plot each trial
scatter_axis = scatter_the_axis(c, plot_data)
ax.scatter(scatter_axis[plot_data>HV_cutoff], plot_data[plot_data>HV_cutoff], color=edge_vector_color[::-1], s=15, zorder = 99)
ax.scatter(scatter_axis[plot_data<=HV_cutoff], plot_data[plot_data<=HV_cutoff], color=homing_vector_color[::-1], s=15, zorder = 99)
bp = ax.boxplot([plot_data, [0,0]], positions = [3 * c - .2, -10], showfliers=False, zorder=99)
plt.setp(bp['boxes'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['whiskers'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['medians'], linewidth=2)
ax.set_xlim([-1, 3 * len(self.experiments) - 1])
# ax.set_ylim([-.1, 1.15])
ax.set_ylim([-.1, 1.3])
#do kde
try:
if 'Square' in experiment:
kde = fit_kde(plot_data, bw=.06)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=.8, color=[.5,.5,.5], violin=False, clip=False, cutoff = HV_cutoff+0.0000001, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
ax.set_ylim([-1.5, 1.5])
else:
kde = fit_kde(plot_data, bw=.04)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=1.3, color=[.5,.5,.5], violin=False, clip=True, cutoff = HV_cutoff, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
except: pass
# plot the polar plot or initial trajectories
# plt.figure(fig4.number)
fig4 = plt.figure(figsize=( 5, 5))
# ax4 = plt.subplot(1,len(self.experiments),len(self.experiments) - c, polar=True)
ax4 = plt.subplot(1, 1, 1, polar=True)
plt.axis('off')
ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax4.set_xlim([-np.pi / 2 - .1, 0])
# ax4.set_xlim([-np.pi - .1, 0])
mean_value_color = max(0, min(1, np.mean(plot_data)))
mean_value_color = np.sum(plot_data > HV_cutoff) / len(plot_data)
mean_value = np.mean(plot_data)
value_color = mean_value_color * edge_vector_color[::-1] + (1 - mean_value_color) * homing_vector_color[::-1]
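# arrow color interpolates between the edge-vector and homing-vector colors by the fraction of edgy escapes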
ax4.arrow(mean_value + 3 * np.pi / 2, 0, 0, 1.9, color=[abs(v)**1 for v in value_color], alpha=1, width = 0.05, linewidth=2)
ax4.plot([0, 0 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
ax4.plot([0, 1 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
# ax4.plot([0, -1 + 3 * np.pi / 2], [0, 2.25], color=[.5, .5, .5], alpha=1, linewidth=1, linestyle='--')
scatter_axis_EV = scatter_the_axis_polar(plot_data[plot_data > HV_cutoff], 2.25, 0) #0.05
scatter_axis_HV = scatter_the_axis_polar(plot_data[plot_data <= HV_cutoff], 2.25, 0)
ax4.scatter(plot_data[plot_data > HV_cutoff] + 3 * np.pi/2, scatter_axis_EV, s = 30, color=edge_vector_color[::-1], alpha = .8, edgecolors = None)
ax4.scatter(plot_data[plot_data <= HV_cutoff] + 3 * np.pi/2, scatter_axis_HV, s = 30, color=homing_vector_color[::-1], alpha=.8, edgecolors = None)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.png'), format='png', transparent=True, bbox_inches='tight', pad_inches=0)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.eps'), format='eps', transparent=True, bbox_inches='tight', pad_inches=0)
# print(len(plot_data))
if len(plot_data) > 1 and False: # or True:
''' plot the correlation '''
# do both prev homings and time in center # np.array(time_since_down) # 'Time since removal'
for plot_data_corr, fig_corr, ax_corr, data_label in zip([prev_edginess, time_in_center], [fig2, fig3], [ax2, ax3], ['Prior homings','Exploration']): #
plot_data_corr = plot_data_corr[~np.isnan(data)]
# plot data
ax_corr.scatter(plot_data_corr, plot_data, color=colors[c], s=60, alpha=1, edgecolors=colors[c]/2, linewidth=1) #color=[.5, .5, .5] #edgecolors=[.2, .2, .2]
# do correlation
r, p = scipy.stats.pearsonr(plot_data_corr, plot_data)
print(r, p)
# do linear regression
plot_data_corr, prediction = do_linear_regression(plot_data, plot_data_corr)
# plot linear regresssion
ax_corr.plot(plot_data_corr, prediction['Pred'].values, color=colors[c], linewidth=1, linestyle='--', alpha=.7) #color=[.0, .0, .0]
ax_corr.fill_between(plot_data_corr, prediction['lower'].values, prediction['upper'].values, color=colors[c], alpha=.075) #color=[.2, .2, .2]
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.png'), format='png')
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# test correlation and stats thru permutation test
# data_x = list(np.array(all_data[2])[np.array(all_conditions) == c])
# data_y = list(np.array(all_data[1])[np.array(all_conditions) == c])
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all = True)
print(num_trials_escape)
print(num_trials_total)
print(num_trials_escape / num_trials_total)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
time_to_shelter_all = np.concatenate(list(flatten(time_to_shelter_all))).astype(float)
np.percentile(time_to_shelter_all, 25)
np.percentile(time_to_shelter_all, 75)
group_A = list(np.array(all_data[0])[np.array(all_conditions) == 2])
group_B = list(np.array(all_data[0])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
group_A = list(np.array(all_data[1])[(np.array(all_conditions) == 1) + (np.array(all_conditions) == 2)])
group_B = list(np.array(all_data[1])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
df = pd.DataFrame(data={"mouse_id": mouse_ID, "condition": all_conditions, "x-data": all_data[2], "y-data": all_data[1]})
df.to_csv("./Foraging Path Types.csv", sep=',', index=False)
group_B = list(flatten(np.array(all_data[0])[np.array(all_conditions) == 1]))
np.sum(group_B) / len(group_B)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 75)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 75)
group_A = [[d] for d in abs(time_since_down[edginess > HV_cutoff])]
group_B = [[d] for d in abs(time_since_down[edginess < HV_cutoff])]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
WE = np.concatenate(was_escape)
TTS_spont = np.concatenate(time_to_shelter)[~WE]
TTS_escape = np.concatenate(time_to_shelter)[WE]
trials = np.array(list(flatten(all_data[3])))
edgy = np.array(list(flatten(all_data[0])))
# mean edginess at each trial number (printed so the values are visible when run as a script)
for trial_idx in range(14):
    print(trial_idx, np.mean(edgy[trials == trial_idx]))
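# For reference, a minimal sketch of the KDE helper used above, assuming fit_kde
# (imported via helper_code.analysis_funcs) wraps scipy's gaussian_kde with a fixed
# bandwidth -- an illustration under that assumption, not the actual implementation:
def _fit_kde_sketch(data, bw):
    ''' hypothetical stand-in for fit_kde: gaussian KDE with a fixed bandwidth '''
    from scipy.stats import gaussian_kde
    return gaussian_kde(np.asarray(data, dtype=float), bw_method=bw)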
'''
TRADITIONAL METRICS
'''
def plot_metrics_by_strategy(self):
''' plot traditional escape metrics, split by strategy '''
# initialize parameters
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
non_escape_color = np.array([0,0,0])
ETD = 10#0
traj_loc = 40
fps = 30
# escape_duration = 12 #12 #9 #12 9 for food 12 for dark
HV_cutoff = .681 #.65
edgy_cutoff = .681
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, duration_RT, duration, prev_edginess, edginess, _, _, _, _, \
_, _, _, _, _, scaling_factor, time, trial_num, trials, edginess, avg_speed, avg_speed_RT, peak_speed, RT, escape_speed, strategy = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
mouse_id = efficiency.copy()
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop across all trials
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
if 'food' in experiment: escape_duration = 9
else: escape_duration = 12
trial_num += 1
# impose conditions - escape duration
end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# needs to start at top
if y_start > 25: continue
if abs(x_start - 50) > 30: continue
# get the strategy used
# edgy_escape = self.analysis[experiment][condition]['edginess'][mouse][trial] > edgy_cutoff
# is it a homing vector
# strategy_code = 0
# TEMPORARY COMMENTING
# if not edgy_escape:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] < HV_cutoff: strategy_code = 0 # homing vector
# else: continue
# else:
# get the strategy used -- NUMBER OF PREVIOUS EDGE VECTOR HOMINGS
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, [], [],
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
if t > 2: continue
# if c == 0 and trial: continue
# if c == 1 and trial != 2: continue
t+=1
# if prev_edginess[trial_num] >= HV_cutoff: strategy_code = 1 # path learning
# elif prev_edginess[trial_num] < HV_cutoff: strategy_code = 2 # map-based
# else: continue
# how many prev homings to that edge: if 0, then map-based, if >1, then PL
if len(self.analysis[experiment]['probe']['start time'][mouse]):
edge_time = self.analysis[experiment]['probe']['start time'][mouse][0] - 1
else: edge_time = 19
edge_time = np.min((edge_time, self.analysis[experiment][condition]['start time'][mouse][trial]))
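# use homings from before the probe trial (or the first 19 min if there is none) and before the current trial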
# print(edge_time)
num_edge_vectors, _ = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD=ETD, time_threshold=edge_time, other_side = False)
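# NOTE: the next line overwrites the edge-vector count with a homing-vector count (exploratory swap)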
num_edge_vectors = get_num_homing_vectors(self, experiment, condition, mouse, trial, spontaneous = False, time_threshold = edge_time)
print(num_edge_vectors)
# if 'wall up' in experiment and 'no' in condition: num_edge_vectors = 0
# print(num_edge_vectors)
if True: # exploratory toggle: classify trials by the vector count computed above
if num_edge_vectors == 1:
strategy_code = 1
# print('EV -- ' + mouse + ' - trial ' + str(trial))
elif num_edge_vectors == 0:
strategy_code = 0
# print('NO EV -- ' + mouse + ' - trial ' + str(trial))
else: continue
else:
strategy_code = 0
strategy[trial_num] = strategy_code
# add data for each metric
RT[trial_num] = self.analysis[experiment][condition]['RT'][mouse][trial]
avg_speed[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)]) * scaling_factor * 30
avg_speed_RT[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps + int(RT[trial_num]*30) : 10*fps+int(end_time)]) * scaling_factor * 30
peak_speed[trial_num] = np.max(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)])*fps*scaling_factor
escape_speed[trial_num] = self.analysis[experiment][condition]['optimal path length'][mouse][trial] * scaling_factor / (end_time/30)
efficiency[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal path length'][mouse][trial] / \
self.analysis[experiment][condition]['full path length'][mouse][trial]))
efficiency_RT[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / \
self.analysis[experiment][condition]['RT path length'][mouse][trial]))
duration_RT[trial_num] = (end_time / fps - RT[trial_num]) / self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / scaling_factor * 100
duration[trial_num] = end_time / fps / self.analysis[experiment][condition]['optimal path length'][mouse][trial] / scaling_factor * 100
# duration[trial_num] = trial
# duration_RT[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial]
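# NOTE: the next line overwrites avg_speed (computed above) with far-exploration time (exploratory swap)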
avg_speed[trial_num] = self.analysis[experiment][condition]['time exploring far (pre)'][mouse][trial] / 60
# add data for stats
mouse_id[trial_num] = m
m+=1
# for metric, data in zip(['Reaction time', 'Peak speed', 'Avg speed', 'Path efficiency - RT','Duration - RT', 'Duration'],\
# [RT, peak_speed, avg_speed_RT, efficiency_RT, duration_RT, duration]):
# for metric, data in zip(['Reaction time', 'Avg speed', 'Path efficiency - RT'], #,'Peak speed', 'Duration - RT', 'Duration'], \
# [RT, avg_speed_RT, efficiency_RT]): #peak_speed, , duration_RT, duration
for metric, data in zip(['Path efficiency - RT'], [efficiency_RT]):
# for metric, data in zip([ 'Duration - RT'],
# [ duration_RT]):
# for metric, data in zip(['trial', 'time', 'time exploring back'],
# [duration, duration_RT, avg_speed]):
# format data
x_data = strategy[~np.isnan(data)]
y_data = data[~np.isnan(data)]
if not c: OF_data = y_data
# make figure
fig, ax = plt.subplots(figsize=(11, 9))
plt.axis('off')
# ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
# ax.set_title(metric)
if 'Reaction time' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [2, 2], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [3, 3], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [4, 4], linestyle='--', color=[.5, .5, .5, .5])
elif 'Peak speed' in metric:
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [80, 80], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [120, 120], linestyle='--', color=[.5, .5, .5, .5])
elif 'Avg speed' in metric:
ax.plot([-.75, 3], [25, 25], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [50, 50], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [75, 75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
elif 'Path efficiency' in metric:
ax.plot([-.75, 3], [.5,.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [.75, .75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
elif 'Duration' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5, 5], linestyle='--', color=[.5, .5, .5, .5])
elif 'time' == metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [20, 20], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [30, 30], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
elif 'exploring' in metric:
ax.plot([-.75, 3], [2.5, 2.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5.0, 5.0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [7.5, 7.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
#initialize stats array
stats_data = [[], [], []]
# go thru each strategy
for s in [0,1,2]:
# format data
if not np.sum(x_data==s): continue
plot_data = y_data[x_data==s]
median = np.percentile(plot_data, 50)
third_quartile = np.percentile(plot_data, 75)
first_quartile = np.percentile(plot_data, 25)
# print(first_quartile)
# print(median)
# print(third_quartile)
# if 'Reaction' in metric: print(str(first_quartile), str(median), str(third_quartile))
IQR = third_quartile - first_quartile
# remove outliers
if not metric == 'trial':
outliers = abs(plot_data - median) > 2*IQR
# plot_data = plot_data[~outliers]
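# (the outlier mask is still used below to filter mouse IDs for stats)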
# plot all data
ax.scatter(np.ones_like(plot_data)*s, plot_data, color=[0,0,0], s=30, zorder = 99)
# plot kde
if 'efficiency' in metric: bw_factor = .02
elif 'speed' in metric or metric == 'time': bw_factor = .04
elif 'exploring' in metric: bw_factor = .06
elif 'Duration' in metric: bw_factor = .07
else: bw_factor = .09
kde = fit_kde(plot_data, bw=np.median(y_data)*bw_factor)
plot_kde(ax, kde, plot_data, z= s + .1, vertical=True, normto=.4, color=[.75, .75, .75], violin=False, clip=True)
# plot errorbar
ax.errorbar(s - .15, median, yerr=np.array([[median - first_quartile], [third_quartile - median]]), color=[0, 0, 0], capsize=10, capthick=3, alpha=1, linewidth=3)
ax.scatter(s - .15, median, color=[0, 0, 0], s=175, alpha=1)
# print(len(plot_data))
# get mouse ids for stats
mouse_id_stats = mouse_id[~np.isnan(data)]
mouse_id_stats = mouse_id_stats[x_data==s]
if not metric == 'trial': mouse_id_stats = mouse_id_stats[~outliers]
# for m in np.unique(mouse_id_stats):
# stats_data[s].append( list(plot_data[mouse_id_stats==m]) )
print(metric)
# for ss in [[0,1]]: #, [0,2], [1,2]]:
# group_A = stats_data[ss[0]]
# group_B = stats_data[ss[1]]
# permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
# save figure
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
plt.close('all')
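# NOTE: tr1_eff / tr3_eff / OF_eff below are assumed to be defined interactively -- they are not set in this file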
group_A = [[e] for e in tr1_eff]
group_B = [[e] for e in tr3_eff]
group_C = [[e] for e in OF_eff]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
permutation_test(group_A, group_C, iterations=10000, two_tailed=True)
permutation_test(group_B, group_C, iterations=10000, two_tailed=True)
'''
DIST OF TURN ANGLES
'''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig, ax = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_angles_pre = []
# all_angles_escape = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# turn_angles_pre = []
# turn_angles_escape = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # loop across all trials
# t = 0
# for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
# # impose conditions - escape duration
# end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
# if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
#
#
# ## COMMENT ONE OR THE OTHER IF TESTING PRE OR ESCAPE
# #pre
# # if trial < 2: continue
# # if t: continue
#
# # escape
# if t > 2: continue
#
# # skip certain trials
# y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
# x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# # needs to start at top
# if y_start > 25: continue
# if abs(x_start - 50) > 30: continue
#
# turn_angles_pre.append(list(abs(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))) # >145
# turn_angles_escape.append(abs(self.analysis[experiment][condition]['movement'][mouse][trial][2])) # >145
# #
# # turn_angles_pre.append(list(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))
# # turn_angles_escape.append(self.analysis[experiment][condition]['movement'][mouse][trial][2])
#
# t += 1
#
#
#
# # format data
# hist_data_pre = np.array(list(flatten(turn_angles_pre)))
# hist_data_escape = np.array(list(flatten(turn_angles_escape)))
#
# # for permutation test
# # all_angles_pre.append(turn_angles_pre)
# # all_angles_escape.append([[tae] for tae in turn_angles_escape])
#
# ax.set_title('Prior movement angles')
# ax2.set_title('Escape movement angles')
# ax.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
#
# # format data
# bin_width = 30
# hist_pre, n, _ = ax.hist(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_pre) * 1/ len(hist_data_pre))
# hist_escape, n, _ = ax2.hist(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_escape) * 1/ len(hist_data_escape))
#
# count_pre, n = np.histogram(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width))
# count_escape, n = np.histogram(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width))
#
# # for chi squared
# all_angles_pre.append(count_pre)
# all_angles_escape.append(count_escape)
#
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# # save figure
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
# plt.show()
#
#
# scipy.stats.chi2_contingency(all_angles_pre)
# scipy.stats.chi2_contingency(all_angles_escape)
#
#
# group_A = all_angles_pre[0]
# group_B = all_angles_pre[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_angles_escape[0]
# group_B = all_angles_escape[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
#
# '''
# DIST OF EDGE VECTORS
# '''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# dist_thresh = 5
# time_thresh = 20
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig1, ax1 = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_EVs = []
# all_HVs = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# EVs = []
# HVs = []
# edge_vector_time_exp = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # just take the last trial
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if trial < 0:
# if condition == 'obstacle':
# condition_use = 'no obstacle'
# trial = 0
# elif condition == 'no obstacle':
# condition_use = 'obstacle'
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if mouse == 'CA7220': trial = 1 #compensate for extra vid
# else: condition_use = condition
#
# # get the prev homings
# SH_data = self.analysis[experiment][condition_use]['prev homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# SH_x = np.array(SH_data[0])
#
# # only use spontaneous
# stim_evoked = np.array(SH_data[4])
# SH_x = SH_x[~stim_evoked]
# homing_time = homing_time[~stim_evoked]
#
# # normalize to 20 min
# SH_x = SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_edge_vectors = np.sum(abs(SH_x - 25) < dist_thresh) + np.sum(abs(SH_x - 75) < dist_thresh)
# num_homing_vectors = np.sum(abs(SH_x - 50) < dist_thresh)
# print(num_edge_vectors)
#
#
# # get the prev anti homings
# anti_SH_data = self.analysis[experiment][condition_use]['prev anti-homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(anti_SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# anti_SH_x = np.array(anti_SH_data[0])
#
# # limit to 20 min
# anti_SH_x = anti_SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_anti_edge_vectors = np.sum(abs(anti_SH_x - 25) < dist_thresh) + np.sum(abs(anti_SH_x - 75) < dist_thresh)
# num_anti_homing_vectors = np.sum(abs(anti_SH_x - 50) < dist_thresh)
# print(num_anti_edge_vectors)
#
# # append to list
# EVs.append(num_edge_vectors + num_anti_edge_vectors )
# HVs.append(num_edge_vectors + num_anti_edge_vectors - (num_homing_vectors + num_anti_homing_vectors))
# print(EVs)
# all_EVs.append(EVs)
# all_HVs.append(HVs)
#
# # make timing hist
# plt.figure()
# plt.hist(list(flatten(edge_vector_time_exp)), bins=np.arange(0, 22.5, 2.5)) #, color=condition_colors[c])
#
# # plot EVs and HVs
# for plot_data, ax, fig in zip([EVs, HVs], [ax1, ax2], [fig1, fig2]):
#
# scatter_axis = scatter_the_axis(c * 4 / 3 + .5 / 3, plot_data)
# ax.scatter(scatter_axis, plot_data, color=[0, 0, 0], s=25, zorder=99)
# # do kde
# kde = fit_kde(plot_data, bw=.5)
# plot_kde(ax, kde, plot_data, z=4 * c + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=False) # True)
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
#
# plt.show()
#
#
# group_A = all_EVs[1]
# group_B = all_EVs[2]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_HVs[0]
# group_B = all_HVs[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
'''
PREDICTION PLOTS, BY TURN ANGLE OR EXPLORATION/EDGINESS
|
|
v
'''
def plot_prediction(self):
by_angle_not_edginess = False
if by_angle_not_edginess:
# initialize parameters
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
plt.close(fig2); plt.close(fig3)
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
IC_x_all, IC_y_all, IC_angle_all, IC_time_all, turn_angles_all = [], [], [], [], []
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, x_pred, y_pred, angle_pred, time_pred, mean_pred, initial_body_angle, initial_x, initial_y, x_edge, _, \
_, _, _, _, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# initialize array to fill in with each trial's data
edginess, end_idx, angle_turned, _, _, prev_edginess, scaling_factor, _, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
for shuffle_time in [False, True]:
angle_turned_all, x_pred_all, y_pred_all, angle_pred_all, time_pred_all, mean_pred_all = [], [], [], [], [], []
                    # num_repeats = shuffle_time * 499 + 1  # alternative: 500 shuffle repeats
                    num_repeats = shuffle_time * 19 + 1
prediction_scores_all = []
for r in range(num_repeats):
trial_num = -1
# loop over each experiment and condition
for e, (experiment_real, condition_real) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse_real in enumerate(self.analysis[experiment_real][condition_real]['start time']):
if self.analysis_options['control'] and not mouse_real=='control': continue
if not self.analysis_options['control'] and mouse_real=='control': continue
# loop over each trial
prev_homings = []
t = 0
for trial_real in range(len(self.analysis[experiment_real][condition_real]['end time'][mouse_real])):
trial_num += 1
# impose conditions
if t > 2: continue
end_idx[trial_num] = self.analysis[experiment_real][condition_real]['end time'][mouse_real][trial_real]
if np.isnan(end_idx[trial_num]): continue
if (end_idx[trial_num] > escape_duration * fps): continue
# skip certain trials
y_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][1][0] * scaling_factor
x_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
# if shuffle_time:
# experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
# else:
# experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' just use real mouse '''
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' control ICs, real escape '''
# # get the angle turned during the escape
angle_turned[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][2]
# angle_turned[trial_num] = abs(self.analysis[experiment_real][condition_real]['edginess'][mouse_real][trial_real])
# get the angle turned, delta x, delta y, and delta phi of previous homings
bout_start_angle = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
bout_start_position = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0]
start_time = self.analysis[experiment_real][condition_real]['start time'][mouse_real][trial_real]
# get initial conditions and endpoint quantities
IC_x = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][0][-ETD:])
IC_y = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][1][-ETD:])
IC_angle = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][2][-ETD:])
IC_time = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][3][-ETD:])
turn_angles = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3][-ETD:])
# MOE = 10
# x_edge_trial = self.analysis[experiment][condition]['x edge'][mouse][trial]
# SH_x = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][0][-ETD:])
# if x_edge_trial > 50 and np.sum(SH_x > 25 + MOE):
# IC_x = IC_x[SH_x > 25 + MOE]
# IC_y = IC_y[SH_x > 25 + MOE]
# IC_angle = IC_angle[SH_x > 25 + MOE]
# IC_time = IC_time[SH_x > 25 + MOE]
# turn_angles = turn_angles[SH_x > 25 + MOE]
# elif np.sum(SH_x > 75 - MOE):
# IC_x = IC_x[SH_x > 75 - MOE]
# IC_y = IC_y[SH_x > 75 - MOE]
# IC_angle = IC_angle[SH_x > 75 - MOE]
# IC_time = IC_time[SH_x > 75 - MOE]
# turn_angles = turn_angles[SH_x > 75 - MOE]
if not shuffle_time: # gather previous movements
IC_x_all = np.concatenate((IC_x_all, IC_x))
IC_y_all = np.concatenate((IC_y_all, IC_y))
IC_angle_all = np.concatenate((IC_angle_all, IC_angle))
IC_time_all = np.concatenate((IC_time_all, IC_time))
turn_angles_all = np.concatenate((turn_angles_all, turn_angles))
else:
# sample randomly from these movements
random_idx = np.random.choice(len(IC_x_all), len(IC_x_all), replace = False)
IC_x = IC_x_all[random_idx]
IC_y = IC_y_all[random_idx]
IC_angle = IC_angle_all[random_idx]
IC_time = IC_time_all[random_idx]
turn_angles = turn_angles_all[random_idx]
# calculate difference in ICs
delta_x = abs( np.array(IC_x - bout_start_position[0]) )
delta_y = abs( np.array(IC_y - bout_start_position[1]) )
delta_angle = abs( np.array(IC_angle - bout_start_angle) )
delta_angle[delta_angle > 180] = 360 - delta_angle[delta_angle > 180]
delta_time = start_time - np.array(IC_time)
''' prediction data -- angle turned is a function of prev movement and ICs '''
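                                    # the predictions below are kernel-weighted averages of previous turn angles:
                                    # each weight is proportional to 1 / |delta| of one initial-condition feature
                                    # (x, y, body angle, or elapsed time), so prior movements that started in
                                    # similar conditions contribute more to the predicted turn angle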
x_weights = (1 / (delta_x+.0001)) / np.sum(1/(delta_x+.0001))
y_weights = (1 / (delta_y+.0001)) / np.sum(1 / (delta_y+.0001))
angle_weights = (1 / (delta_angle+.0001)) / np.sum(1 / (delta_angle+.0001))
time_weights = (1 / (delta_time+.0001)) / np.sum(1 / (delta_time+.0001))
x_pred[trial_num] = np.sum(turn_angles * x_weights)
y_pred[trial_num] = np.sum(turn_angles * y_weights)
angle_pred[trial_num] = np.sum(turn_angles * angle_weights)
time_pred[trial_num] = np.sum(turn_angles * time_weights) * 0
mean_pred[trial_num] = np.mean(turn_angles) * 0
# try mean pred is the *closest* angle to real
# x_pred[trial_num] = 0
# y_pred[trial_num] = 0
# angle_pred[trial_num] = 0
# time_pred[trial_num] = 0
# mean_pred[trial_num] = turn_angles[np.argmin( abs(turn_angles - angle_turned[trial_num]) )]
# ''' turn angle prediction to edginess prediction '''
if not shuffle_time:
edginess[trial_num] = abs(self.analysis[experiment][condition]['edginess'][mouse][trial])
initial_body_angle[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
initial_x[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][0]
initial_y[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][1]
x_edge[trial_num] = self.analysis[experiment][condition]['x edge'][mouse][trial_real]
# add mouse and trial to list of mice and trials
if not shuffle_time:
mouse_trial_list.append([experiment, condition, mouse, trial])
t+=1
''' concatenate??... '''
# angle_turned_all = np.concatenate((angle_turned_all, angle_turned))
#
# x_pred_all = np.concatenate((x_pred_all, x_pred))
# y_pred_all = np.concatenate((y_pred_all, y_pred))
# angle_pred_all = np.concatenate((angle_pred_all, angle_pred))
# time_pred_all = np.concatenate((time_pred_all, time_pred ))
# mean_pred_all = np.concatenate((mean_pred_all, mean_pred ))
#
#
# IC_angle_array = np.ones((len(angle_turned_all[~np.isnan(angle_turned_all)]), 5))
# angle_metrics = [x_pred_all[~np.isnan(angle_turned_all)], y_pred_all[~np.isnan(angle_turned_all)], angle_pred_all[~np.isnan(angle_turned_all)], \
# time_pred_all[~np.isnan(angle_turned_all)], mean_pred_all[~np.isnan(angle_turned_all)]]
# for i, angle_metric in enumerate(angle_metrics): #
# IC_angle_array[:, i] = angle_metric
#
# # get the data
# predict_data_y_all = [ angle_turned_all[~np.isnan(angle_turned_all)].reshape(-1, 1)] # for the movements input data
''' don't concatenate... '''
IC_angle_array = np.ones((len(angle_turned[~np.isnan(angle_turned)]), 5))
angle_metrics = [x_pred[~np.isnan(angle_turned)], y_pred[~np.isnan(angle_turned)],
angle_pred[~np.isnan(angle_turned)], \
time_pred[~np.isnan(angle_turned)], mean_pred[~np.isnan(angle_turned)]]
for i, angle_metric in enumerate(angle_metrics): #
IC_angle_array[:, i] = angle_metric
# get the data
predict_data_y_all_angle = [angle_turned[~np.isnan(angle_turned)].reshape(-1, 1)] # for the movements input data
predict_data_y_all_edgy = [edginess[~np.isnan(edginess)].reshape(-1, 1)] # for the movements input data
data_y_labels = ['angle']
predict_data_x_all = [IC_angle_array] # turn angles
predict_data_y_all = predict_data_y_all_angle # angles
''' predict edginess from turn angle '''
predict_edginess = True
if predict_edginess:
if not shuffle_time:
initial_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1)
initial_x = initial_x[~np.isnan(initial_x)].reshape(-1, 1)
initial_y = initial_y[~np.isnan(initial_y)].reshape(-1, 1)
x_edge = x_edge[~np.isnan(x_edge)].reshape(-1, 1)
# create the model
LR = linear_model.Ridge(alpha=.1)
# train the model
LR.fit(predict_data_x_all[0], predict_data_y_all_angle[0])
print(LR.score(predict_data_x_all[0], predict_data_y_all_angle[0]))
# get the model prediction
# model_prediction = LR.predict(predict_data_x_all[0])
model_prediction = predict_data_y_all_angle[0]
# predict body angles after turn
predicted_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1) - model_prediction
predicted_body_angle[predicted_body_angle >180] = predicted_body_angle[predicted_body_angle >180] - 360
                            predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle < 90)] = -1 # super edgy to the right
                            predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle > 90)] = 1 # super edgy to the left
# predict position at y = 40; set reasonable boundaries
x_at_40 = np.maximum(15 * np.ones_like(initial_x), np.minimum(90 * np.ones_like(initial_x),
initial_x - (40 - initial_y) / np.tan(np.deg2rad(predicted_body_angle)) ))
# get edginess
y_pos_end = 86.5; x_pos_end = 50; y_edge = 50
slope = (y_pos_end - initial_y) / (x_pos_end - (initial_x+.0001))
intercept = initial_y - initial_x * slope
distance_to_line = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
homing_vector_at_center = (40 - intercept) / slope
# do line from starting position to edge position
slope = (y_edge - initial_y) / (x_edge - initial_x)
intercept = initial_y - initial_x * slope
distance_to_edge = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
# compute the max possible deviation
edge_vector_at_center = (40 - intercept) / slope
line_to_edge_offset = abs(homing_vector_at_center - edge_vector_at_center) # + 5
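                            # edginess normalization: 0 -> the trajectory follows the direct homing vector,
                            # 1 -> it passes through the obstacle edge; the offset term rescales the deviation
                            # at y = 40 by the separation between the two reference lines at that height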
# get index at center point (wall location)
# prev_edginess = np.maximum(np.zeros_like(distance_to_line), np.minimum(1.2*np.ones_like(distance_to_line),
# (distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset) ))
prev_edginess = abs((distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset))
predict_data_x_all = [prev_edginess] # predicted prev edginess #scipy.stats.zscore(
predict_data_y_all = predict_data_y_all_edgy # edginess
# edgy input colors
input_colors = [ [[0, .6, .4], [.5,.5,.5]], [[0, .6, .4], [.5,.5,.5]], [[.6, 0, .4], [.5,.5,.5]] ]
# split the data for cross val
num_trials = 1000 - 985 * shuffle_time #985
                        # loop across angle prediction and trajectory prediction
for i, (fig, ax, predict_data_x) in enumerate(zip([fig1, fig2, fig3],[ax1, ax2, ax3], predict_data_x_all)):
# get prediction data
predict_data_y = predict_data_y_all[i]
# get color
color = input_colors[i][int(shuffle_time)]
# initialize prediction arrays
prediction_scores = np.zeros(num_trials)
for j in range(num_trials):
test_size = 0.5
# test_size = 0.25
# if shuffle_time: test_size = 0.25
# get x-val set
X_train, X_test, y_train, y_test = train_test_split(predict_data_x, \
predict_data_y, test_size=test_size, random_state=j)
# create the model
LR = linear_model.Ridge(alpha = .1) # .15, .5
# train the model
LR.fit(X_train, y_train)
# get the score
prediction_scores[j] = LR.score(X_test, y_test)
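                                # LR.score returns the out-of-sample R^2; repeating over many random
                                # train/test splits yields a distribution of scores, summarized below as a KDE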
# exclude super negative ones
# prediction_scores = prediction_scores[prediction_scores > np.percentile(prediction_scores, 10)]
# put into larger array
prediction_scores_all = np.concatenate((prediction_scores_all, prediction_scores))
print(np.median(prediction_scores_all))
# exclude super negative ones
# prediction_scores_all = prediction_scores_all[prediction_scores_all > np.percentile(prediction_scores_all, 5)]
#do kde
kde = fit_kde(prediction_scores_all, bw=.03) # .04
plot_kde(ax, kde, prediction_scores_all, z = 0, vertical=False, color=color, violin=False, clip=False) # True)
#plt.show()
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.png'), format='png')
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
print('hi')
else:
'''
PREDICTION PLOTS EDGINESS OR BY **EXPLORATION**
'''
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# mean_types = ['even', 'space', 'angle'] #, 'time', 'shelter time']
mean_types = ['space', 'angle', 'shelter time'] #, 'escape']
mean_type = 'even'
            # mean_colors = [[0, .6, .4], [0, .6, .8], [0, .6, .8], [.4, 0, 1]]  # alternative palette
mean_colors = [[0, .6, .4], [.4, 0, .8], [0, .6, .8], [.5, .5, .5]]
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
for m, mean_type in enumerate(mean_types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
# initialize array to fill in with each trial's data
edginess, end_idx, angle_turned, _, _, prev_edginess, scaling_factor, _, trial_num, prev_movement_and_ICs, data_y_for_prev_movement, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_front_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre, \
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge, time_exploring_other_edge, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
for shuffle_time in [False]:
num_repeats = shuffle_time * 19 + 1
for r in range(num_repeats):
trial_num = -1
# loop over each experiment and condition
for e, (experiment_real, condition_real) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse_real in enumerate(self.analysis[experiment_real][condition_real]['start time']):
if self.analysis_options['control'] and not mouse_real=='control': continue
if not self.analysis_options['control'] and mouse_real=='control': continue
# loop over each trial
prev_homings = []
t = 0
for trial_real in range(len(self.analysis[experiment_real][condition_real]['end time'][mouse_real])):
trial_num += 1
# impose conditions
if t > 2: continue
end_idx[trial_num] = self.analysis[experiment_real][condition_real]['end time'][mouse_real][trial_real]
if np.isnan(end_idx[trial_num]): continue
if (end_idx[trial_num] > escape_duration * fps): continue
# skip certain trials
y_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][1][0] * scaling_factor
x_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
if shuffle_time:
experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
else:
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
# just add real data for edginess etc
if not shuffle_time:
# add data
edginess[trial_num] = abs(self.analysis[experiment][condition]['edginess'][mouse][trial])
# get previous edginess
time_to_shelter, SR = get_prev_edginess(ETD, condition_real, experiment_real, mouse_real, prev_edginess, dist_to_SH,
dist_to_other_SH, scaling_factor, self, traj_loc, trial_real, trial_num, edginess,
[], [], mean = mean_type, get_initial_conditions=True)
# _, _, prev_edginess_all, elig_idx = get_all_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH, scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
# add data
fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV,
num_prev_homings_front_EV, num_prev_homings_other_EV,
num_prev_homings_HV,
time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post,
time_exploring_obstacle_pre,
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge,
time_exploring_other_edge,
self, time, trial, trial_num, trials, edginess, t)
# add mouse and trial to list of mice and trials
if not shuffle_time:
mouse_trial_list.append([experiment, condition, mouse, trial])
t+=1
# format mean prior trajectory
if not shuffle_time:
prev_edginess = prev_edginess[~np.isnan(edginess)]
exploration_array = np.ones((len(edginess[~np.isnan(edginess)]), 2))
exploration_metrics = [time_exploring_far_pre[~np.isnan(edginess)], time_exploring_far_post[~np.isnan(edginess)]]
for i, exploration_metric in enumerate(exploration_metrics): #
exploration_array[:, i] = exploration_metric
if shuffle_time: # regress out other variable
m = (((np.mean(prev_edginess) * np.mean(exploration_array[:, i])) - np.mean(prev_edginess * exploration_array[:, i])) /
((np.mean(prev_edginess) ** 2) - np.mean(prev_edginess ** 2)))
regressed_data = exploration_array[:, i] - prev_edginess * m
exploration_array[:, i] = regressed_data
if shuffle_time: # regress out exploration from mean prior traj
for exploration_metric in exploration_metrics:
m = (((np.mean(exploration_metric) * np.mean(prev_edginess)) - np.mean(exploration_metric * prev_edginess)) /
((np.mean(exploration_metric) ** 2) - np.mean(exploration_metric ** 2)))
regressed_data = prev_edginess - exploration_array[:, 0] * m
prev_edginess = regressed_data
# get the data
predict_data_y_all = [ edginess[~np.isnan(edginess)].reshape(-1, 1), # for the EXPLORATION input data
edginess[~np.isnan(edginess)].reshape(-1, 1)] # for the mean edginess input data
# turn_angle_for_prev_movement ] # for the movements input data
data_y_labels = ['exploration','trajectory'] #, 'angle']
predict_data_x_all = [exploration_array, # exploration data
prev_edginess.reshape(-1, 1)]#, # mean prev edginess
# prev_movements_and_ICs_array] # all prev homing movements
# edgy input colors
input_colors = [ [[0, .6, .4], [.5,.5,.5]], [[0, .6, .4], [.5,.5,.5]], [[.6, 0, .4], [.5,.5,.5]] ]
# split the data for cross val
num_trials = 1000
                            # loop across angle prediction and trajectory prediction
for i, (fig, ax, predict_data_x) in enumerate(zip([fig1, fig2, fig3],[ax1, ax2, ax3], predict_data_x_all)):
# get prediction data
predict_data_y = predict_data_y_all[i]
# get color
color = input_colors[i][int(shuffle_time)]
# color = mean_colors[m]
# initialize prediction arrays
prediction_scores = np.zeros(num_trials)
for j in range(num_trials):
test_size = 0.5
if shuffle_time and i==2:
test_size = .025
# get x-val set
X_train, X_test, y_train, y_test = train_test_split(predict_data_x, \
predict_data_y, test_size=test_size, random_state=j)
# create the model
# LR = linear_model.LinearRegression()
# if i:
# LR = linear_model.LogisticRegression()
# else:
LR = linear_model.Ridge(alpha = .1) # .15, .5
# train the model
# try:
LR.fit(X_train, y_train)
# except:
# print('i=h')
# print(LR.coef_)
# get the score
prediction_scores[j] = LR.score(X_test, y_test)
print(data_y_labels[i])
print(np.median(prediction_scores))
# exclude super negative ones
prediction_scores = prediction_scores[prediction_scores > np.percentile(prediction_scores, 10)]
# plot the scores
# ax.scatter(prediction_scores, np.zeros_like(prediction_scores), color=color, s=20, alpha = .1)
#do kde
kde = fit_kde(prediction_scores, bw=.04) # .04
plot_kde(ax, kde, prediction_scores, z = 0, vertical=False, color=color, violin=False, clip=False) # True)
fig.savefig(os.path.join(self.summary_plots_folder,'Prediction of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.png'), format='png')
                                fig.savefig(os.path.join(self.summary_plots_folder,'Prediction of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
print('hi')
# # get the correlation
# r, p = scipy.stats.pearsonr(exploration_array[:, 0], edginess)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# m = (((np.mean(prev_edginess) * np.mean(exploration_array[:, 0])) - np.mean(prev_edginess * exploration_array[:, 0])) /
# ((np.mean(prev_edginess) ** 2) - np.mean(prev_edginess ** 2)))
#
# regressed_data = exploration_array[:, 0] - prev_edginess * m
# r, p = scipy.stats.pearsonr(prev_edginess, regressed_data)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# # get the correlation after regressing out prev edginess
# r, p = scipy.stats.pearsonr(regressed_data, edginess)
# print('r = ' + str(np.round(r, 3)) + '\n= ' + str(np.round(p, 3)))
# #
# def plot_efficiency(self):
# # initialize parameters
# fps = 30
# traj_loc = 40
# escape_duration = 12 # 12 #6
# HV_cutoff = .681
# ETD = 10
# # ax2, fig2, ax3, fig3 = initialize_figures_efficiency(self)
# efficiency_data = [[], [], [], []]
# duration_data = [[], [], [], []]
# # initialize arrays for stats
# efficiency_data_all = []
# duration_data_all = []
# prev_homings_data_all = []
# all_conditions = []
# mouse_ID = [];
# m = 1
# data_condition = ['naive', 'experienced']
# # data_condition = ['food','escape']
# # data_condition = ['OR - EV', 'OR - HV', 'OF']
# fig1, ax1 = plt.subplots(figsize=(13, 5))
#
# colors = [[1,0,0],[0,0,0]]
# kde_colors = [ [1, .4, .4], [.75, .75, .75]]
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring, distance_exploring, time_exploring_obstacle, time_exploring_far, \
# scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
# initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# if 'void' in experiment or 'dark' in experiment:
# escape_duration = 12
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['full path length']):
# # initialize arrays for stats
# efficiency_data_mouse = []
# duration_data_mouse = []
# prev_homings_data_mouse = []
# # control analysis
# if self.analysis_options['control'] and not mouse == 'control': continue
# if not self.analysis_options['control'] and mouse == 'control': continue
# # loop over each trial
# t = 0
# for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
#
# trial_num += 1
# if t > 2 and not 'food' in experiment and not 'void' in experiment: continue
#
# if t > 8: continue
# # print(t)
# # impose coniditions
# end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
# if (end_idx[trial_num] > escape_duration * fps) or np.isnan(end_idx[trial_num]): continue
# # skip certain trials
# y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
# x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# if y_start > 25: continue
# if abs(x_start - 50) > 25: continue # 25
#
# # get prev edginess
# _, _ = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
# scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
#
# # only do predict edgy:
# # if c == 0:
# # if prev_edginess[trial_num] <= HV_cutoff and 'down' in experiment: continue
# # elif c == 1:
# # if prev_edginess[trial_num] > HV_cutoff and 'down' in experiment: continue
#
# # add data
# fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV,
# time_exploring, distance_exploring, time_exploring_obstacle, time_exploring_far,
# self, time, trial, trial_num, trials, edginess, t)
#
# # normalize end idx to
# RT = self.analysis[experiment][condition]['RT'][mouse][trial]
# if not RT:
# print(RT)
# continue
# RT_all[trial_num] = RT
#
# avg_speed[trial_num] = self.analysis[experiment][condition]['RT path length'][mouse][trial] * scaling_factor / (
# (end_idx[trial_num] - RT) / fps)
# # avg_speed[trial_num] = self.analysis[experiment][condition]['full path length'][mouse][trial] * scaling_factor / (end_idx[trial_num] / fps)
#
# end_idx[trial_num] = (end_idx[trial_num] / fps) / self.analysis[experiment][condition]['optimal path length'][mouse][
# trial] / scaling_factor * 100
#
# # add data for stats
# efficiency_data_mouse.append(efficiency[trial_num])
# # duration_data_mouse.append(end_idx[trial_num]) #TEMP COMMENTING
# duration_data_mouse.append(RT)
# prev_homings_data_mouse.append(num_prev_homings_EV[trial_num])
#
# t += 1
#
# # append data for stats
# if efficiency_data_mouse:
# efficiency_data_all.append(efficiency_data_mouse)
# duration_data_all.append(duration_data_mouse)
# prev_homings_data_all.append(prev_homings_data_mouse)
# all_conditions.append(data_condition[c])
# mouse_ID.append(m);
# m += 1
#
# # format end ind
# # end_idx = np.array([e/30 for e in end_idx])
# end_idx[np.isnan(efficiency)] = np.nan
# # loop over data to plot
# for i, (data, data_label) in enumerate(zip([efficiency_RT, end_idx, RT_all, avg_speed, edginess],
# ['Efficiency'])): # , 'Duration', 'Reaction Time', 'Speed', 'Trajectory'])): #edginess, 'Trajectory',
# # for i, (data, data_label) in enumerate(zip([edginess], ['Trajectory'])): # edginess, 'Trajectory',
#
# # for i, (data, data_label) in enumerate(zip([edginess, efficiency, end_idx], ['Trajectory', 'Efficiency', 'Duration'])):
# # for x_data, x_data_label in zip([num_prev_homings], ['Prior homings']):
# plot_data = data[~np.isnan(data)]
#
# # for x_data, x_data_label in zip([trials, time, num_prev_homings_EV, num_prev_homings_HV, prev_edginess, time_exploring, distance_exploring, time_exploring_far, time_exploring_obstacle],
# # ['Trials', 'Time', 'Edge vector homings', 'Homing vector homings', 'Mean prior trajectory','Time exploring', 'Distance explored', 'Time exploring far side', 'Time exploring obstacle']):
#
# for x_data, x_data_label in zip([trials, time_exploring], ['trial number']): # , 'Time exploring']):
#
# print('\nCorrelation between ' + data_label + ' and ' + x_data_label)
#
# # only plot escapes
# data_for_box_plot = data[~np.isnan(data)]
# print(len(data_for_box_plot))
# x_data = x_data[~np.isnan(data)]
#
# # get the correlation
# r, p = scipy.stats.pearsonr(x_data, data_for_box_plot)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# # initialize figure
# plt.title(data_label + ' x ' + x_data_label)
# # set up the figure
# # if data_label=='Efficiency': ax1.set_ylim([-.03, 1.03])
# # elif data_label=='Duration': ax1.set_ylim([-.1, 7])
#
# if np.max(x_data) < 5:
# ax1.set_xticks(np.unique(x_data).astype(int))
# else:
# ax1.set_xticks(np.arange(5, 25, 5))
# # ax1.set_xlim([5,20])
#
# # jitter the axis
# scatter_axis = scatter_the_axis_efficiency(plot_data, x_data + c/3 - .2)
# # plot each trial
# ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
# for x in np.unique(x_data):
# # plot kde
# kde = fit_kde(plot_data[x_data==x], bw=.02) #.2) # .04
# plot_kde(ax1, kde, plot_data[x_data==x], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
#
# # box and whisker
# bp = ax1.boxplot([plot_data[x_data==x], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['medians'], linewidth=2)
# ax1.set_xlim(.25, 3.75)
# ax1.set_ylim(.5, 1.05)
# # ax1.set_ylim(.95, 1.9)
# ax1.set_xticks([1,2,3])
# ax1.set_xticklabels([1,2,3])
#
#
#
# # # for each trial
# # for x in np.unique(x_data):
# # # plot kde
# # kde = fit_kde(plot_data[x_data>=0], bw=.02) #.2) # .04
# # plot_kde(ax1, kde, plot_data[x_data>=0], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
# #
# # # box and whisker
# # bp = ax1.boxplot([plot_data[x_data>=0], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# # plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['medians'], linewidth=2)
# # ax1.set_xlim(.25, 3.75)
# # ax1.set_ylim(.5, 1.05)
# # # ax1.set_ylim(.95, 1.9)
# # ax1.set_xticks([1,2,3])
# # ax1.set_xticklabels([1,2,3])
# #
# # # jitter the axis
# # scatter_axis = scatter_the_axis_efficiency(plot_data, np.ones_like(plot_data) * (x + c/3 - .2))
# # # plot each trial
# # ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
#
#
# ax1.plot([-1, 4], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
# # save the plot
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.png'), format='png')
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
#
# plt.show()
# print('done')
#
#
#
def plot_efficiency(self):
# initialize parameters
fps = 30
traj_loc = 40
escape_duration = 12 #12 #6
HV_cutoff = .681
ETD = 10
# ax2, fig2, ax3, fig3 = initialize_figures_efficiency(self)
efficiency_data = [[],[],[],[]]
duration_data = [[],[],[],[]]
# initialize arrays for stats
efficiency_data_all = []
duration_data_all = []
prev_homings_data_all = []
all_conditions = []
mouse_ID = []; m = 1
# data_condition = ['naive','experienced']
data_condition = ['escape', 'food']
# data_condition = ['OR - EV', 'OR - HV', 'OF']
# data_condition = ['Obstacle removed (no shelter)', 'obstacle removed', 'acute OR', 'obstacle']
colors = [[0,0,0],[1,0,0]]
#
plot_stuff = True
do_traversals = False
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
print(' - - - -- - - - -- - - - - - - -- - - - - - - - - -')
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_front_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,\
time_exploring_obstacle_post,time_exploring_far_pre,time_exploring_far_post, time_exploring_edge, time_exploring_other_edge, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment:
escape_duration = 12
if 'food' in experiment: escape_duration = 9
# else:escape_duration = 9
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
print(mouse)
# initialize arrays for stats
efficiency_data_mouse = []
duration_data_mouse = []
prev_homings_data_mouse = []
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
if t > 2 and not 'food' in experiment and not 'void' in experiment and not 'dark' in experiment: continue
if 'food' in experiment and condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
if t > 8: continue
# if t > 2: continue
# if 'on off' in experiment and trial: continue
# print(t)
                        # impose conditions
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
if (end_idx[trial_num] > escape_duration * fps) or np.isnan(end_idx[trial_num]): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue #25
# get prev edginess
_, _ = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
# only do predict edgy:
# if c == 0:
# if prev_edginess[trial_num] <= HV_cutoff and 'down' in experiment: continue
# elif c == 1:
# if prev_edginess[trial_num] > HV_cutoff and 'down' in experiment: continue
# add data
fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV,num_prev_homings_front_EV, num_prev_homings_other_EV,num_prev_homings_HV,
time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge, time_exploring_other_edge,
self, time, trial, trial_num, trials, edginess, t)
# if edginess[trial_num] < HV_cutoff: continue
if do_traversals:
traversal = self.analysis[experiment][condition]['back traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
x_edge = self.analysis[experiment][condition]['x edge'][mouse][trial]
# if x_edge==25: x_edge = 75
# else: x_edge = 25
spont_edge = []
for trav in traversal[0 * 5 + 0]:
spont_edge.append(trav[0][-1]*scaling_factor)
esc_edge = []
for trav in traversal[1 * 5 + 0]:
esc_edge.append(trav[0][-1]*scaling_factor)
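                                # traversal appears to be indexed as traversal[block*5 + field]: block 0 holds
                                # spontaneous traversals and block 1 escape traversals; field 0 is the path,
                                # 1 the start frame, 2 the edginess score and 3 the duration
                                # (layout inferred from the usage below)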
num_prev_homings_EV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
num_prev_homings_HV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
eligible_homings = ~((np.array(traversal[0 * 5 + 2]) > HV_cutoff) * (abs(np.array(spont_edge)-x_edge) > 40)) * (np.array(traversal[0 * 5 + 3]) < 3) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - 15) * 30 * 60)
if np.sum(eligible_homings):
mean_homing = np.mean(np.array(traversal[0 * 5 + 2])[eligible_homings])
else: mean_homing = 0
eligible_escapes = ~((np.array(traversal[1 * 5 + 2]) > HV_cutoff) * (abs(np.array(esc_edge) - x_edge) > 40)) * (np.array(traversal[1 * 5 + 3]) < 3) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - 15) * 30 * 60)
if np.sum(eligible_escapes):
mean_escape = np.mean(np.array(traversal[1 * 5 + 2])[eligible_escapes])
else: mean_escape = 0
prev_edginess[trial_num] = ( mean_homing * np.sum(eligible_homings) + mean_escape * np.sum(eligible_escapes) ) / \
(np.sum(eligible_homings) + np.sum(eligible_escapes))
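                                # pooled prior trajectory: count-weighted mean of the eligible
                                # spontaneous-homing and escape edginess scores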
else:
num_prev_homings_EV[trial_num] = 0
# prev_edginess[trial_num] = 0
if np.isnan(prev_edginess[trial_num]):
prev_edginess[trial_num] = 0
traversal = self.analysis[experiment][condition]['front traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
x_edge = self.analysis[experiment][condition]['x edge'][mouse][trial]
spont_edge = []
for trav in traversal[0 * 5 + 0]:
spont_edge.append(trav[0][-1]*scaling_factor)
esc_edge = []
for trav in traversal[1 * 5 + 0]:
esc_edge.append(trav[0][-1]*scaling_factor)
num_prev_homings_other_EV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60))
else:
num_prev_homings_other_EV[trial_num] = 0
# print(mouse)
# print(trial + 1)
# print(num_prev_homings_EV[trial_num])
# print(num_prev_homings_other_EV[trial_num])
# print(edginess[trial_num])
# print('')
# normalize end idx to
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
# if not RT:
# print(RT)
# continue
RT_all[trial_num] = RT
avg_speed[trial_num] = self.analysis[experiment][condition]['RT path length'][mouse][trial] * scaling_factor / ((end_idx[trial_num] - RT) / fps)
# avg_speed[trial_num] = self.analysis[experiment][condition]['full path length'][mouse][trial] * scaling_factor / (end_idx[trial_num] / fps)
                        # time[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial]
                        # time[trial_num] = (end_idx[trial_num] / fps) / self.analysis[experiment][condition]['optimal path length'][mouse][trial] / scaling_factor * 100
                        # the assignment below supersedes the two alternatives above
time[trial_num] = abs(50 - x_start)
end_idx[trial_num] = (end_idx[trial_num] / fps - RT) / self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / scaling_factor * 100
# add data for stats
efficiency_data_mouse.append([efficiency_RT[trial_num], trial])
duration_data_mouse.append([end_idx[trial_num], trial]) #TEMP COMMENTING #RT
# duration_data_mouse.append(num_prev_homings_EV[trial_num])
prev_homings_data_mouse.append(num_prev_homings_EV[trial_num])
t += 1
# print(trial+1)
#append data for stats
if efficiency_data_mouse:
efficiency_data_all.append(efficiency_data_mouse)
duration_data_all.append(duration_data_mouse)
prev_homings_data_all.append(prev_homings_data_mouse)
all_conditions.append(data_condition[c])
mouse_ID.append(m); m+= 1
# format end ind
# end_idx = np.array([e/30 for e in end_idx])
end_idx[np.isnan(efficiency)] = np.nan
# loop over data to plot
# for i, (data, data_label) in enumerate(zip([edginess, efficiency_RT, end_idx, RT_all, avg_speed], ['Trajectory'])): #,'Efficiency', 'Duration', 'Reaction Time', 'Speed', 'Trajectory'])): #edginess, 'Trajectory',
# for i, (data, data_label) in enumerate(zip([edginess], ['Trajectory'])):
for i, (data, data_label) in enumerate(zip([end_idx], ['RT duration', 'RT duration', 'Efficiency', 'RT'])): # time, , efficiency_RT, RT_all
# for i, (data, data_label) in enumerate(zip([RT_all], ['Reaction time'])):
# for i, (data, data_label) in enumerate(zip([edginess, efficiency, end_idx], ['Trajectory', 'Efficiency', 'Duration'])):
# for x_data, x_data_label in zip([num_prev_homings], ['Prior homings']):
plot_data = data[~np.isnan(data)]
                if True:  # set to False to skip the correlation analysis below
# for x_data, x_data_label in zip([trials, time, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV, prev_edginess, time_exploring, distance_exploring, time_exploring_far, time_exploring_obstacle, time_exploring_edge, time_exploring_other_edge],
# ['Trials', 'Time', 'Edge vector homings','Other edge vector homings', 'Homing vector homings', 'Mean prior trajectory','Time exploring', 'Distance explored', 'Time exploring far side', 'Time exploring obstacle', 'Time exploring edge', 'Time exploring other edge']):
# for x_data, x_data_label in zip([trials, time, time_exploring_pre, distance_exploring_pre, time_exploring_post, distance_exploring_post,
# time_exploring_far_pre,time_exploring_far_post, time_exploring_obstacle_pre, time_exploring_obstacle_post, time_exploring_other_edge, time_exploring_edge],
# ['Trials', 'Time', 'Time exploring (pre)', 'Distance explored (pre)', 'Time exploring (post)', 'Distance explored (post)',
# 'Time exploring far side (pre)', 'Time exploring far side (post)', 'Time exploring obstacle (pre)', 'Time exploring obstacle (post)',
# 'Time exploring other edge (pre)', 'Time exploring edge (pre)']):
# num_homings_combined = (num_prev_homings_EV>0).astype(int) - (num_prev_homings_HV>0).astype(int)
# num_homings_combined[num_prev_homings_EV==0] = -1
#
# for x_data, x_data_label in zip([time, num_prev_homings_EV>0, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_other_EV>0,
# num_prev_homings_front_EV, num_prev_homings_front_EV>0, prev_edginess, num_prev_homings_HV, num_prev_homings_HV>2, num_homings_combined],
# ['Time', '1 Edge vector homings', 'Edge vector homings','Other edge vector homings','1 other edge vector homings',
# 'Front edge vectors','1 front edge vector', 'Mean prior trajectory', 'Homing vector homings', '1 Homing vector homing', 'Combined homings']):
# for x_data, x_data_label in zip([trials, num_prev_homings_EV>0, num_prev_homings_EV, prev_edginess], ['trial', '1 Edge vector homings', 'Edge vector homings', 'Mean prior trajectory']):
for x_data, x_data_label in zip([trials], ['trial']): # ,edginess>HV_cutoff #, 'edginess'
print('\nCorrelation between ' + data_label + ' and ' + x_data_label)
# only plot escapes
data_for_box_plot = data[~np.isnan(data)]
x_data = x_data[~np.isnan(data)]
print(np.sum(x_data==0))
# get the correlation
r, p = scipy.stats.pearsonr(x_data, data_for_box_plot)
print('r = ' + str(np.round(r, 3)) + '\np = ' + str(p))
if p < .05: print('SIGGY STATDUST')
# m = (((np.mean(x_data) * np.mean(data_for_box_plot)) - np.mean(x_data * data_for_box_plot)) /
# ((np.mean(x_data) ** 2) - np.mean(x_data ** 2)))
# regressed_data = data_for_box_plot - x_data * m
# r, p = scipy.stats.pearsonr(x_data, regressed_data)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
if plot_stuff and not np.isnan(r):
fig1, ax1 = plt.subplots(figsize=(15, 15))
# initialize figure
# fig1, ax1 = plt.subplots(figsize=(9, 9))
plt.title(data_label + ' x ' + x_data_label)
# set up the figure
# if data_label=='Efficiency': ax1.set_ylim([-.03, 1.03])
# elif data_label=='Duration': ax1.set_ylim([-.1, 7])
# if np.max(x_data) < 5:
# ax1.set_xticks(np.unique(x_data).astype(int))
# else:
# ax1.set_xticks(np.arange(5, 25, 5))
# ax1.set_xlim([5,20])
# jitter the axis
scatter_axis = scatter_the_axis_efficiency(plot_data, x_data)
# plot each trial
ax1.scatter(scatter_axis, plot_data, color=colors[0], s=40, alpha=1, edgecolor=colors[0], linewidth=1)
# ax1.scatter(scatter_axis[plot_data > HV_cutoff], plot_data[plot_data > HV_cutoff], color=[0,0,0], s=50, alpha=1, edgecolor=[0, 0, 0], linewidth=1)
# do a linear regression
try:
x_data, prediction = do_linear_regression(plot_data, x_data.astype(int))
                            except Exception:
                                print('linear regression failed; skipping regression overlay')
# # plot kde
kde = fit_kde(plot_data, bw=.02) #.2) # .04
plot_kde(ax1, kde, plot_data, z=c + .1, vertical=True, normto=.3, color=[.75, .75, .75], violin=False, clip=True) # True)
# plot the linear regression
# ax1.plot(x_data, prediction['Pred'].values, color=colors[0], linewidth=1, linestyle='--', alpha=.7)
# ax1.fill_between(x_data, prediction['lower'].values, prediction['upper'].values, color=colors[0], alpha=.05) # 6
# save the plot
plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.png'), format='png')
plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# plt.show()
# plt.close()
# plot the boxplot
# if data_label == 'Efficiency':
# ax, fig = ax2, fig2
# efficiency_data[c] = plot_data
# elif data_label == 'Duration':
# ax, fig = ax3, fig3
# duration_data[c] = plot_data
# else: continue
# scatter_axis = scatter_the_axis_efficiency(plot_data, np.ones_like(plot_data)*c)
# ax.scatter(scatter_axis, plot_data, color=[0, 0, 0], s=40, alpha=1, edgecolor=[0, 0, 0], linewidth=1)
# # plot kde
# kde = fit_kde(plot_data, bw=.02) #.2) # .04
# plot_kde(ax, kde, plot_data, z=c + .1, vertical=True, normto=.3, color=[.75, .75, .75], violin=False, clip=True) # True)
# # plot errorbar
# median = np.percentile(plot_data, 50)
# third_quartile = np.percentile(plot_data, 75)
# first_quartile = np.percentile(plot_data, 25)
# ax.errorbar(c - .2, median, yerr=np.array([[median - first_quartile], [third_quartile - median]]), color=[0,0,0], capsize=10, capthick=3, alpha=1, linewidth=3)
# ax.scatter(c - .2, median, color=[0,0,0], s=175, alpha=1)
# # save the plot
# fig.savefig(os.path.join(self.summary_plots_folder, data_label + ' comparison - ' + self.labels[c] + '.png'), format='png')
# fig.savefig(os.path.join(self.summary_plots_folder, data_label + ' comparison - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
# test correlation and stats thru permutation test
data_x = prev_homings_data_all
data_y = efficiency_data_all
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all=False)
#
# # do t test
# t, p = scipy.stats.ttest_ind(efficiency_data[0], efficiency_data[1], equal_var=False)
# print('Efficiency: ' + str(p))
# print(np.mean(efficiency_data[0]))
# print(np.mean(efficiency_data[1]))
#
# t, p = scipy.stats.ttest_ind(duration_data[0], duration_data[1], equal_var=False)
# print('Duration: ' + str(p))
# print(np.mean(duration_data[0]))
# print(np.mean(duration_data[1]))
#
efficiency_0 = []
efficiency_more = []
for m, mouse_data in enumerate(efficiency_data_all):
EV_array = np.array(duration_data_all[m])
efficiency_array = np.array(mouse_data)
if 0 in EV_array:
efficiency_0.append(list(efficiency_array[EV_array==0]))
if np.sum(EV_array==0)<len(EV_array):
efficiency_more.append(list(efficiency_array[EV_array>0]))
permutation_test(efficiency_0, efficiency_more, iterations=10000, two_tailed=False)
# group_A = list(np.array(efficiency_data_all)[np.array(all_conditions) == 'OFn'])
# group_B = list(np.array(efficiency_data_all)[np.array(all_conditions) == 'ORe'])
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
#
        # reconstructed tail (truncated in the source), following the commented-out pattern above;
        # the condition labels taken from data_condition are an assumption
        group_A = list(np.array(duration_data_all)[np.array(all_conditions) == data_condition[0]])
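        group_B = list(np.array(duration_data_all)[np.array(all_conditions) == data_condition[1]])
        permutation_test(group_A, group_B, iterations=10000, two_tailed=False)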
import json
import logging
import os
import torch
import pandas as pd
import numpy as np
from typing import Dict, Any
from src.model import MVAE
from src.dataset import OneHotEncoding, Dataset
JSON_CONTENT_TYPE = "application/json"
logger = logging.getLogger(__name__)
def model_fn(model_dir):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info("Current device: {}".format(device))
logger.info("Loading the model.")
with open(os.path.join(model_dir, "MVAE.pt"), "rb") as f:
model_state = torch.load(f)
model = MVAE(
input_size=model_state["args"]["input_size"],
hidden_dim=model_state["args"]["hidden_dim"]
)
model.load_state_dict(model_state["weights"])
model.to(device).eval()
data_dir = os.environ["SM_CHANNEL_DATA_DIR"]
item_map_df = pd.read_csv(os.path.join(data_dir, "item_map.csv"))
idx2item_map = {idx: item for item, idx in item_map_df[["item_id", "item_idx"]].values}
target_user_hist_df = pd.read_csv(os.path.join(data_dir, "target_user_hist.csv"))
return dict(
model=model,
idx2item_map=idx2item_map,
target_user_hist_df=target_user_hist_df,
)
def input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):
logger.info("Deserializing the input data.")
if content_type == JSON_CONTENT_TYPE:
return json.loads(serialized_input_data)
raise Exception(f"Requested unsupported ContentType in content_type: {content_type}")
def output_fn(prediction_output, accept=JSON_CONTENT_TYPE):
logger.info("Serializing the generated output.")
if accept == JSON_CONTENT_TYPE:
return json.dumps(prediction_output), accept
raise Exception(f"Requested unsupported ContentType in Accept: {accept}")
def predict_fn(inputs: Dict[str, Any], model: Dict[str, Any]):
idx2item_map = model["idx2item_map"]
target_user_hist_df = model["target_user_hist_df"]
model = model["model"]
user_id = inputs["user_id"]
rec_k = inputs["rec_k"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info("Current device: {}".format(device))
target_user_hist = target_user_hist_df.query(f"user_id=={user_id}")
dataset = Dataset(target_user_hist, transforms=[OneHotEncoding(model.input_size)])
with torch.no_grad():
batch = dataset[0]
inputs = torch.from_numpy(batch["inputs"])
inputs = torch.unsqueeze(inputs, 0)
targets = batch["targets"]
score = model(inputs).detach().cpu().numpy().flatten()
        score = score * (1.0 - targets)  # mask out items already in the user's history so only unseen items are recommended
indices = np.argpartition(score, -rec_k, axis=-1)[-rec_k:]
        score = np.take_along_axis(score, indices, axis=-1)
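        # reconstruction: the remainder of this function was truncated in the source.
        # Rank the k candidates by descending score and map indices back to item ids;
        # the exact response schema below is an assumption.
        ranked = indices[np.argsort(-score)]
    rec_items = [idx2item_map[int(idx)] for idx in ranked]
    return {"user_id": user_id, "recommendations": rec_items}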
from scipy import optimize
import numpy as np
from matplotlib import pyplot as plt
import scipy.integrate as integrate
def curve(x, t):
period = 2 * np.pi / x[1]
if isinstance(t, float):
t = np.array((t,))
y = np.ndarray((t.shape[0],))
for i in range(t.shape[0]):
if t[i] < (period / 4):
y[i] = x[0] * np.sin(t[i] * x[1])
elif t[i] > (period / 4) and t[i] < (1 - (period / 4)):
y[i] = x[0]
elif t[i] > (1 - (period / 4)) and t[i] < (1 + (period / 4)):
y[i] = x[0] * np.sin((t[i] - 1 - period / 2) * x[1])
elif t[i] > (1 + (period / 4)) and t[i] < (2 - (period / 4)):
y[i] = - x[0]
else:
y[i] = x[0] * np.sin((t[i] - 2) * x[1])
return y
def curve_integral(x, t):
integ = np.ndarray((t.shape[0]-1,))
for i in range(t.shape[0]-1):
integ[i] = integrate.quad(lambda t: curve(x, t), 0, t[i+1])[0]
return integ
def generate_up_and_down_bow_target(ns, bow_speed=10, bow_acceleration=0.5):
def objective_function(x, t):
y = curve(x, t)
integ = curve_integral(x,t)
moy = (integ[int(n_points/2)-2] + integ[int(n_points/2)+2])/2
return np.array((bow_acceleration-x[0], bow_speed-x[1], y[-1], y[0], (moy-0.48)*1000))
n_points = 200
    t = np.linspace(0, 2, n_points)
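    # reconstruction: the remainder of this function was truncated in the source.
    # Solve for amplitude and angular frequency so the residuals defined in
    # objective_function vanish; the initial guess and return value are assumptions.
    x0 = np.array([bow_acceleration, bow_speed])
    sol = optimize.least_squares(objective_function, x0, args=(t,))
    return sol.x, t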
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pynufft import NUFFT
import pkg_resources
import scipy.misc
from OCTFrames import FrameManager,cachedOCT
from octReader import OCTManager
import scipy.ndimage
import scipy as S
from matplotlib.widgets import Slider
import attr
@attr.s(kw_only=True)
class RadialVolume:
    angle: float = attr.ib(default=2*np.pi)
scan_count:int = attr.ib(default=100)
scan_depth:int = attr.ib(default=800)
diameter:int = attr.ib(default=1000)
def set_data(self,data):
radius = int(self.diameter/2)
volume = data[0,:,:,slice(0,self.scan_depth)]
volume = np.transpose(volume, (1, 0, 2))
        # each diametric B-scan is split at the centre and the first half mirrored,
        # doubling the angular samples so theta can span the full 0..2*pi in get_coords
        vol1 = np.flip(volume[:radius,:,:],axis=0)
        vol2 = volume[radius:,:,:]
        vol = np.hstack([vol1,vol2])
self.vol = vol
def get_coords(self) -> np.ndarray:
om = np.meshgrid(np.linspace(0,self.angle,self.scan_count*2), np.arange(0,int(self.diameter/2),1)) #rectangular plot of polar data
theta = np.ravel(om[0])
        r = np.ravel(om[1])
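        # reconstruction: the remainder of this method was truncated in the source.
        # Convert the polar sample grid to cartesian k-space coordinates, normalized
        # to [-pi, pi) as pynufft expects; the scaling here is an assumption.
        radius = self.diameter / 2
        x = (r / radius) * np.pi * np.cos(theta)
        y = (r / radius) * np.pi * np.sin(theta)
        return np.stack([x, y], axis=-1)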
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import time
from scipy.stats import norm
import cv2 as cv
import h5py
from keras.models import load_model
from keras import __version__ as keras_version
class TLClassifierSimple(object):
def __init__(self):
# load the model for the traffic light bounding box detection
SSD_GRAPH_FILE = './models/ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb'
detection_graph = self.load_graph(SSD_GRAPH_FILE)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
self.sess = tf.Session(graph=detection_graph)
# Load the model for the traffic light state classification
global keras_version
TL_CNN_H5 = './models/tl_state_classifier/model.h5'
f = h5py.File(TL_CNN_H5, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
# if model_version != keras_version:
# print('You are using Keras version ', keras_version,
# ', but the model was built using ', model_version)
global tl_state_model
tl_state_model = load_model(TL_CNN_H5)
global tl_state_graph
tl_state_graph = tf.get_default_graph()
def filter_boxes(self, min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score and classes[i] == 10:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def to_image_coords(self, box, height, width):
"""
The original box coordinate output is normalized, i.e [0, 1].
This converts it back to the original coordinate based on the image
size.
"""
box_coords = np.zeros_like(box)
box_coords[0] = box[0] * height
box_coords[1] = box[1] * width
box_coords[2] = box[2] * height
box_coords[3] = box[3] * width
return box_coords
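    # Illustrative example (not from the original source): for a 600x800
    # image, the normalized box [0.1, 0.2, 0.5, 0.6] maps to pixel
    # coordinates [60., 160., 300., 480.]:
    #   to_image_coords(np.array([0.1, 0.2, 0.5, 0.6]), height=600, width=800)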
def load_graph(self, graph_file):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def crop_box(self, image, box):
bottom, left, top, right = box[...]
return image[int(bottom):int(top), int(left):int(right), :]
def detect_tl_circles(self, img):
height = img.shape[0]
img = img[:, :, ::-1].copy() # convert to BGR
gimg = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gimg = cv.medianBlur(gimg, 5)
circles = cv.HoughCircles(gimg, cv.HOUGH_GRADIENT, 1, int(height * 0.25),
param1=50, param2=30, minRadius=0, maxRadius=0)
if circles is not None:
circles = np.uint16(np.around(circles))[0]
return circles
def sort_circles_by_y(self, circles):
if circles is not None:
if len(circles) == 3:
# determine top, middle and bottom circle w.r.t. y-coord
top_idx = np.argmin(circles[:, 1])
top_circ = circles[top_idx]
circles = np.delete(circles, top_idx, axis=0)
mid_idx = np.argmin(circles[:, 1])
mid_circ = circles[mid_idx]
circles = np.delete(circles, mid_idx, axis=0)
bot_circ = circles[0]
return top_circ, mid_circ, bot_circ
return None, None, None
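    # Plausible composition of the helpers above (editorial sketch; the
    # original classification routine is not shown in this excerpt):
    #   crop = self.crop_box(image, box)          # isolate the traffic light
    #   circles = self.detect_tl_circles(crop)    # find the three lamps
    #   top, mid, bot = self.sort_circles_by_y(circles)
    #   # then color-threshold around each circle to decide the active lamp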
def apply_color_threshold(self, img):
img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
img = cv.medianBlur(img, 5)
# RED
lower_red = np.array([0, 50, 50])
upper_red = np.array([30, 255, 255])
mask = cv.inRange(img, lower_red, upper_red)
res = cv.bitwise_and(img, img, mask=mask)
        # hue is circular: red spans both the low range handled above and the
        # 160..180 range, so a second mask covers the wrap-around
lower_red_1 = np.array([160, 50, 50])
upper_red_1 = np.array([180, 255, 255])
mask = cv.inRange(img, lower_red_1, upper_red_1)
res_1 = cv.bitwise_and(img, img, mask=mask)
res_red = cv.bitwise_or(res, res_1)
# YELLOW
lower_yellow = np.array([20, 50, 50])
        upper_yellow = np.array([30, 255, 255])
# Aggregate together statistics from different independent DGA runs. Also compare with DNS results for a unified UQ pipeline.
import numpy as np
import time
from numpy import save,load
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size'] = 40
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['savefig.bbox'] = 'tight'
matplotlib.rcParams['savefig.pad_inches'] = 0.2
font = {'family': 'serif', 'size': 25,}
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pickle
import sys
import subprocess
import os
from os import mkdir
from os.path import join,exists
from shutil import copyfile
codefolder = "/home/jf4241/SHORT"
os.chdir(codefolder)
from model_obj import Model
from hm_model import HoltonMassModel
import hm_params
import helper
from data_obj import Data
import function_obj
from tpt_obj import TPT
# ----------- Make folders ----------
datafolder = "/scratch/jf4241/SHORT_holtonmass"
simfolder = join(datafolder,"runs")
resultfolder = join(datafolder,"results")
dayfolder = join(resultfolder,"2021-12-07")
expfolder = join(dayfolder,"0")
# -----------------------------------
algo_params,algo_param_string = hm_params.get_algo_params()
physical_params,physical_param_string = hm_params.get_physical_params()
# Set savefolder accordingly
physical_param_folder = join(expfolder,physical_param_string)
def compile_generalized_rates_dga(model,tpt_file_list,hm_params,algo_params,savefolder):
# For each TPT output file, load it in and read the generalized rates
Ntpt = len(tpt_file_list)
keys = list(model.dam_dict.keys())
Nkeys = len(keys)
Nmom = algo_params["num_moments"]
rates_dga = dict()
for k in range(Nkeys):
rates_dga[keys[k]] = {"ab": np.zeros((Ntpt,Nmom+1)), "ba": np.zeros((Ntpt,Nmom+1))}
for i in range(Ntpt):
tpt = pickle.load(open(tpt_file_list[i],"rb"))
for k in range(Nkeys):
rates_dga[keys[k]]["ab"][i,:] = tpt.dam_moments[keys[k]]['rate_ab']
rates_dga[keys[k]]["ba"][i,:] = tpt.dam_moments[keys[k]]['rate_ba']
if i == 0:
# Extract DNS info
dam_dns = tpt.dam_emp
long_from_label = tpt.long_from_label
long_to_label = tpt.long_to_label
t_long,x_long = model.load_long_traj(tpt.long_simfolder)
del x_long
del tpt
ab_reactive_flag = 1*(long_from_label==-1)*(long_to_label==1)
ba_reactive_flag = 1*(long_from_label==1)*(long_to_label==-1)
num_rxn = np.sum(np.diff(ab_reactive_flag)==1)
num_rxn = min(num_rxn,np.sum(np.diff(ba_reactive_flag)==1))
print("num_rxn = {}".format(num_rxn))
ab_starts = np.where(np.diff(ab_reactive_flag)==1)[0] + 1
ab_ends = np.where(np.diff(ab_reactive_flag)==-1)[0] + 1
ba_starts = np.where(np.diff(ba_reactive_flag)==1)[0] + 1
ba_ends = np.where(np.diff(ba_reactive_flag)==-1)[0] + 1
num_rxn = np.sum(np.diff(ab_reactive_flag)==1)
num_rxn = min(num_rxn,np.sum(np.diff(ba_reactive_flag)==1))
num_rxn -= 1 # For the periods
Nt = len(t_long)
dt = t_long[1] - t_long[0]
print("num_rxn = {}".format(num_rxn))
# ------------------- Generalized rates ---------------------
# Look at time from one ab_start to the next, and treat that as the IID random variables. Do a bootstrap estimate
Nboot = 500
Nab = num_rxn #len(ab_starts)
Nba = num_rxn #len(ba_starts)
ret = {'ab': np.diff(ab_starts)*dt, 'ba': np.diff(ba_starts)*dt} # Time of each one. The fundamental unit.
genrate_dns = {}
for key in keys:
genrate_dns[key] = {}
for dirn in ['ab','ba']:
np.random.seed(1)
genrate_dns[key][dirn] = {"mean": np.array([np.sum(dam_dns[key][dirn]**k)/np.sum(ret[dirn]) for k in range(Nmom+1)])}
genrate_bootstrap = np.zeros((Nboot,Nmom+1))
for i in range(Nboot):
idx = np.random.choice(np.arange(Nab),size=Nab,replace=True)
genrate_bootstrap[i] = np.array([np.sum(dam_dns[key][dirn][idx]**k)/np.sum(ret[dirn][idx]) for k in range(Nmom+1)])
genrate_dns[key][dirn]["rmse"] = np.sqrt(np.mean((genrate_bootstrap - genrate_dns[key][dirn]["mean"])**2, axis=0))
genrate_dga = {}
for key in keys:
genrate_dga[key] = {}
for dirn in ['ab','ba']:
genrate_dga[key][dirn] = {"mean": np.mean(rates_dga[key][dirn], axis=0)}
genrate_dga[key][dirn]["rmse"] = np.std(rates_dga[key][dirn], axis=0)
            genrate_dga[key][dirn]["min"] = np.min(rates_dga[key][dirn], axis=0)
import numpy as np
from scipy import interpolate,signal
import matplotlib.pyplot as plt
import math
from scipy.io import wavfile
import timeit
class FDSAF:
def __init__( self,filterlen):
self.M = filterlen
#self.w_f = np.fft.fft(np.concatenate((np.ones(1),np.zeros(2*self.M-1))))
self.w_f = np.fft.fft(np.zeros(2*self.M))
#self.w_f = np.fft.fft(np.concatenate((np.ones(self.M)/self.M,np.zeros(self.M))))
        self.last_buffer = np.zeros(self.M, dtype='float')
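        # Editorial note (hedged): the original file is truncated here. A
        # 2M-point frequency-domain weight vector plus an M-sample history
        # buffer is the standard overlap-save layout for a frequency-domain
        # adaptive filter; each block would be formed roughly as:
        #   x_block = np.concatenate([self.last_buffer, new_samples])
        #   X_f = np.fft.fft(x_block)                  # 2M-point transform
        #   y_block = np.fft.ifft(X_f * self.w_f).real[self.M:]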
import socket
import time, threading
import tkinter as tk
import tkinter.ttk as ttk
import numpy as np
import struct
from queue import Queue
from PIL import ImageTk, Image
import robot
import psutil
import io
import sys
import math
import cv2 as cv
np.set_printoptions(threshold=sys.maxsize)
from threading import Thread
def rgb2gray(rgb):  # convert RGB to grayscale
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def gfunc(x, y, sigma):  # 2-D Gaussian function
    return (math.exp(-(x**2 + y**2)/(2*(sigma**2))))/(2*math.pi*(sigma**2))
def gaussFilter(size, sigma):  # build a normalized Gaussian filter kernel
out = np.zeros(size)
for i in range(size[0]):
for j in range(size[1]):
out[i,j] = gfunc(i-size[0]//2,j-size[1]//2, sigma )
return out/np.sum(out)
# FLANN-based template matching
def template_match(img,query):
sift = cv.SIFT_create()
    # convert numpy array (RGB) to an OpenCV matrix (BGR)
img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
query = cv.cvtColor(np.array(query), cv.COLOR_RGB2BGR)
    # detect SIFT keypoints and compute descriptors
kp1, des1 = sift.detectAndCompute(img, None)
kp2, des2 = sift.detectAndCompute(query, None)
    # FLANN matcher parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv.FlannBasedMatcher(index_params, search_params)
    # kNN-match descriptors with FLANN
matches = flann.knnMatch(des1, des2, k=2)
    # mask marking matches that pass Lowe's ratio test
matchesMask = [[0, 0] for i in range(len(matches))]
for i, (m, n) in enumerate(matches):
if m.distance < 0.7 * n.distance:
matchesMask[i] = [1, 0]
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 0, 0),
matchesMask=matchesMask,
flags=0)
img3 = cv.drawMatchesKnn(img, kp1, query, kp2, matches, None, **draw_params)
img3 = cv.cvtColor(img3, cv.COLOR_BGR2RGB)
return img3
def covFilter(img, filter):  # convolve an image with a filter kernel
img_arr = np.array(img)
#re_arr = np.zeros_like(img_arr)
re_arr = np.copy(img_arr)
    center_x = filter.shape[0]//2  # filter center (rows)
    center_y = filter.shape[1]//2  # filter center (cols)
    img_arr = np.pad(img_arr, ((center_x,center_x), (center_y,center_y), (0, 0)))  # zero-pad the image borders
re_arr = re_arr.astype(np.float64)
img_arr = img_arr.astype(np.float64)
    re_arr = re_arr/255.0    # rescale pixel values from 0..255 to 0..1
    img_arr = img_arr/255.0  # rescale pixel values from 0..255 to 0..1
re_arr = filter[center_x,center_y] * re_arr
for x in range(0,filter.shape[0]):
for y in range(0,filter.shape[1]):
            if x != center_x or y != center_y:  # skip only the center tap (already applied above)
re_arr = re_arr + filter[x,y] * img_arr[x:x+re_arr.shape[0],y:y+re_arr.shape[1],:]
re_arr = re_arr.astype(np.float64)
re_arr =re_arr*255.0
re_arr = re_arr.astype(np.uint8)
"""
for x in range(re_arr.shape[0]):
for y in range(re_arr.shape[1]):
re_arr[x,y,:] = np.mean(img_arr[x:x+9,y:y+9,:],axis=(0, 1))
print(re_arr.dtype)
"""
return re_arr
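# Usage sketch (illustrative, not from the original source): smooth an image
# with a 5x5 Gaussian kernel built by gaussFilter and applied via covFilter.
#   kernel = gaussFilter((5, 5), sigma=1.0)
#   smoothed = covFilter(img, kernel)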
# mean (box) filter over a 9x9 window
def MeanFilter(img):
img_arr = np.array(img)
#re_arr = np.zeros_like(img_arr)
re_arr = np.copy(img_arr)
img_arr = np.pad(img_arr, ((4,4), (4,4), (0, 0)))
re_arr = re_arr.astype(np.float64)
img_arr = img_arr.astype(np.float64)
re_arr =re_arr/255.0
    img_arr = img_arr/255.0  # rescale pixel values from 0..255 to 0..1
for x in range(0,9):
for y in range(0,9):
            if x != 4 or y != 4:  # skip only the center tap (already in re_arr)
re_arr = re_arr + img_arr[x:x+re_arr.shape[0],y:y+re_arr.shape[1],:]
re_arr = re_arr.astype(np.float64)
    re_arr = re_arr/81.0*255.0  # divide by the 81 taps to average, rescale back to 0..255
re_arr = re_arr.astype(np.uint8)
"""
for x in range(re_arr.shape[0]):
for y in range(re_arr.shape[1]):
re_arr[x,y,:] = np.mean(img_arr[x:x+9,y:y+9,:],axis=(0, 1))
print(re_arr.dtype)
"""
return re_arr
# Harris corner detection
def find_harris_corners(input_img, k=0.04, window_size=5, threshold = 10000.00):
    output_img = np.copy(input_img)
import numpy as np
from scipy import integrate
from scipy import interpolate
# Cosmological parameters
Om0 = 0.272
Ol0 = 1.0 - Om0
h = 0.704
ns = 0.961
sigma80 = 0.807
SPEEDOFLIGHT_KMS = 2.99792458e5
def nhat(alpha, delta):
nhat = np.zeros(3)
nhat[0] = np.cos(delta) * np.cos(alpha)
nhat[1] = np.cos(delta) * np.sin(alpha)
nhat[2] = np.sin(delta)
return nhat
def angsep(alpha1, alpha2, delta1, delta2):
cos_ang = np.sin(delta1)*np.sin(delta2) + np.cos(delta1)*np.cos(delta2)*np.cos(alpha1-alpha2)
ang = np.arccos(cos_ang)
return ang
class cosmo:
def __init__(self, Om0=Om0, h=h, ns=ns, sigma80=sigma80, **kwargs):
self.Om0 = Om0
self.Ol0 = 1.0 - self.Om0
self.Ob0 = 0.045
self.Tcmb0 = 2.7255
self.h = h
self.ns = ns
self.sigma80 = sigma80
self.H0 = 100.0 # [h km/s/Mpc]
self.q0 = 0.5*self.Om0 - self.Ol0
self.gamma = 0.55 # growth index
self._As = None
self._sigmav = None
self.log_xi_perp_interpolator = None
self.xi_para_interpolator = None
# Eisenstein & Hu (1998) zero baryon transfer function parameters
ombom0 = self.Ob0 / self.Om0 # shorthand
om0h2 = self.Om0 * self.h**2
ombh2 = self.Ob0 * self.h**2
self.theta2p7 = self.Tcmb0 / 2.7
# Equation 31
alphaGamma = 1.0 - 0.328*np.log(431.0*om0h2)*ombom0 + 0.38*np.log(22.3*om0h2)*ombom0**2
# Quantities for Equation 30 (computed in transferFunction)
self.Gamma1 = self.Om0*self.h*alphaGamma
self.Gamma2 = self.Om0*self.h*(1.0-alphaGamma)
# Equation 26
self.s_EH98 = 44.5*np.log(9.83/om0h2) / np.sqrt(1.0+10.0*ombh2**0.75)
# halofit spectral parameters
self.rknl = None
self.rneff = None
self.rncur = None
@property
def dH(self):
return (SPEEDOFLIGHT_KMS)/self.H0 * 1e3 # c/H_0 [h^-1 kpc]
def E_Hub(self, z):
"""
Computes E(z) = H(z)/H0
"""
E2 = self.Om0*(1.+z)**3 + self.Ol0
if np.all(E2 > 0.0):
return np.sqrt(E2)
else:
return np.NaN
def Omega_m(self, z):
"""
Evolution of omega matter with redshift
"""
EH = self.E_Hub(z)
return self.Om0*(1.+z)**3 / EH**2
def Omega_v(self, z):
"""
Evolution of omega vacuum with redshift
"""
EH = self.E_Hub(z)
return self.Ol0 / EH**2
def chi(self, z, use_lowz=False):
"""
Computes the comoving distance in units h^-1 kpc
"""
def _integrand(z):
return 1.0/self.E_Hub(z) # 1/E(z) = H0/H(z)
if use_lowz: # if z<<1
return self.dH * (z - 0.5*(1.+self.q0)*z**2)
else:
if np.isclose(z, 0.0):
return 0.0
zp1 = z + 1.0
if np.isfinite(_integrand(z)): # prevent negative square roots
if np.isclose(self.Om0, 1.0): # EdS
return 2.*zp1*(1.-1./np.sqrt(zp1)) * self.dH
elif np.isclose(self.Ol0, 1.0): # dS
return z * self.dH
else:
y,err = integrate.quad(_integrand, 0.0, z, epsabs=1e-8)
return y * self.dH
else:
return float(1e7)
def chi_lowz(self, z): # accepts array input for z
return self.dH*(z - 0.5*(1.+self.q0)*z**2)
def ztot(self, z, v=0.0):
return (1.0 + z) * (1.0 + v/SPEEDOFLIGHT_KMS) - 1.0
def kappa_v(self, z, v=0.0, use_lowz=False):
dA_bar = self.chi(z, use_lowz) / (1.+z)
dH = self.dH/self.E_Hub(z)
return (1.0 - dH/dA_bar) * (v/SPEEDOFLIGHT_KMS)
def dA(self, z, v=0.0, use_lowz=False):
"""
Computes angular diameter distance in units h^-1 kpc
"""
ret = self.chi(z, use_lowz) / (1.+z)
if v == 0.0:
ret *= 1.0
else:
ret *= 1.0 - self.kappa_v(z, v, use_lowz)
return ret
def transferFunction(self, k):
"""
The zero-baryon transfer function according to Eisenstein & Hu 1998.
This fitting function is significantly simpler than the full version
and still approximates numerical calculations from a Boltzmann code
to better than 10%, and almost as accurate when computing the variance
or correlation function (see the Colossus code paper for details).
"""
kh = k*self.h # convert kh from hMpc^-1 to Mpc^-1
# Equation 30
Gamma = self.Gamma1 + self.Gamma2 / (1.0 + (0.43*kh*self.s_EH98)**4)
# Equation 28
q = k * self.theta2p7 * self.theta2p7 / Gamma
# Equation 29
C0 = 14.2 + 731.0 / (1.0 + 62.5*q)
L0 = np.log(2.0*np.exp(1.0) + 1.8*q)
Tk = L0 / (L0 + C0*q*q)
return Tk
def growthFactor(self, z): # D(a)
return 1.0
def growthFactor_approx(self, z):
# The Carroll, Press & Turner (1992) approximation, eq. 29 for g(Omega)=D/a
om_m = self.Omega_m(z)
om_v = self.Omega_v(z)
g = 2.5*om_m/(om_m**(4./7.)-om_v+(1.+om_m/2.)*(1.+om_v/70.))
g0 = 2.5*self.Om0/(self.Om0**(4./7.)-self.Ol0+(1.+self.Om0/2.)*(1.+self.Ol0/70.))
return g/g0/(1.+z) # D
def matterPowerSpectrum(self, k, z=0.0):
"""
The (linear) matter power spectrum at scale k
k has units h/Mpc so P(k) has units of [k^-3] i.e. (Mpc/h)^3
"""
T = self.transferFunction(k)
D = self.growthFactor(z)
Pk = self.As * D * D * T * T * k**self.ns
return Pk
def Delta2_L(self, k, z=0.0):
"""
Linear dimensionless matter power spectrum
"""
return k**3 * self.matterPowerSpectrum(k,z) / (2.*np.pi**2)
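    # Usage sketch (illustrative): evaluate the z=0 linear power spectrum on
    # a log-spaced grid; As below is computed lazily on first access so that
    # sigma(R = 8 Mpc/h) matches sigma80.
    #   c = cosmo()
    #   k = np.logspace(-3, 1, 200)        # [h/Mpc]
    #   Pk = c.matterPowerSpectrum(k)      # [(Mpc/h)^3]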
@property
def As(self): # scalar amplitude A_s of matter power spectrum
if self._As is None:
sigma80_int = self._sigmaExact()
self._As = (self.sigma80 / sigma80_int)**2
return self._As
def _sigmaExact(self):
"""
This computes the integral of sqrt[(sigma_80)^2 / A_s].
The infinite integral over k often causes trouble when the tophat filter is used.
Thus we determine sensible limits and integrate over a finite k-volume.
"""
def _integrand(lnk):
k = np.exp(lnk)
x = k * 8.0
if x < 1e-3:
W = 1.0
else:
W = 3.0 / x**3 * (np.sin(x) - x * np.cos(x)) # FT of tophat filter
T = self.transferFunction(k)
P_unnorm = T * T * k**self.ns # equal to P(k)/A_s
ret = P_unnorm * W**2 * k**3 # one factor of k due to the integration in log-k space
return ret
lnk_min, lnk_max = self._get_lnk_limits(_integrand)
sigma2, _ = integrate.quad(_integrand, lnk_min, lnk_max, epsabs=1e-9, limit=100)
sigma = np.sqrt(sigma2 / 2.0 / np.pi**2)
if np.isnan(sigma):
# raise Exception("Result is nan")
print('sigma integral is NaN')
print('with parameters Om0={}, sigma8={}'.format(self.Om0,self.sigma80))
return sigma
def _sep(self, coord_obj1, coord_obj2, use_lowz=False):
"""
Computes the comoving seperation between two points and
the angles made by the two lines of sight and the connecting
line.
Parameters
-------------------------------------------------------
coord_obj1: array-like e.g. 3-tuple (z,RA,DEC)
coord_obj2: array-like e.g. 3-tuple (z,RA,DEC)
The angular coordinates RA and DEC are in degrees.
Returns
-------------------------------------------------------
(r,theta1,theta2): 3-tuple
r is the comoving seperation (Mpc/h)
theta1(2) in radians is the seperation angle between the
LOS of object 1(2) and the connecting line.
Notes
-------------------------------------------------------
rhat is directed from point 1 to point 2
"""
deg2rad = np.pi/180
z1, RA1, DEC1 = coord_obj1
z2, RA2, DEC2 = coord_obj2
alpha1 = RA1 * deg2rad
alpha2 = RA2 * deg2rad
delta1 = DEC1 * deg2rad
delta2 = DEC2 * deg2rad
nhat1 = nhat(alpha1, delta1)
nhat2 = nhat(alpha2, delta2)
xvec1 = self.chi(z1, use_lowz) * 1e-3 * nhat1 # since chi in kpc/h and want Mpc/h
xvec2 = self.chi(z2, use_lowz) * 1e-3 * nhat2
rvec = xvec2 - xvec1
r = np.sqrt(np.dot(rvec,rvec))
if r < 1e-14:
theta1 = np.pi/2
theta2 = np.pi/2
else:
rhat = rvec/r
theta1 = np.arccos(np.dot(rhat,nhat1))
theta2 = np.arccos(np.dot(rhat,nhat2))
return r, theta1, theta2 # units radians and Mpc/h
def xiV_perp(self, r):
def _integrand_perp(lnk, r):
k = np.exp(lnk)
Pk = self.matterPowerSpectrum(k)
x = k * r
if x < 1e-3:
Kperp = 1/3.
else:
                j1 = np.sin(x)
# Python imports
import itertools
import os
from itertools import compress
from xml.etree import ElementTree as ET
# Third-party imports
import numpy
import numpy as np
# PyCSEP imports
from csep.utils.calc import bin1d_vec, cleaner_range, first_nonnan, last_nonnan
from csep.utils.scaling_relationships import WellsAndCoppersmith
from csep.models import Polygon
def california_relm_collection_region(dh_scale=1, magnitudes=None, name="relm-california-collection", use_midpoint=True):
""" Return collection region for California RELM testing region
Args:
dh_scale (int): factor of two multiple to change the grid size
        magnitudes (array-like): array representing the lower bin edges of the magnitude bins
        name (str): human readable identifier
        use_midpoint (bool): if true, treat values in file as midpoints. default = true.
Returns:
:class:`csep.core.spatial.CartesianGrid2D`
Raises:
ValueError: dh_scale must be a factor of two
"""
if dh_scale % 2 != 0 and dh_scale != 1:
raise ValueError("dh_scale must be a factor of two or dh_scale must equal unity.")
# we can hard-code the dh because we hard-code the filename
dh = 0.1
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filepath = os.path.join(root_dir, 'artifacts', 'Regions', 'RELMCollectionArea.dat')
points = numpy.loadtxt(filepath)
if use_midpoint:
origins = numpy.array(points) - dh / 2
else:
origins = numpy.array(points)
if dh_scale > 1:
origins = increase_grid_resolution(origins, dh, dh_scale)
dh = dh / dh_scale
# turn points into polygons and make region object
bboxes = compute_vertices(origins, dh)
relm_region = CartesianGrid2D([Polygon(bbox) for bbox in bboxes], dh, name=name)
if magnitudes is not None:
relm_region.magnitudes = magnitudes
return relm_region
def california_relm_region(dh_scale=1, magnitudes=None, name="relm-california", use_midpoint=True):
"""
Returns class representing California testing region.
This region can
be used to create gridded datasets for earthquake forecasts. The XML file appears to use the
midpoint, and the .dat file uses the origin in the "lower left" corner.
Args:
dh_scale: can resample this grid by factors of 2
Returns:
:class:`csep.core.spatial.CartesianGrid2D`
Raises:
ValueError: dh_scale must be a factor of two
"""
if dh_scale % 2 != 0 and dh_scale != 1:
raise ValueError("dh_scale must be a factor of two or dh_scale must equal unity.")
# use default file path from python package
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filepath = os.path.join(root_dir, 'artifacts', 'Regions', 'csep-forecast-template-M5.xml')
csep_template = os.path.expanduser(filepath)
points, dh = parse_csep_template(csep_template)
if use_midpoint:
origins = numpy.array(points) - dh / 2
else:
origins = numpy.array(points)
if dh_scale > 1:
origins = increase_grid_resolution(origins, dh, dh_scale)
dh = dh / dh_scale
# turn points into polygons and make region object
bboxes = compute_vertices(origins, dh)
relm_region = CartesianGrid2D([Polygon(bbox) for bbox in bboxes], dh, name=name)
if magnitudes is not None:
relm_region.magnitudes = magnitudes
return relm_region
def italy_csep_region(dh_scale=1, magnitudes=None, name="csep-italy", use_midpoint=True):
"""
Returns class representing Italian testing region.
This region can be used to create gridded datasets for earthquake forecasts. The region is defined by the
file 'forecast.italy.M5.xml' and contains a spatially gridded region with 0.1° x 0.1° cells.
Args:
dh_scale: can resample this grid by factors of 2
magnitudes (array-like): bin edges for magnitudes. if provided, will be bound to the output region class.
this argument provides a short-cut for creating space-magnitude regions.
name (str): human readable identify given to the region
use_midpoint (bool): if true, treat values in file as midpoints. default = true.
Returns:
:class:`csep.core.spatial.CartesianGrid2D`
Raises:
ValueError: dh_scale must be a factor of two
"""
if dh_scale % 2 != 0 and dh_scale != 1:
raise ValueError("dh_scale must be a factor of two or dh_scale must equal unity.")
# use default file path from python package
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filepath = os.path.join(root_dir, 'artifacts', 'Regions', 'forecast.italy.M5.xml')
csep_template = os.path.expanduser(filepath)
points, dh = parse_csep_template(csep_template)
if use_midpoint:
origins = numpy.array(points) - dh / 2
else:
origins = numpy.array(points)
if dh_scale > 1:
origins = increase_grid_resolution(origins, dh, dh_scale)
dh = dh / dh_scale
# turn points into polygons and make region object
bboxes = compute_vertices(origins, dh)
italy_region = CartesianGrid2D([Polygon(bbox) for bbox in bboxes], dh, name=name)
if magnitudes is not None:
italy_region.magnitudes = magnitudes
return italy_region
def italy_csep_collection_region(dh_scale=1, magnitudes=None, name="csep-italy-collection", use_midpoint=True):
""" Return collection region for Italy CSEP collection region
Args:
dh_scale (int): factor of two multiple to change the grid size
        magnitudes (array-like): array representing the lower bin edges of the magnitude bins
        name (str): human readable identifier
use_midpoint (bool): if true, treat values in file as midpoints. default = true.
Returns:
:class:`csep.core.spatial.CartesianGrid2D`
Raises:
ValueError: dh_scale must be a factor of two
"""
if dh_scale % 2 != 0 and dh_scale != 1:
raise ValueError("dh_scale must be a factor of two or dh_scale must equal unity.")
# we can hard-code the dh because we hard-code the filename
dh = 0.1
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filepath = os.path.join(root_dir, 'artifacts', 'Regions', 'italy.collection.nodes.dat')
points = numpy.loadtxt(filepath)
if use_midpoint:
origins = numpy.array(points) - dh / 2
else:
origins = numpy.array(points)
if dh_scale > 1:
origins = increase_grid_resolution(origins, dh, dh_scale)
dh = dh / dh_scale
# turn points into polygons and make region object
bboxes = compute_vertices(origins, dh)
relm_region = CartesianGrid2D([Polygon(bbox) for bbox in bboxes], dh, name=name)
if magnitudes is not None:
relm_region.magnitudes = magnitudes
return relm_region
def global_region(dh=0.1, name="global", magnitudes=None):
""" Creates a global region used for evaluating gridded forecasts on the global scale.
    The gridded region corresponds to a Cartesian grid with dh x dh degree
    cells covering longitudes [-180, 180) and latitudes [-90, 90).
    Args:
        dh: grid spacing in degrees (default 0.1)
Returns:
csep.utils.CartesianGrid2D:
"""
    # generate longitude and latitude bin edges
lons = cleaner_range(-180.0, 179.9, dh)
lats = cleaner_range(-90, 89.9, dh)
coords = itertools.product(lons,lats)
region = CartesianGrid2D([Polygon(bbox) for bbox in compute_vertices(coords, dh)], dh, name=name)
if magnitudes is not None:
region.magnitudes = magnitudes
return region
def magnitude_bins(start_magnitude, end_magnitude, dmw):
""" Returns array holding magnitude bin edges.
The output from this function is monotonically increasing and equally spaced bin edges that can represent magnitude
bins.
Args:
start_magnitude (float)
end_magnitude (float)
dmw (float): magnitude spacing
Returns:
bin_edges (numpy.ndarray)
"""
# convert to integers to prevent accumulating floating point errors
const = 10000
start = numpy.floor(const * start_magnitude)
end = numpy.floor(const * end_magnitude)
d = const * dmw
return numpy.arange(start, end + d / 2, d) / const
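# Example (illustrative): 0.1-unit bins from Mw 3.95 to 8.95. Scaling by 10^4
# before arange keeps the edges exact instead of accumulating float error:
#   magnitude_bins(3.95, 8.95, 0.1)  # -> array([3.95, 4.05, ..., 8.95])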
def create_space_magnitude_region(region, magnitudes):
"""Simple wrapper to create space-magnitude region """
if not isinstance(region, CartesianGrid2D):
raise TypeError("region must be CartesianGrid2D")
# bind to region class
if magnitudes is None:
raise ValueError("magnitudes should not be None if creating space-magnitude region.")
region.magnitudes = magnitudes
region.num_mag_bins = len(region.magnitudes)
return region
def parse_csep_template(xml_filename):
"""
Reads CSEP XML template file and returns the lat/lon values
for the forecast.
Returns:
list of tuples where tuple is (lon, lat)
"""
tree = ET.parse(xml_filename)
root = tree.getroot()
points = []
for cell in root.iter('{http://www.scec.org/xml-ns/csep/forecast/0.1}cell'):
points.append((float(cell.attrib['lon']), float(cell.attrib['lat'])))
# get cell spacing
data = root.find('{http://www.scec.org/xml-ns/csep/forecast/0.1}forecastData')
dh_elem = data.find('{http://www.scec.org/xml-ns/csep/forecast/0.1}defaultCellDimension')
dh_lat = float(dh_elem.attrib['latRange'])
dh_lon = float(dh_elem.attrib['lonRange'])
if not numpy.isclose(dh_lat, dh_lon):
raise ValueError("dh_lat must equal dh_lon. grid needs to be regular.")
return points, dh_lat
def increase_grid_resolution(points, dh, factor):
"""
Takes a set of origin points and returns a new set with higher grid resolution. assumes the origin point is in the
lower left corner. the new dh is dh / factor. This implementation requires that the decimation factor be a multiple of 2.
Args:
points: list of (lon,lat) tuples
dh: old grid spacing
factor: amount to reduce
Returns:
points: list of (lon,lat) tuples with spacing dh / scale
"""
# short-circuit recursion
if factor == 1:
return points
# handle edge cases
assert factor % 2 == 0
assert factor >= 1
# first start out
new_points = set()
new_dh = dh / 2
for point in points:
bbox = compute_vertex(point, new_dh)
for pnt in bbox:
new_points.add(pnt)
# call function again with new_points, new_dh, new_factor
new_factor = factor / 2
return increase_grid_resolution(list(new_points), new_dh, new_factor)
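# Usage sketch (illustrative): refine origins by a factor of 4. Each recursion
# halves dh and replaces every origin with the corner origins of its quarter
# cells, so factor=4 recurses twice:
#   fine_origins = increase_grid_resolution(origins, 0.1, 4)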
def masked_region(region, polygon):
"""
Build a new region based off the coordinates in the polygon.
Args:
region: CartesianGrid2D object
polygon: Polygon object
Returns:
new_region: CartesianGrid2D object
"""
# contains is true if spatial cell in region is inside the polygon
contains = polygon.contains(region.midpoints())
# compress only returns elements that are true, effectively removing elements outside of the polygons
new_polygons = list(compress(region.polygons, contains))
# create new region with the spatial cells inside the polygon
return CartesianGrid2D(new_polygons, region.dh)
def generate_aftershock_region(mainshock_mw, mainshock_lon, mainshock_lat, num_radii=3, region=california_relm_region, **kwargs):
""" Creates a spatial region around a given epicenter
The method uses the Wells and Coppersmith scaling relationship to determine the average fault length and creates a
circular region centered at (mainshock_lon, mainshock_lat) with radius = num_radii.
Args:
mainshock_mw (float): magnitude of mainshock
mainshock_lon (float): epicentral longitude
mainshock_lat (float): epicentral latitude
num_radii (float/int): number of radii of circular region
region (callable): returns :class:`csep.utils.spatial.CartesianGrid2D`
**kwargs (dict): passed to region callable
Returns:
:class:`csep.utils.spatial.CartesianGrid2D`
"""
rupture_length = WellsAndCoppersmith.mag_length_strike_slip(mainshock_mw) * 1000
aftershock_polygon = Polygon.from_great_circle_radius((mainshock_lon, mainshock_lat),
num_radii * rupture_length, num_points=100)
aftershock_region = masked_region(region(**kwargs), aftershock_polygon)
return aftershock_region
def grid_spacing(vertices):
"""
    Figures out the grid spacing from the vertices of a single cell.
Args:
vertices: Vertices describe a single node in grid.
Returns:
dh: grid spacing
Raises:
ValueError
"""
# get first two vertices
a = vertices[0]
b = vertices[1]
    # compute both coordinate differences; for a regular grid they must agree,
    # and the larger one is taken as the spacing dh
d1 = numpy.abs(b[0] - a[0])
d2 = numpy.abs(b[1] - a[1])
if not numpy.allclose(d1, d2):
raise ValueError("grid spacing must be regular for cartesian grid.")
    dh = numpy.max([d1, d2])
    return dh
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME> and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import time
import numpy as np
from scipy.stats.stats import pearsonr
import torch
from utils import load_graph, preprocess_adj, normalized_laplacian
from utils import rescale_laplacian, chebyshev_polynomial
def mics_z_norm(train_y, valid_y, test_y):
'''z normalize y of training, validation and test set based on training set
Args:
train_y (ndarray): training y data
valid_y (ndarray): validation y data
test_y (ndarray): testing y data
Returns:
Tuple: contains z-normed y data and std of training y data
'''
# subtract mean of y of training set
t_mu = train_y.mean(axis=0, keepdims=True)
train_y = train_y - t_mu
valid_y = valid_y - t_mu
test_y = test_y - t_mu
# divide std of y of training set
t_sigma = train_y.std(axis=0)
train_y = train_y / t_sigma[np.newaxis, :]
valid_y = valid_y / t_sigma[np.newaxis, :]
test_y = test_y / t_sigma[np.newaxis, :]
# return processed y and std for future MAE calculation
return [train_y, valid_y, test_y, t_sigma]
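# Usage sketch (illustrative): normalize every split with training statistics
# only, so validation/test never leak into the normalization; a z-scored MAE
# can be multiplied by t_sigma to recover the original units.
#   train_y, valid_y, test_y, t_sigma = mics_z_norm(train_y, valid_y, test_y)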
def mics_z_norm_gcnn(input_y, train_mask):
"""z normalize y based on training data
Args:
input_y (ndarray): y data
train_mask (ndarray): mask of training data
Returns:
Tuple: contains z-normed y data and std of training y data
"""
# get mean and std of training data
y_tra = input_y[train_mask, :]
t_mu = y_tra.mean(axis=0)
t_sigma = y_tra.std(axis=0)
# perform z-norm
input_y = input_y - t_mu[np.newaxis, :]
input_y = input_y / t_sigma[np.newaxis, :]
return [input_y, t_sigma]
def mics_z_norm_test(train_valid_y, test_y):
"""z normalize y test set based on training data for HCP dataset
Args:
train_valid_y (list): list of y data for both training and validation
test_y (ndarray): test y data
Returns:
Tuple: z normed test y data, and std of training y data
"""
base_y = np.vstack(train_valid_y)
t_v_mu = base_y.mean(axis=0)
test_y = test_y - t_v_mu[np.newaxis, :]
t_v_sigma = base_y.std(axis=0)
test_y = test_y / t_v_sigma[np.newaxis, :]
return test_y, t_v_sigma
def mics_train_valid_split(train_valid_x,
train_valid_y,
fold=None,
is_bnc=False):
"""split training and validation data (HCP only)
Args:
train_valid_x (list): list of y data for both training and validation
train_valid_y (list): list of x data for both training and validation
fold (int, optional): index of fold for validation, if None, no
validation is going to be returned
is_bnc (bool, optional): whether function is used for brainnetcnn
Returns:
Tuple: if fold is None, all data in list train_valid_x and y are
combined as training x and y. If fold is not None, the
corresponding fold is returned as validation data, while the
remaining folds are combined as training data.
"""
if fold is not None:
valid_index = fold
valid_x = train_valid_x[valid_index]
valid_y = train_valid_y[valid_index]
train_valid_x = np.delete(train_valid_x, valid_index, axis=0)
train_valid_y = np.delete(train_valid_y, valid_index, axis=0)
tmp = list(train_valid_x[0].shape)
tmp[0] = 0
train_x = np.zeros(tmp)
train_y = np.zeros((0, train_valid_y[0].shape[-1]))
for i in range(len(train_valid_x)):
train_x = np.concatenate((train_x, train_valid_x[i]), axis=0)
train_y = np.concatenate((train_y, train_valid_y[i]), axis=0)
if is_bnc:
train_x = np.expand_dims(train_x, axis=-1)
if fold is not None:
if is_bnc:
valid_x = np.expand_dims(valid_x, axis=-1)
t_mu = train_y.mean(axis=0)
train_y = train_y - t_mu[np.newaxis, :]
valid_y = valid_y - t_mu[np.newaxis, :]
t_sigma = train_y.std(axis=0)
train_y = train_y / t_sigma[np.newaxis, :]
valid_y = valid_y / t_sigma[np.newaxis, :]
return [train_x, valid_x, train_y, valid_y]
t_mu = train_y.mean(axis=0)
train_y = train_y - t_mu[np.newaxis, :]
t_sigma = train_y.std(axis=0)
train_y = train_y / t_sigma[np.newaxis, :]
return [train_x, train_y]
def mics_train_valid_mask_split(train_valid_mask, fold=None):
"""split training and validation mask for gcnn (HCP only)
Args:
train_valid_mask (list): list of training and validation mask
fold (int, optional): index of fold for validation, if None, no
validation is going to be returned
Returns:
Tuple: training and validation mask
"""
# Data split
if fold is not None:
valid_mask = train_valid_mask[fold]
train_list = np.delete(train_valid_mask, fold, axis=0)
else:
valid_mask = None
train_list = train_valid_mask
train_mask = np.zeros(train_valid_mask[0].shape)
for i in range(len(train_list)):
train_mask = np.logical_or(train_mask, train_list[i])
return [train_mask, valid_mask]
def mics_hcp_log(model_name, out_path, **kwargs):
"""calculate the test result and save the log
Args:
model_name (str): name of the model
out_path (str): path to save the log npz file
**kwargs: record of training, validation and test value
Returns:
None
"""
val_cor = kwargs['val_cor_log']
tes_cor = kwargs['tes_cor_log']
n_folds = tes_cor.shape[0]
temp = np.mean(val_cor, axis=-1)
temp = np.mean(temp, axis=1)
index = np.argmax(temp, axis=-1)
print('Optimal index for each fold at:', index)
result = np.array([tes_cor[i, index[i], :] for i in range(n_folds)])
# avg = np.mean(result, axis=0)
# err = np.std(result, axis=0) / np.sqrt(n_folds)
temp = np.mean(result, axis=1)
print('Optimal result for each fold:', temp)
avg_a = np.mean(temp, axis=0)
# err_a = np.std(temp, axis=0) / np.sqrt(n_folds)
print('Final test result:', avg_a)
kwargs['metric'] = avg_a
# save record value for future use
date_str = time.strftime("%Y_%m_%d_%H_%M")
os.makedirs(out_path, exist_ok=True)
file_str = 'HCP_' + model_name + '_' + date_str + '.npz'
name_str = os.path.join(out_path, file_str)
np.savez(name_str, **kwargs)
print('log saved at:', file_str)
return
def mics_hcp_infer(model, x, y, sigma, x_train=None, y_train=None):
"""evaluate model prediction for given data (HCP only)
Args:
model (keras.models.Model): keras DNN model
x (ndarray): input x data
y (ndarray): y data
sigma (ndarray): std of training y data
x_train (ndarray, optional): training x data
y_train (ndarray, optional): training y data
Returns:
Tuple: correlation and MAE between real and predicted y, and predicted
y value
"""
y_pred = model.predict(x, batch_size=48, verbose=0)
cor = np.zeros((y.shape[-1]))
mae = np.zeros((y.shape[-1]))
for i in range(y.shape[-1]):
cor[i] = pearsonr(y_pred[:, i], y[:, i])[0]
mae[i] = np.mean(np.abs(y_pred[:, i] - y[:, i])) * sigma[i]
if x_train is None:
return cor, mae, y_pred
else:
y_pred_t = model.predict(x_train, batch_size=48, verbose=0)
cor_train = np.zeros((y_train.shape[-1]))
for i in range(y_train.shape[-1]):
cor_train[i] = pearsonr(y_pred_t[:, i], y_train[:, i])[0]
return cor, mae, y_pred, cor_train
def mics_hcp_gcnn_eval(preds, input_y, mask, sigma=None, train_mask=None):
"""evaluate model prediction for given data (HCP and gcnn only)
Args:
preds (ndarray): predicted y value
input_y (ndarray): real y value
mask (ndarray): mask on y value
sigma (ndarray, optional): std of training y data
train_mask (ndarray, optional): mask on training y value
Returns:
TYPE: correlation, loss and MAE between real and predicted y
"""
index = np.nonzero(mask)[0]
pred = preds[index, :]
real = input_y[index, :]
los = np.mean(np.mean(np.square(pred - real), axis=-1))
cor = np.zeros((input_y.shape[-1]))
mae = np.zeros((input_y.shape[-1]))
for i in range(input_y.shape[-1]):
cor[i] = pearsonr(pred[:, i], real[:, i])[0]
if sigma is not None:
mae[i] = np.mean(np.abs(pred[:, i] - real[:, i])) * sigma[i]
if train_mask is None:
return cor, los, mae
else:
index = np.nonzero(train_mask)[0]
pred = preds[index, :]
real = input_y[index, :]
cor_train = np.zeros((input_y.shape[-1]))
for i in range(input_y.shape[-1]):
cor_train[i] = pearsonr(pred[:, i], real[:, i])[0]
return cor, los, mae, cor_train
def mics_infer_metric(dataloader,
net,
criterion,
device,
t_sigma=None,
need_value=False):
    '''Perform inference with net on data from dataloader and calculate
    metrics
Args:
dataloader: dataloader to load data for PyTorch framework
net: PyTorch deep learning network
criterion: criterion for loss calculation
        t_sigma (float, optional): std of training y data; only used if sex is
            not the behavioral measure
need_value (bool, optional): whether return record of real and
predicted value
Returns:
Tuple: if t_sigma is not None, correlation, MAE and loss are returned.
If t_sigma is None, auccuracy and loss are returned. If need_value
set to True, tuple returned also returns record of real and
predicted y value alongside the metrics. If need_value is false,
only metrics are returned.
'''
# initialize variable for record
record_loss = 0.0
if t_sigma is None:
record_correct = 0.0 # count of correct prediction
record_total = 0.0 # count of total prediction
record_real = np.zeros((0))
record_pred = np.zeros((0, 2))
else:
record_real = np.zeros((0, 1)) # real value
record_pred = np.zeros((0, 1)) # prediction value
# perform inference
for (x, y) in dataloader:
x, y = x.to(device), y.to(device)
outputs = net(x)
loss = criterion(outputs, y)
record_loss += loss.item()
record_real = np.concatenate((record_real, y.data.cpu().numpy()),
axis=0)
record_pred = np.concatenate((record_pred, outputs.data.cpu().numpy()),
axis=0)
if t_sigma is None:
_, predicted = torch.max(outputs.data, 1)
record_total += y.size(0)
record_correct += (predicted == y.data).sum()
# metric calculation
loss = record_loss / len(dataloader)
if t_sigma is None:
aucc = record_correct.to(torch.float) / record_total
if need_value:
return aucc, loss, record_real, record_pred
else:
return aucc, loss
else:
corr = pearsonr(record_real, record_pred)[0]
        mae = np.mean(np.abs(record_real - record_pred)) * t_sigma
        if need_value:
            return corr, mae, loss, record_real, record_pred
        else:
            return corr, mae, loss
"""
.. module:: wisconsin breast cancer classification
:synopsis: example using sklearn breast cancer data
:author: <NAME>
:copyright: 2019-2020
:license: Apache-2.0
"""
import os
import sys
sys.path.insert(0, os.path.join('..', 'amicus'))
sys.path.insert(0, os.path.join('..', '..', 'amicus'))
import pathlib
import pandas as pd
import numpy as np
import sklearn.datasets
from amicus import Project
# Loads cancer data and converts from numpy arrays to a pandas DataFrame.
cancer = sklearn.datasets.load_breast_cancer()
df = pd.DataFrame(
data = np.c_[cancer['data'], cancer['target']],
    columns = np.append(cancer['feature_names'], ['target']))
"""
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
import fdasrsf.bayesian_functions as bf
import fdasrsf.fPCA as fpca
import fdasrsf.geometry as geo
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import interp1d
from scipy.linalg import svd, cholesky
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform, pdist
import GPy
from numpy.linalg import norm, inv
from numpy.random import rand, normal
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
from tqdm import tqdm
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
class fdawarp:
"""
This class provides alignment methods for functional data using the SRVF framework
Usage: obj = fdawarp(f,t)
:param f: (M,N): matrix defining N functions of M samples
:param time: time vector of length M
:param fn: aligned functions
:param qn: aligned srvfs
:param q0: initial srvfs
:param fmean: function mean
:param mqn: mean srvf
:param gam: warping functions
:param psi: srvf of warping functions
:param stats: alignment statistics
:param qun: cost function
    :param lambda: elasticity parameter lambda
:param method: optimization method
:param gamI: inverse warping function
:param rsamps: random samples
:param fs: random aligned functions
:param gams: random warping functions
:param ft: random warped functions
:param qs: random aligned srvfs
:param type: alignment type
:param mcmc: mcmc output if bayesian
Author : <NAME> (JDT) <jdtuck AT sandia.gov>
Date : 15-Mar-2018
"""
def __init__(self, f, time):
"""
Construct an instance of the fdawarp class
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
"""
a = time.shape[0]
if f.shape[0] != a:
            raise Exception('Number of rows of f must equal the length of time')
self.f = f
self.time = time
self.rsamps = False
def srsf_align(self, method="mean", omethod="DP2", center=True,
smoothdata=False, MaxItr=20, parallel=False, lam=0.0,
cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
:param method: (string) warp calculate Karcher Mean or Median
(options = "mean" or "median") (default="mean")
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param center: center warping functions (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param MaxItr: Maximum number of iterations (default = 20)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> obj = fs.fdawarp(f,time)
>>> obj.srsf_align()
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
f0 = self.f
self.method = omethod
methods = ["mean", "median"]
self.type = method
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((self.time[-1] - self.time[0]) * gamI + self.time[0], self.time, mf)
mq = uf.f_to_srsf(mf, self.time)
# Compute Karcher Mean
if method == 0:
print("Compute Karcher Mean of %d function in SRSF space..." % N)
if method == 1:
print("Compute Karcher Median of %d function in SRSF space..." % N)
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, MaxItr+2))
tmp[:,0] = mf
mf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = self.f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r],
self.time, q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0],
omethod, lam, grid_dim)
gam_dev = np.zeros((M, N))
vtil = np.zeros((M,N))
dtil = np.zeros(N)
for k in range(0, N):
f[:, k, r + 1] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], self.time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
v = q[:, k, r + 1] - mq[:,r]
d = np.sqrt(trapz(v*v, self.time))
vtil[:,k] = v/d
dtil[k] = 1.0/d
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, self.time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
mf[:, r + 1] = ftemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, self.time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
stp = .3
vbar = vtil.sum(axis=1)*(1/dtil.sum())
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = mq[:,r] + stp*vbar
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mq[:, r + 1] * np.abs(mq[:, r + 1]), self.time)
mf[:, r + 1] = np.median(f0[1, :])+tmp
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
if center:
r += 1
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r], self.time,
q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0], omethod,
lam, grid_dim)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (self.time[-1] - self.time[0]) * gamI + self.time[0]
mq[:, r + 1] = np.interp(time0, self.time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, self.time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, self.time, f[:, k, r])
gam[:, k] = np.interp(time0, self.time, gam[:, k])
else:
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
# Aligned data & stats
self.fn = f[:, :, r + 1]
self.qn = q[:, :, r + 1]
self.q0 = q[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq[:, r + 1]
tmp = np.zeros(M)
tmp[1:] = cumtrapz(self.mqn * np.abs(self.mqn), self.time)
self.fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
def plot(self):
"""
plot plot functional alignment results
Usage: obj.plot()
"""
M = self.f.shape[0]
plot.f_plot(self.time, self.f, title="f Original Data")
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), self.gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(self.time, self.fn, title="Warped Data")
mean_f0 = self.f.mean(axis=1)
std_f0 = self.f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Warped Data: Mean $\pm$ STD")
plot.f_plot(self.time, self.fmean, title="$f_{mean}$")
plt.show()
return
def gauss_model(self, n=1, sort_samples=False):
"""
This function models the functional data using a Gaussian model
extracted from the principal components of the srvfs
:param n: number of random samples
:param sort_samples: sort samples (default = T)
:type n: integer
:type sort_samples: bool
"""
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
# Parameters
eps = np.finfo(np.double).eps
binsize = np.diff(time)
binsize = binsize.mean()
M = time.size
# compute mean and covariance in q-domain
mq_new = qn.mean(axis=1)
        mididx = int(np.round(time.shape[0] / 2))  # index of the middle sample
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
qn2 = np.vstack((qn, m_new))
C = np.cov(qn2)
        q_s = np.random.multivariate_normal(mqn, C, n)
import os
from django.urls import path, include
import face_recognition
import cv2
from imutils.video import VideoStream
import imutils
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
# load our serialized face detector model from disk
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model(r"C:\Users\mkjsr\OneDrive\Desktop\Django_mask_attendance\main_base\mask_detector.model")
def detect_faces(frame,email):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
lable = "Not Verified"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR,'face_dataset')
    loc = os.path.join(MEDIA_ROOT, str(email) + '.jpg')
face_1_image = face_recognition.load_image_file(loc)
small_frame_1 = cv2.resize(face_1_image, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame_1 = small_frame_1[:, :, ::-1]
face_1_face_encoding = face_recognition.face_encodings(rgb_small_frame_1)[0]
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
from __future__ import print_function
import numpy as np
from dataflow.lib import err1d
def extend(a, b):
"""
Extend *a* to match the number of dimensions of *b*.
This adds dimensions to the end of *a* rather than the beginning. It is
equivalent to *a[..., None, None]* with the right number of None elements
to make the number of dimensions match (or np.newaxis if you prefer).
For example::
from numpy.random import rand
a, b = rand(3, 4), rand(3, 4, 2)
a + b
==> ValueError: operands could not be broadcast together with shapes (3,4) (3,4,2)
c = extend(a, b) + b
c.shape
==> (3, 4, 2)
Numpy broadcasting rules automatically extend arrays to the beginning,
so the corresponding *lextend* function is not needed::
c = rand(3, 4) + rand(2, 3, 4)
c.shape
==> (2, 3, 4)
"""
if np.isscalar(a):
return a
extra_dims = (np.newaxis,)*(b.ndim-a.ndim)
return a[(..., *extra_dims)]
def indent(text, prefix=" "):
"""
Add a prefix to every line in a string.
"""
return "\n".join(prefix+line for line in text.splitlines())
def group_data(datasets):
"""
Groups data files by intent and polarization cross section.
Returns a dictionary with the groups, keyed by (intent,polarization).
"""
# TODO: also need to group by temperature/field
groups = {}
for d in datasets:
groups.setdefault((d.intent, d.polarization), []).append(d)
return groups
def group_by_xs(datasets):
"""
Return datasets grouped by polarization cross section.
"""
cross_sections = {}
for data in datasets:
cross_sections.setdefault(data.polarization, []).append(data)
#print("datasets", [":".join((d.name, d.entry, d.polarization, d.intent)) for d in datasets])
#print("xs", cross_sections)
return cross_sections
def group_by_key(key, datasets):
"""
Return datasets grouped by a value that can be found in a refldata file.
Handle dotted namespace through recursive lookup.
Handle union with comma. (e.g. key = "polarization,sample.name" would
create group where sample.name and polarization are the same for all)
"""
groups = {}
key_items = key.split(",")
for data in datasets:
groupkey = []
for item in key_items:
item = item.strip()
value = data
for k in item.split("."):
value = getattr(value, k)
groupkey.append(value)
groupkey = tuple(sorted(groupkey))
groups.setdefault(groupkey, []).append(data)
return groups
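# Usage sketch (illustrative): group datasets where both polarization and
# sample.name agree; dotted keys recurse through attributes and the comma
# takes the union of keys into one grouping tuple.
#   groups = group_by_key("polarization,sample.name", datasets)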
def group_by_intent(datasets):
"""
Return datasets grouped by intent.
"""
intents = {}
for data in datasets:
intents.setdefault(data.intent, []).append(data)
#print("datasets", [":".join((d.name, d.entry, d.polarization, d.intent)) for d in datasets])
#print("xs", cross_sections)
return intents
def nearest(x, xp, fp=None):
"""
Return the *fp* value corresponding to the *xp* that is nearest to *x*.
If *fp* is missing, return the index of the nearest value.
"""
if len(xp) == 1:
if np.isscalar(x):
return fp[0] if fp is not None else 0
else:
return np.array(len(x)*(fp if fp is not None else [0]))
# if fp is not provided, want to return f as an index into the array xp
# for the target values x, so set it to integer indices. if fp is
# provided, make sure it is an array.
fp = np.arange(len(xp)) if fp is None else np.asarray(fp)
# make sure that the xp array is sorted
xp = np.asarray(xp)
if np.any(np.diff(xp) < 0.):
index = np.argsort(xp)
xp, fp = xp[index], fp[index]
# find the midpoints of xp and use that as the index
xp = 0.5*(xp[:-1] + xp[1:])
return fp[np.searchsorted(xp, x)]
def plot_sa(data):
"""
Plot spin asymmetry data.
"""
from matplotlib import pyplot as plt
from uncertainties.unumpy import uarray as U, nominal_values, std_devs
from dataflow.lib.errutil import interp
# TODO: interp doesn't test for matching resolution
data = dict((d.polarization, d) for d in data)
pp, mm = data['++'], data['--']
v_pp = U(pp.v, pp.dv)
v_mm = interp(pp.x, mm.x, U(mm.v, mm.dv))
sa = (v_pp - v_mm) / (v_pp + v_mm)
v, dv = nominal_values(sa), std_devs(sa)
plt.errorbar(pp.x, v, yerr=dv, fmt='.', label=pp.name)
plt.xlabel("%s (%s)"%(pp.xlabel, pp.xunits) if pp.xunits else pp.xlabel)
plt.ylabel(r'$(R^{++} -\, R^{--}) / (R^{++} +\, R^{--})$')
def test_nearest():
# length 1 arrays
xp, fp = [1], [5]
assert nearest(0, xp) == 0
assert (nearest([0], xp) == [0]).all()
assert (nearest([0, 1], xp) == [0, 0]).all()
assert nearest(0, xp, fp) == fp[0]
assert (nearest([0], xp, fp) == [fp[0]]).all()
assert (nearest([0, 1], xp, fp) == [fp[0]]*2).all()
# constants as arrays
xp, fp = [1, 1, 1], [5, 5, 5]
assert nearest(0, xp) == 0
assert (nearest([0], xp) == [0]).all()
assert (nearest([0, 1], xp) == [0, 0]).all()
assert nearest(0, xp, fp) == fp[0]
assert (nearest([0], xp, fp) == [fp[0]]).all()
assert (nearest([0, 1], xp, fp) == [fp[0]]*2).all()
# actual arrays
xp, fp = [1, 2, 3], [4, 5, 6]
assert nearest(0, xp) == 0
assert (nearest([0], xp) == [0]).all()
assert (nearest([0, 1, 1.1, 1.6, 2.1, 2.9, 3, 3.1], xp)
== [0, 0, 0, 1, 1, 2, 2, 2]).all()
assert nearest(0, xp, fp) == fp[0]
assert (nearest([0], xp, fp) == [fp[0]]).all()
assert (nearest([0, 1, 1.1, 1.6, 2.1, 2.9, 3, 3.1], xp, fp)
== [fp[i] for i in [0, 0, 0, 1, 1, 2, 2, 2]]).all()
# unsorted arrays
xp, fp = [1, 3, 2], [4, 5, 6]
assert nearest(0, xp) == 0
assert (nearest([0], xp) == [0]).all()
assert (nearest([0, 1, 1.1, 1.6, 2.1, 2.9, 3, 3.1], xp)
== [0, 0, 0, 2, 2, 1, 1, 1]).all()
assert nearest(0, xp, fp) == fp[0]
assert (nearest([0], xp, fp) == [fp[0]]).all()
assert (nearest([0, 1, 1.1, 1.6, 2.1, 2.9, 3, 3.1], xp, fp)
== [fp[i] for i in [0, 0, 0, 2, 2, 1, 1, 1]]).all()
def poisson_average(y, dy, norm='monitor'):
r"""
Return the Poisson average of a rate vector *y +/- dy*.
    If y, dy is multidimensional then average over the first dimension,
    returning an item with one fewer dimension.
    Use *norm='monitor'* when counting against monitor (the default) or
    *norm='time'* when counting against time. Use *norm='none'* if *y, dy*
    is unnormalized, and the Poisson sum should be returned.
The count rate is expressed as the number of counts in an interval $N$
divided by the interval $M$. The rate for the combined interval should
match the rate you would get if you counted for the entire interval,
which is $\sum N_i / \sum M_i$. We do this by inferring the counts
and intervals from the rate and uncertainty, adding them together, and
dividing to get the average rate over the entire interval.
With counts $N$ and monitors $M$ both from Poisson distributions, the
    uncertainties are $\sqrt N$ and $\sqrt M$ respectively, and Gaussian
error propagation gives
.. math::
:nowrap:
\begin{eqnarray}
y &=& N/M \\
\left(\frac{\Delta y}{y}\right)^2
&=& \left(\frac{\Delta N}{N}\right)^2
+ \left(\frac{\Delta M}{M}\right)^2 \\
&=& \left(\frac{1}{N}\right)
+ \left(\frac{1}{M}\right) \\
\Delta y &=& y \sqrt{1/N + 1/M}
\end{eqnarray}
Inverting, we get
.. math::
:nowrap:
\begin{eqnarray}
M &=& y (y+1) / \Delta y^2 \\
N &=& y M
\end{eqnarray}
When counts are zero, $y = 0$ and $M = 0$ according to the above formula.
To correctly average intervals that may include zero counts, we need to
be sure that the count uncertainty $\Delta N = 1$ when $N = 0$, which
leads to a count rate of $0 \pm 1/M$, and the formula above needs to be
adjusted accordingly, with $M = 1/ \Delta y$ when $N = 0$.
To average a group of measurements $y_1, \ldots, y_n$ we first
convert to counts and monitors, then set the rate to the sum of
the counts over the sum of the monitors. This gives
.. math::
:nowrap:
\begin{eqnarray}
M_i &=& y_i (y_i + 1) / \Delta y_i^2 \\
N_i &=& y_i M_i \\
\bar y &=& \sum N_i / \sum M_i \\
        \Delta \bar y &=& \bar y \sqrt{1/\sum N_i + 1/\sum M_i}
\end{eqnarray}
When counting against time the monitor uncertainty $\Delta M$ is
effectively zero compared to uncertainty in the counts, and so
the formulas are a little simpler:
.. math::
y &= N/M \\
\Delta y &= y \sqrt{1/N} \\
M &= y / \Delta y^2 \\
N &= y M
Again, zero counts leads to $M = 1 / \Delta y$.
Averaging gives
.. math::
:nowrap:
\begin{eqnarray}
M_i &=& y_i / \Delta y_i^2 \\
N_i &=& y_i M_i \\
\bar y &=& \sum N_i / \sum M_i \\
        \Delta \bar y &=& \bar y \sqrt{1/\sum N_i}
\end{eqnarray}
Our expression does not account for detector efficiency, attenuators
and dead-time correction. In practice we have an additional scaling
value $A \pm \Delta A$ for each point, giving
.. math::
:nowrap:
\begin{eqnarray}
y &=& A N/M \\
\left(\frac{\Delta y}{y}\right)^2
&=& \left(\frac{\Delta A}{A}\right)^2
+ \left(\frac{\Delta N}{N}\right)^2
+ \left(\frac{\Delta M}{M}\right)^2
\end{eqnarray}
Clearly with two inputs $y$, $\Delta y$ we cannot uniquely recover
the four parameters $A$, $\Delta A$, $M$, $N$, and Poisson averaging
will not work properly. In these cases, a monitor weighted average
is better for error propagation. Monitor weighted averaging
works well for everything except data with many zero counts.
"""
if norm not in ("monitor", "time", "none"):
raise ValueError("expected norm to be time, monitor or none")
# Check whether we are combining rates or counts. If it is counts,
# then simply sum them, and sum the uncertainty in quadrature. This
# gives the expected result for poisson statistics, with counts over
# the combined interval having variance equal to the sum of the counts.
# It even gives correct results for very low count rates with many of
# the individual counts giving zero, so long as variance on zero counts
# is set to zero rather than one.
if norm == "none":
bar_y = np.sum(y, axis=0)
bar_dy = np.sqrt(np.sum(dy**2, axis=0))
return bar_y, bar_dy
dy = dy + (dy == 0) # Protect against zero counts in division
# Recover monitor and counts
monitors = y*(y+1)/dy**2 if norm == "monitor" else y/dy**2 # if "time"
monitors[y == 0] = 1./dy[y == 0] # Special handling for 0 counts
counts = y*monitors
# Compute average rate
combined_monitors = np.sum(monitors, axis=0)
combined_counts = np.sum(counts, axis=0)
bar_y = combined_counts/combined_monitors
if norm == "time":
bar_dy = bar_y * np.sqrt(1./combined_monitors)
elif np.isscalar(bar_y):
if bar_y == 0:
# When bar_y is zero then 1/N is undefined and so sqrt(1/N + 1/M) fails.
# Instead use |dy| = 1/M*sqrt((dN)^2 + 1/M) with dN = 1.
bar_dy = 1./combined_monitors * np.sqrt(1. + 1./combined_monitors)
else:
# When bar_y is not zero then use |dy| = N/M * sqrt(1/N + 1/M)
bar_dy = bar_y * np.sqrt(1./combined_counts + 1./combined_monitors)
else:
# Following the scalar case above, first build bar_dy assuming
# that y is zero since it works for all y, then fill in the values
# for y not zero. Can't do this the other way since the expression
# for y not zero will raise errors when y is zero.
bar_dy = 1./combined_monitors * np.sqrt(1. + 1./combined_monitors)
idx = (bar_y != 0)
bar_dy[idx] = bar_y[idx] * np.sqrt(1./combined_counts[idx]
+ 1./combined_monitors[idx])
#print("est. monitors:", monitors)
#print("est. counts:", counts)
#print("poisson avg", counts.shape, bar_y.shape, bar_dy.shape)
return bar_y, bar_dy
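# A minimal sketch of the monitor-normalized average (hypothetical counts,
# not from a real instrument): two rates are combined by reconstructing the
# underlying counts and monitors, so the result matches counting over the
# whole interval, (5+40)/(100+400) = 0.09.
def _demo_poisson_average():
    # 5 counts over 100 monitors, and 40 counts over 400 monitors
    y = np.array([[5/100], [40/400]])
    dy = np.array([[5/100 * np.sqrt(1/5 + 1/100)],
                   [40/400 * np.sqrt(1/40 + 1/400)]])
    return poisson_average(y, dy, norm='monitor')  # ~ (0.09, ...)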
def gaussian_average(y, dy, w, dw=0):
bar_y, bar_y_var = err1d.average(y, dy**2, w, dw**2)
    return bar_y, np.sqrt(bar_y_var)
"""
@belovm96:
Script for object encoding in 100k subset of Google's Conceptual Captions dataset
Object encoder is Google's Universal Sentence Encoder, and the object prediction model is Inception (v.3)
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from keras.applications.imagenet_utils import decode_predictions
import tensorflow_hub as hub
import os
import numpy as np
import PIL
from keras.applications.inception_v3 import InceptionV3
from keras import Model
#Using Google Universal Sentence Encoder
def embed_useT(module):
with tf.Graph().as_default():
sentences = tf.placeholder(tf.string)
embed = hub.Module(module)
embeddings = embed(sentences)
session = tf.train.MonitoredSession()
return lambda x: session.run(embeddings, {sentences: x})
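# Usage sketch ('module_useT' is whatever local copy of the encoder sits on
# disk): the returned closure keeps the MonitoredSession alive, so repeated
# calls reuse the same loaded graph.
#     embed_fn = embed_useT('module_useT')
#     vectors = embed_fn(['a dog', 'a cat'])  # (2, 512) for the standard USE module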
#Encode images and save the object predictions to a file
def caption_enc(img_list, obj_model, IMG_PATH):
file = open('/home/jupyter/image-captioning-approaches/analysis/objects_in_images.txt','w')
lst_enc = []
for i, img in enumerate(img_list):
if i % 3000 == 0:
print('Image', i)
try:
image = PIL.Image.open(os.path.join(IMG_PATH, img))
image = np.asarray(image.resize((299,299))) / 255.0
predict_obj = obj_model.predict(np.array([image]))
top_labels = decode_predictions(predict_obj, top=5)
obj_list = [label[1] for label in top_labels[0]]
str_labels = " ".join(obj_list)
lst_enc.append(str_labels.replace('_', ' '))
file.write(img+'\t'+str_labels.replace('_', ' ')+'\n')
        except Exception:
print(img, 'not an image...')
file.close()
return lst_enc
img_path = '/dataset/google-cc/100k/images'
list_100k_imgs = os.listdir(img_path)
#Load Inception (v.3) CNN and Google's Universal Sentence Encoder
img_model = InceptionV3(weights='imagenet')
new_input = img_model.input
new_output = img_model.output
obj_pred = Model(new_input, new_output)
embed = embed_useT('module_useT')
#Create the list of the objects detected in images
embed_imgs_100 = caption_enc(list_100k_imgs, obj_pred, img_path)
#Encode the objects detected in images
train_1 = embed_imgs_100[:45000]
train_2 = embed_imgs_100[45000:70000]
train_first = embed(train_1)
train_sec = embed(train_2)
enc_train = np.concatenate((train_first, train_sec))
val = embed_imgs_100[70000:79000]
enc_val = embed(val)
test = embed_imgs_100[79000:89000]
enc_test = embed(test)
#Save the encodings
np.save("/home/jupyter/image-captioning-approaches/analysis/inception_enc_obj_train.npy", enc_train)
np.save("/home/jupyter/image-captioning-approaches/analysis/inception_enc_obj_test.npy", enc_test)
np.save("/home/jupyter/image-captioning-approaches/analysis/inception_enc_obj_validation.npy", enc_val)
import scipy.linalg as la
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
"""
Creating an entire pressure field based on the locations of various sources
"""
def pressure_field(positions,frequencies,
field_points = -1,
time = 0.0,
areas = [0.001],
velocities = [0.01],
strengths = [0.01],
phases = [0],
x_range = [-1,1],
y_range = [-1,1],
z_range = [-1,1],
point_density = 100,
directivity_distance = 1000,
num_directivity_points = 10000,
method = "Monopole Addition",
dimensions = 2,
directivity_only = False,
directivity_plot_alone = False,
show_plots = False,
pressure_limits = [-100,100]):
    # Make all arrays that describe the sources the same length
num_sources = len(positions)
positions = np.asarray(positions)
if np.size(frequencies) == 1:
        frequencies = np.ones(num_sources) * frequencies
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# Copyright (c) 2015, <NAME> and <NAME>.
# License: GNU-GPL Style.
# How to cite GBpy:
# Banadaki, <NAME>. & <NAME>. "An efficient algorithm for computing the primitive
# bases of a general lattice plane",
# Journal of Applied Crystallography 48, 585-588 (2015). doi:10.1107/S1600576715004446
import numpy as np
from . import integer_manipulations as int_man
from . import misorient_fz as mis_fz
from . import tools as trans
import numpy.linalg as nla
def proper_ptgrp(cryst_ptgrp):
"""
Returns the proper point group corresponding to a crystallographic point
group
Parameters
----------------
cryst_ptgrp: str
Crystallogrphic point group in Schoenflies notation
Returns
----------
proper_ptgrp: str
Proper point group in Schoenflies notation
"""
if cryst_ptgrp in ['D3', 'D3d']:
proper_ptgrp = 'D3'
if cryst_ptgrp in ['D4', 'D4h']:
proper_ptgrp = 'D4'
if cryst_ptgrp in ['D6', 'D6h']:
proper_ptgrp = 'D6'
if cryst_ptgrp in ['O', 'Oh']:
proper_ptgrp = 'O'
# prop_grps = ['C1', 'C2', 'C3', 'C4', 'C6', 'D2', 'D3', 'D4', 'D6',
# 'T', 'O']
# laue_grps = ['Ci', 'C2h', 'C3i', 'C4h', 'C6h', 'D2h', 'D3d', 'D4h', 'D6h',
# 'Th', 'Oh']
# if cryst_ptgrp in laue_grps:
# proper_ptgrp =
# elif cryst_ptgrp in prop_grps:
# proper_ptgrp = cryst_ptgrp
return proper_ptgrp
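# For example, a Laue group maps to its proper rotation subgroup:
#     proper_ptgrp('D4h')  # -> 'D4'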
def largest_odd_factor(var_arr):
"""
    Function that computes the largest odd factors of an array of integers
Parameters
-----------------
var_arr: numpy.array
        Array of integers whose largest odd factors need to be computed
Returns
------------
odd_d: numpy.array
Array of largest odd factors of each integer in var_arr
"""
if var_arr.ndim == 1:
odd_d = np.empty(np.shape(var_arr))
odd_d[:] = np.NaN
ind1 = np.where((np.remainder(var_arr, 2) != 0) | (var_arr == 0))[0]
if np.size(ind1) != 0:
odd_d[ind1] = var_arr[ind1]
ind2 = np.where((np.remainder(var_arr, 2) == 0) & (var_arr != 0))[0]
if np.size(ind2) != 0:
odd_d[ind2] = largest_odd_factor(var_arr[ind2] / 2.0)
return odd_d
else:
raise Exception('Wrong Input Type')
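# A small worked example (made-up input): even entries are halved recursively
# until they become odd, and zeros map to themselves.
#     largest_odd_factor(np.array([12., 7., 0., 40.]))  # -> array([3., 7., 0., 5.])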
def compute_inp_params(lattice, sig_type):
# Leila: for the tolerance value for D6 I chose 1e-2
# to get the values of mu and nu in table 2 in grimmers paper.
"""
    tau and kmax, which are needed to enumerate the possible integer
    quadruple combinations, are computed
Parameters
----------------
lattice: class
Attributes of the underlying lattice class
sig_type: {'common', 'specific'}
Returns
-----------
tau: float
tau is a rational number :math:`= \\frac{\\nu}{\\mu}`
tau is equal to (a/c)^2
    kmax: int
        kmax is an integer that depends on :math:`\\mu \\ , \\nu`
        for hcp, kmax equals F/\Sigma. kmax is always a divisor of 12\mu\nu.
        F/\Sigma is a divisor of 6\mu\nu if \nu is even and a divisor of
        3\mu\nu if \nu is a multiple of 4.
"""
lat_params = lattice.lat_params
cryst_ptgrp = proper_ptgrp(lattice.cryst_ptgrp)
if cryst_ptgrp == 'D3':
c_alpha = np.cos(lat_params['alpha'])
tau = c_alpha / (1 + 2 * c_alpha)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
rho = mu - 3 * nu
kmax = 4 * mu * rho
elif sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D4':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
kmax = 4 * mu * nu
if sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D6':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-2)
if np.remainder(nu, 2) == 0:
                if np.remainder(nu, 4) == 0:
import time
import Trekker
import numpy as np
import os
import vtk
import pickle
import threading
import math
"""
Thread to update the coordinates with the fiducial points
co-registration method while the Navigation Button is pressed.
Sleep function in run method is used to avoid blocking GUI and
for better real-time navigation
"""
class ComputeTracts(threading.Thread):
"""
Thread to update the coordinates with the fiducial points
co-registration method while the Navigation Button is pressed.
Sleep function in run method is used to avoid blocking GUI and
for better real-time navigation
"""
def __init__(self, tracker, position, n_tracts):
threading.Thread.__init__(self)
# trekker variables
self.tracker = tracker
self.position = position
self.n_tracts = n_tracts
# threading variable
self._pause_ = False
# self.mutex = threading.Lock()
# self.start()
def stop(self):
# self.mutex.release()
self._pause_ = True
def run(self):
if self._pause_:
return
else:
# self.mutex.acquire()
try:
seed = self.position
chunck_size = 10
nchuncks = math.floor(self.n_tracts / chunck_size)
# print("The chunck_size: ", chunck_size)
# print("The nchuncks: ", nchuncks)
root = vtk.vtkMultiBlockDataSet()
# n = 1
n_tracts = 0
# while n <= nchuncks:
for n in range(nchuncks):
# Compute the tracts
trk_list = []
# for _ in range(chunck_size):
self.tracker.set_seeds(np.repeat(seed, chunck_size, axis=0))
if self.tracker.run():
trk_list.extend(self.tracker.run())
# Transform tracts to array
                    trk_arr = [np.asarray(trk_n) for trk_n in trk_list]
import numpy as np
import os
import torch
import torchvision
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, random_split
from torchvision import transforms
import PIL.Image as Image
from sklearn.datasets import load_boston
## Custom PyTorch Dataset Class wrapper
class CustomDataset(Dataset):
def __init__(self, data, target, device=None, transform=None):
self.transform = transform
if device is not None:
# Push the entire data to given device, eg: cuda:0
self.data = data.float().to(device)
self.targets = target.long().to(device)
else:
self.data = data.float()
self.targets = target.long()
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_data = self.data[idx]
label = self.targets[idx]
if self.transform is not None:
sample_data = self.transform(sample_data)
return (sample_data, label) # .astype('float32')
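# A minimal usage sketch with synthetic tensors (not part of the original
# module): wrap feature/label tensors so a DataLoader can batch them,
# optionally pushing everything to a device such as 'cuda:0' up front.
#     X = torch.randn(100, 20)
#     y = torch.randint(0, 2, (100,))
#     loader = DataLoader(CustomDataset(X, y), batch_size=16, shuffle=True)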
class CustomDataset_WithId(Dataset):
def __init__(self, data, target, transform=None):
self.transform = transform
self.data = data # .astype('float32')
self.targets = target
self.X = self.data
self.Y = self.targets
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_data = self.data[idx]
label = self.targets[idx]
if self.transform is not None:
sample_data = self.transform(sample_data)
return sample_data, label, idx # .astype('float32')
## Utility functions to load datasets in CSV and libsvm formats
def csv_file_load(path, dim, save_data=False):
data = []
target = []
with open(path) as fp:
line = fp.readline()
while line:
temp = [i for i in line.strip().split(",")]
target.append(int(float(temp[-1]))) # Class Number. # Not assumed to be in (0, K-1)
temp_data = [0] * dim
count = 0
for i in temp[:-1]:
# ind, val = i.split(':')
temp_data[count] = float(i)
count += 1
data.append(temp_data)
line = fp.readline()
X_data = np.array(data, dtype=np.float32)
Y_label = np.array(target)
if save_data:
# Save the numpy files to the folder where they come from
data_np_path = path + '.data.npy'
target_np_path = path + '.label.npy'
np.save(data_np_path, X_data)
np.save(target_np_path, Y_label)
return (X_data, Y_label)
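# Expected on-disk format (illustrative path and values): one sample per row,
# features followed by the integer class label, e.g. "0.5,1.2,3" for dim=2.
#     X, y = csv_file_load('/tmp/example.csv', dim=2)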
def libsvm_file_load(path, dim, save_data=False):
data = []
target = []
with open(path) as fp:
line = fp.readline()
while line:
temp = [i for i in line.strip().split(" ")]
target.append(int(float(temp[0]))) # Class Number. # Not assumed to be in (0, K-1)
temp_data = [0] * dim
for i in temp[1:]:
ind, val = i.split(':')
temp_data[int(ind) - 1] = float(val)
data.append(temp_data)
line = fp.readline()
X_data = np.array(data, dtype=np.float32)
Y_label = np.array(target)
if save_data:
# Save the numpy files to the folder where they come from
data_np_path = path + '.data.npy'
target_np_path = path + '.label.npy'
np.save(data_np_path, X_data)
        np.save(target_np_path, Y_label)
import msgpack as msg
import msgpack_numpy as m
import numpy as np
from io import BytesIO
import json
from pathlib import Path
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats import linregress
from ICalcify.fitting import FittingResult
from ICalcify.regression import RegResult
m.patch()
class ObjectBranch(object):
def __init__(self, name, dtype, branch):
self.name = name
self.dtype = dtype
if type(branch) == list:
self.branch = np.array(branch,dtype=object)
elif type(branch) == np.array or type(branch) == np.ndarray:
self.branch = branch
else:
raise TypeError('Argument \'Branch\' must be of type list, or numpy.(nd)array, got {}'.format(type(branch)))
def __len__(self):
return len(self.branch)
def __repr__(self):
return "{{Name: '{}', Type: '{}', Length: {}}}".format(self.name,self.dtype,self.__len__())
def __str__(self):
ll = self.__len__()
if ll > 20:
return "Name: '{}'\n{}\n...\n{}\nType: '{}', Len: {}".format(self.name,"\n".join([str(x) for x in self.branch[:10]]),
"\n".join([str(x) for x in self.branch[-10:]]),self.dtype,ll)
else:
return "Name: '{}'\n{}\nType: '{}', Len: {}".format(self.name,"\n".join([str(x) for x in self.branch]),self.dtype,ll)
def __iter__(self):
return self.branch.__iter__()
def __next__(self):
return self.branch.__next__()
def _repr_html_(self):
return "<h3><b>'{}':</b> Type: '{}', Length: {}</h3>".format(self.name,self.dtype,self.__len__())
def cut(self,callable):
        return ObjectBranch(f"{self.name}_Cut", self.dtype, self.branch[[callable(x) for x in self.branch]])
class StringBranch(ObjectBranch):
def __init__(self,name,branch):
branch = np.array(branch)
super().__init__(name,'String',branch)
class FloatBranch(ObjectBranch):
def __init__(self,name,branch):
branch = np.array(branch,dtype=np.float64)
super().__init__(name,'f64',branch)
def against(self, other):
if not type(other) == FloatBranch:
raise TypeError("Other branch must be of type FloatBranch")
if not len(self.branch) == len(other.branch):
raise RuntimeError("Both self and other branch must be the same length.")
return PointBranch(f"{self.name} X {other.name}",np.array([[x,y] for x,y in zip(self.branch,other.branch)]))
def plot(self,show=False,flag=None):
if show:
plt.figure()
plt.plot(np.arange(self.__len__()),self.branch,label=f"{self.name}_{flag}" if flag else self.name)
plt.legend()
if show:
plt.show()
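# Sketch of pairing two float branches into 2-D points (synthetic values):
def _demo_float_branch():
    xs = FloatBranch('x', [0.0, 1.0, 2.0])
    ys = FloatBranch('y', [0.0, 1.0, 4.0])
    return xs.against(ys)  # PointBranch named 'x X y'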
class ThreeVecBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(ThreeVecBranch.from_dict,branch)))
branch = np.array(branch)
super().__init__(name,'ThreeVec',branch)
def from_dict(obj):
return np.array([obj['x0'],obj['x1'],obj['x2']])
def scatter(self,show=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(self.branch[:,0],self.branch[:,1],self.branch[:,2],marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
plt.show()
class ThreeMatBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(ThreeMatBranch.from_dict,branch)))
branch = np.array(branch)
super().__init__(name,'ThreeMat',branch)
def from_dict(obj):
return np.array([np.array([obj[ky]['x0'],obj[ky]['x1'],obj[ky]['x2']]) for ky in obj])
class FourVecBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(FourVecBranch.from_dict,branch)))
branch = np.array(branch)
super().__init__(name,'FourVec',branch)
def from_dict(obj):
return np.array([obj['x0'],obj['x1'],obj['x2'],obj['x3']])
class FourMatBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(FourMatBranch.from_dict,branch)))
branch = np.array(branch)
super().__init__(name,'FourMat',branch)
def from_dict(obj):
return np.array([np.array([obj[ky]['x0'],obj[ky]['x1'],obj[ky]['x2'],obj[ky]['x3']]) for ky in obj])
class BinBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(BinBranch.from_dict,branch)),dtype=object)
else:
branch = np.array([np.array([np.float64(x[0]),np.array(x[1],dtype=np.float64)],dtype='object') for x in branch],dtype=object)
super().__init__(name,'Bin',branch)
def from_dict(obj):
return np.array([obj['count'],np.array(obj['range'])],dtype=object)
def __str__(self):
return "Name: '{}'\n{}\n...\n{}\nType: '{}', Len: {}".format(
self.name,"\n".join(["{}, range({}, {})".format(int(x[0]),x[1][0],x[1][1]) for x in self.branch[:10]]),
"\n".join(["{}, range({}, {})".format(int(x[0]),x[1][0],x[1][1]) for x in self.branch[-10:]]),self.dtype,self.__len__())
def plot(self,show=False,flag=None):
if show:
plt.figure()
x = []
y = []
for bin in self.branch:
x += list(bin[1])
y += [bin[0],bin[0]]
plt.plot(x,y,label=f"{self.name}_{flag}" if flag else self.name)
plt.legend()
if show:
plt.show()
def fit(self,func):
x = []
y = []
for bin in self.branch:
x.append(bin[1].mean())
y.append(bin[0])
popt, pcov = curve_fit(func,x,y)
return FittingResult(func,x,popt,pcov,self.name)
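# A sketch of fitting binned data (synthetic bins; `gauss` is a hypothetical
# model, not part of ICalcify): `fit` evaluates the model at each bin's
# midpoint and delegates to scipy's curve_fit.
def _demo_bin_fit():
    b = BinBranch('counts', [[4, (0.0, 1.0)], [9, (1.0, 2.0)], [5, (2.0, 3.0)]])
    def gauss(x, a, mu, sigma):
        return a * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    return b.fit(gauss)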
class PointBranch(ObjectBranch):
def __init__(self,name,branch):
if len(branch) > 0:
if type(branch[0]) == dict:
branch = np.array(list(map(PointBranch.from_dict,branch)))
branch = np.array(branch)
super().__init__(name,'Point',branch)
def from_dict(obj):
        return np.array([obj['x'], obj['y']])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 09:20:05 2021
@author: <NAME> & <NAME> & <NAME>
<EMAIL>
"""
from tkinter import *
import numpy as np
#import webbrowser
menu_inicial = Tk()
menu_inicial.title('Graphical User Interface for Attitude Coordinates of Satellites')
#menu_inicial.iconbitmap('@/home/geovani/Documentos//Attitude-Parameters/icon.xbm')
#menu_inicial.call('wm', 'iconphoto', menu_inicial._w, PhotoImage(file='/home/geovani/Documentos/Attitude-Parameters/icon.gif'))
#menu_inicial['bg'] = "black"
menu_inicial.resizable(0, 0)
#menu_inicial.minsize(860, 200)
#menu_inicial.maxsize(960, 200)
# program height and width
largura = 878
altura = 200
# screen resolution
largura_screen = menu_inicial.winfo_screenwidth()
altura_screen = menu_inicial.winfo_screenheight()
# window position
posx = largura_screen/2 - largura/2
posy = altura_screen/2 - altura/2
# set the geometry
menu_inicial.geometry("%dx%d+%d+%d" % (largura, altura, posx, posy))
# set as iconic
menu_inicial.state('iconic')
def helpc():
toph = Toplevel()
    toph.title('Help')
toph.resizable(0, 0)
largura = 470
altura = 100
    # screen resolution
largura_screen = toph.winfo_screenwidth()
altura_screen = toph.winfo_screenheight()
    # window position
posx = largura_screen/2 - largura/2
posy = altura_screen/2 - altura/2
    # set the geometry
toph.geometry("%dx%d+%d+%d" % (largura, altura, posx, posy))
    # set as iconic
toph.state('iconic')
labelh = Label(toph,
text = "This is Graphical User Interface produced by <NAME> \n Oriented by Dr. <NAME> and Dr. <NAME>",
font = "Arial 8",
anchor = N,
justify = RIGHT
).grid(row= 1, column = 1)
label = Label(toph,
text = "For more information: ",
font = "Arial 8"
).grid(row=2, column=1)
link1 = Label(toph, text="https://github.com/geovani-ribeiro/attitude-parameters", fg="blue", cursor="hand2")
link1.grid(row=3, column=1)
#link1.bind("<Button-1>", lambda: webbrowser.open("https://github.com/geovani-ribeiro/attitude-parameters"))
# functions for the algorithm
def command1():
top1 = Toplevel()
    top1.title('Graphical User Interface for Attitude Coordinates of Satellites')
top1.resizable(0, 0)
# top1.iconbitmap('@/home/geovani/Documentos/icon.xbm')
    # program height and width
largura = 980
altura = 200
    # screen resolution
largura_screen = top1.winfo_screenwidth()
altura_screen = top1.winfo_screenheight()
    # window position
posx = largura_screen/2 - largura/2
posy = altura_screen/2 - altura/2
    # set the geometry
top1.geometry("%dx%d+%d+%d" % (largura, altura, posx, posy))
top1.maxsize(1000, 200)
    # set as iconic
top1.state('iconic')
    # define the messages
label_5 = Label(top1,
text = "Enter the DCM data:",
font = "Arial 11",
justify = LEFT
).grid(row= 1, column = 1)
    # final result variables
phi_final = StringVar()
e_final = StringVar()
beta_final = StringVar()
q_final = StringVar()
sigma_final = StringVar()
euler_final = StringVar()
    # function
def dcm_calc():
cos_Phi = 1/2*(float(C11.get())+float(C22.get())+float(C33.get())-1)
Phi = np.arccos(cos_Phi)
e11 = 1/(2*np.sin(Phi))*(float(C23.get())-float(C32.get()))
e12 = 1/(2*np.sin(Phi))*(float(C31.get())-float(C13.get()))
        e13 = 1/(2*np.sin(Phi))*(float(C12.get())-float(C21.get()))
import numpy as np
import matplotlib.pyplot as plt
def init_conc(D, C0):
    # Create a nutrient lattice
M = np.zeros((D,D))
M = M + C0
return M
def seed(D,n_seed,C0):
L = np.zeros((D,D))
occ = []
for n in range(n_seed):
x,y = np.random.randint(0,D,size=2)
if (x,y) in occ:
            x, y = np.random.randint(0, D, size=2)
import torch as torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import collections
import glob
import numpy as np
import tqdm
import os
import time
import datetime
from pytz import timezone
class TrainingContext:
def __init__(self, PRINT_EVERY=25):
self.train_losses = []
self.valid_losses = []
self.checkpoint_epoch = 0
self.PRINT_EVERY = PRINT_EVERY
self.device = torch.device("cpu")
def _trainStep(self, batch):
self.model.zero_grad()
loss = self.applyModel(batch)
loss_item = loss.item()
loss.backward()
self.optimizer.step()
return loss_item
def applyModel(self, batch):
        raise NotImplementedError('applyModel must be implemented by a subclass')
def trainLoop(self, epoch):
self.model.train()
tq = tqdm.tqdm(self.trainDL)
losses = []
for bidx, batch in enumerate(tq):
tq.set_description('Train: %i' % bidx)
loss_item = self._trainStep(batch)
losses.append(loss_item)
if bidx % self.PRINT_EVERY == 0:
mean_loss = np.mean(losses)/self.trainDL.batch_size
tq.set_postfix(trainLoss = "{:.8f}".format(mean_loss))
#writer.add_scalar('loss/training', mean_loss, epoch*bidx)
mean_loss = np.mean(losses)/self.trainDL.batch_size
self.train_losses.append(mean_loss)
return mean_loss
def validStep(self, batch):
loss = self.applyModel(batch)
return loss.item()
def validLoop(self, epoch):
self.model.eval()
losses = []
with torch.no_grad():
tq = tqdm.tqdm(self.validDL)
for bidx, batch in enumerate(tq):
tq.set_description('Valid: %i' % bidx)
loss_item = self.validStep(batch)
losses.append(loss_item)
if bidx % self.PRINT_EVERY == 0:
mean_loss = np.mean(losses)/self.validDL.batch_size
tq.set_postfix(validLoss = "{:.8f}".format(mean_loss))
#writer.add_scalar('loss/validation', mean_loss, epoch*bidx)
        mean_loss = np.mean(losses)/self.validDL.batch_size
from dataclasses import dataclass, fields
from datetime import datetime
import numpy as np
import pandas as pd
import re
ROTARY_ENCODER_UNITS_PER_TURN = 8845.0
format_string = "%Y-%m-%d %H:%M:%S.%f" # for parsing the time strings
@dataclass
class Reward:
"""Holds information about a reward event in a logfile"""
date_time: datetime
rX: float # called rX due to indexing duplication in pandas with LogFilePositionLine X etc
rZ: float
reward_type: str = None
def __eq__(self, other):
if isinstance(other, Reward):
return self.rX == other.rX and self.rZ == other.rZ
return NotImplemented
def __key(self):
return (self.rX, self.rZ)
def __hash__(self):
return hash(self.__key())
def __lt__(self, other):
if isinstance(other, Reward):
return self.date_time < other.date_time
return NotImplemented
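# Because __eq__ and __hash__ key on position only, rewards at the same
# (x, z) collapse when de-duplicated through set() (illustrative values):
#     r1 = Reward(datetime.now(), 1.0, 2.0)
#     r2 = Reward(datetime.now(), 1.0, 2.0)
#     len({r1, r2})  # -> 1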
@dataclass
class LogFilePositionLine:
"""Class for keeping track of position information on a line of a logfile"""
date_time: datetime
X: float
Z: float
Theta: float
MX: float = 0.0
MY: float = 0.0
GainX: float = 0.0
GainY: float = 0.0
Fading: int = 0
RealTimeGainX: int = 0
RealTimeGainY: int = 0
Dark: int = 0
def __iter__(self):
for field in fields(self):
yield getattr(self, field.name)
def __key(self):
return (self.date_time)
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
if isinstance(other, LogFilePositionLine):
return self.__key() == other.__key()
return NotImplemented
def __lt__(self, other):
if isinstance(other, LogFilePositionLine):
return self.date_time < other.date_time
return NotImplemented
class LogFileParser:
def __init__(self, log_file_name: str):
self.first_time = None # important
with open(log_file_name, 'r') as logfile:
lines = logfile.readlines()
# 1) deal with all the lines containing the position of the mouse
log_pos_lines = [line for line in lines if 'GainX' in line]
loglines = []
for line in log_pos_lines:
items = line.split()
date_time = items[0] + " " + items[1]
dt = datetime.strptime(date_time, format_string)
X, Z, Rot, MX, MY, GainX, GainY, Fading, RealTimeGainX, RealTimeGainY, Dark = self.__parse_line(line)
theta = np.rad2deg(Rot / ROTARY_ENCODER_UNITS_PER_TURN)
loglines.append(LogFilePositionLine(
dt, X, Z, theta, MX, MY,
GainX, GainY, Fading, RealTimeGainX, RealTimeGainY, Dark))
# now get the unique entries in the position data
# this is only possible due to the custom methods defined
# for the LogFilePositionLine class (__lt__, __eq__ & __hash__)
self.PosLines = list(set(loglines)) # unordered so...
self.PosLines.sort()
# 2) extract all the reward-related information - all unique timestamps
        log_reward_lines = [line for line in lines if 'Reward' in line]
rewards = []
for line in log_reward_lines:
if re.search('Reward[0-9]Positioned',line):
r = self.__get_reward__(line)
r.reward_type = 'Automatic'
rewards.append(r)
if re.search('RewardPositioned',line):
r = self.__get_reward__(line)
r.reward_type = 'Automatic'
rewards.append(r)
if 'Manual Reward_activated' in line:
r = self.__get_reward__(line)
r.reward_type = 'Manual'
rewards.append(r)
if 'Reward_delivered' in line:
r = self.__get_reward__(line)
r.reward_type = 'Delivered'
rewards.append(r)
self.Rewards = rewards
def __parse_line(self, line: str):
items_to_parse = ["X", "Z", "Rot", "MX", "MY", "GainX", "GainY", "Fading", "RealTimeGainX", "RealTimeGainY", "Dark"]
values_to_return = dict.fromkeys(items_to_parse, 0.0)
for item in line.split():
key = item.split('=')
if key[0] in items_to_parse:
values_to_return[key[0]] = float(key[-1])
return values_to_return.values()
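    # Example of a line this parser accepts (values are illustrative):
    #     "2021-01-01 12:00:00.000000 ... X=1.0 Z=2.0 Rot=100 GainX=1 GainY=1"
    # Unrecognised tokens are ignored and missing keys default to 0.0.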
def make_dataframe(self):
# Get the unique times for all events in the logfile
# be they position or reward occurences. These are used
# as the indices for the pandas DataFrame
pos_lines_set = set([line.date_time for line in self.PosLines])
reward_set = set([line.date_time for line in self.Rewards])
unique_times = list(set.union(pos_lines_set, reward_set))
unique_times.sort()
first_time = unique_times[0]
self.first_time = first_time
unique_times = [times - first_time for times in unique_times]
pos_d = {'PosX': pd.Series([line.X for line in self.PosLines], index=[line.date_time - first_time for line in self.PosLines]),
'PosY': pd.Series([line.Z for line in self.PosLines], index=[line.date_time - first_time for line in self.PosLines])
}
pos_d = pd.DataFrame(pos_d)
reward_d = pd.DataFrame([line for line in self.Rewards], index=[line.date_time - first_time for line in self.Rewards])
d = pos_d.append(reward_d)
return d.sort_index()
def __get_float_val__(self, line: str) -> float:
return float(line.split("=")[-1])
def __get_int_val__(self, line: str) -> int:
return int(line.split("=")[-1])
def __get_reward__(self, line: str) -> Reward:
items = line.split()
date_time = items[0] + " " + items[1]
dt = datetime.strptime(date_time, format_string)
X = self.__get_float_val__(items[-2])
Z = self.__get_float_val__(items[-1])
return Reward(dt, X, Z)
def getX(self) -> list:
return [line.X for line in self.PosLines]
def getZ(self) -> list:
return [line.Z for line in self.PosLines]
def getTheta(self) -> list:
return [line.Theta for line in self.PosLines]
def getPosTimes(self) -> list:
return [line.date_time for line in self.PosLines]
def analyse_rewards(self):
df = self.make_dataframe()
not_nans = ~df['reward_type'].isna()
delivered = df['reward_type'] == 'Delivered'
dropped_rewards = df[np.logical_and(not_nans, ~delivered)]
delivered_rewards = df[np.logical_and(not_nans, delivered)]
times = self.getPosTimes()
trial_duration = (times[-1]-times[0]).total_seconds()
print(f"Trial duration(s): {trial_duration}")
delivered_times = []
dropped_times = []
time_taken_to_deliver = []
for index, row in delivered_rewards.iterrows():
rx = row.rX
rz = row.rZ
delivered_time = index
dropped_index = np.logical_and(dropped_rewards['rX']==rx, dropped_rewards['rZ']==rz)
dropped = dropped_rewards[dropped_index]
if len(dropped) > 0:
dropped_time = dropped.index[0]
dropped_times.append(dropped_time)
delivered_times.append(delivered_time)
time_taken_to_deliver.append((delivered_time-dropped_time).total_seconds())
print(f"Total dropped rewards = {len(dropped_times)}")
print(f"Total delivered rewards = {len(delivered_times)}")
# iterate through the list of dropped and delivered times pulling out the x and y
# segments
x = self.getX()
y = self.getZ()
xmin = np.abs(np.min(x))
ymin = np.abs(np.min(y))
tortuosity = []
for dropped, delivered in zip(dropped_times, delivered_times):
start = df.index.searchsorted(dropped) # returns integer index into df
end = df.index.searchsorted(delivered)
sub_df = df[start:end]
x = np.array(sub_df.PosX)
            y = np.array(sub_df.PosY)
import pytest
import numpy
import io
from gym.spaces import Box
from unittest import mock
def test_attributes(tennis_env):
assert tennis_env.action_space.n == 2 ** 2
assert tennis_env.observation_space == Box(low=1., high=float('Inf'), shape=(1, 2))
assert tennis_env.starting_bank == 10
assert tennis_env.balance == tennis_env.starting_bank
assert tennis_env.current_step == 0
assert numpy.array_equal(tennis_env.bet_size_matrix, numpy.ones(shape=(1, 2)))
assert numpy.array_equal(tennis_env.players, numpy.array([['<NAME>.', '<NAME>.'],
['<NAME>.', '<NAME>.']]))
@pytest.mark.parametrize("action,expected_reward", [(0, 0),
(1, -1),
(2, 0.11),
(3, -0.89)])
def test_step(tennis_env, action, expected_reward):
odds, reward, done, _ = tennis_env.step(action)
    numpy.testing.assert_almost_equal(reward, expected_reward, 2)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from onnx_tf.backend import prepare
from onnx import helper
from onnx import TensorProto
from onnx_tf.common.legacy import legacy_onnx_pre_ver
class TestModel(unittest.TestCase):
""" Tests for models
"""
def _get_rnd(self, shape, low=-1.0, high=1.0):
return np.random.uniform(low, high, np.prod(shape)) \
.reshape(shape) \
.astype(np.float32)
def test_relu_node_inplace(self):
X = np.random.randn(3, 2).astype(np.float32)
Y_ref = np.clip(X, 0, np.inf)
node_def = helper.make_node("Relu", ["X"], ["X1"])
graph_def = helper.make_graph(
[node_def],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
outputs=[
helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])
])
tf_rep = prepare(helper.make_model(graph_def))
output = tf_rep.run({"X": X})
np.testing.assert_almost_equal(output.X1, Y_ref)
def test_initializer(self):
if legacy_onnx_pre_ver(1, 2):
raise unittest.SkipTest(
"The current version of ONNX does not record correctly the opset of Cast."
)
X = np.array([[1, 2], [3, 4]]).astype(np.float32)
Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
weight = np.array([[1, 0], [0, 1]])
graph_def = helper.make_graph(
[
helper.make_node("Add", ["X", "Y"], ["Z0"]),
helper.make_node("Cast", ["Z0"], ["Z"], to=TensorProto.FLOAT),
helper.make_node("Mul", ["Z", "weight"], ["W"]),
helper.make_node("Tanh", ["W"], ["W1"]),
helper.make_node("Sigmoid", ["W1"], ["W2"])
],
name="test_initializer",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2)),
helper.make_tensor_value_info("weight", TensorProto.FLOAT, (2, 2)),
],
outputs=[
helper.make_tensor_value_info("W2", TensorProto.FLOAT, (2, 2))
],
initializer=[
helper.make_tensor("weight", TensorProto.FLOAT, [2, 2],
weight.flatten().astype(float))
])
def sigmoid(x):
            return 1 / (1 + np.exp(-x))
#
# Copyright 2016-2018 Games Creators Club
#
# MIT License
#
import math
import time
import telemetry
import traceback
import numpy
import cv2
import PIL
import PIL.Image
from PIL import ImageDraw
import pyroslib
import pyroslib.logging
from pyroslib.logging import log, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG, LOG_LEVEL_ALWAYS
from rover import RoverState, normaiseAngle, angleDiference
from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID
MINIMUM_SPEED = 60
MIN_ANGLE = 0.5
MAX_ANGLE = 45
HEADING_MIN_DISTANCE = 150
WALL_SPEED = 210
CORNER_SPEED = 170
CORNER_CROSS_SPEED = 240
MAX_CORNER_DISTANCE = 700
pyroslib.logging.LOG_LEVEL = LOG_LEVEL_INFO
remotDebug = True
size = (80, 64)
class CameraData:
def __init__(self):
self.found = {'red': None, 'blue': None, 'yellow': None, 'green': None}
def reset(self):
self.found['red'] = None
self.found['blue'] = None
self.found['yellow'] = None
self.found['green'] = None
def hasAll(self):
return self.found['red'] is not None and self.found['blue'] is not None and self.found['yellow'] is not None and self.found['green'] is not None
def getFound(self):
return self.found
def foundAsString(self):
return " ".join([("" if v is None else str(v)) + ":" + k for k, v in self.found.items()])
def setData(self, colour, data):
if not self.hasAll():
self.found[colour] = data
for c in self.found:
if c != colour and self.found[c] == data:
self.found[c] = None
def missingColours(self):
return ", ".join([p for p in self.found if self.found[p] is None])
class WaitCameraData(Action):
def __init__(self, agent, next_action):
super(WaitCameraData, self).__init__(agent)
self.foundColours = agent.foundColours
self.next_action = next_action
self.started_scanning_time = None
def start(self):
self.started_scanning_time = time.time()
self.foundColours.reset()
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/raw/fetch", "")
pyroslib.publish("camera/wheels/raw/fetch", "")
pyroslib.publish("camera/camera1/raw/fetch", "")
pyroslib.publish("camera/camera2/raw/fetch", "")
self.agent.log_info("Started a wait for all camera data to arrive...")
def next(self):
if self.foundColours.hasAll():
self.agent.log_info("Scanning lasted " + ("{:7.3f}".format(time.time() - self.started_scanning_time)) + "!")
self.agent.log_info("Received all colours " + ("stopping" if self.next_action is None else "starting action " + str(self.next_action.getActionName())))
return self.next_action
return self
def execute(self):
self.agent.log_info("Waiting for sensor data to arrive...")
def getActionName(self):
return "Scan"
class NebulaAction(Action):
def __init__(self, agent, speed, next_action):
super(NebulaAction, self).__init__(agent)
self.speed = speed
self.next_action = next_action
self.direction_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.heading_pid = PID(0.3, 0, 0.01, 1, 0, diff_method=angleDiference)
self.distance_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.distance_error = 0
self.rover_speed = 0
self.required_corner_distance = 210
self.required_side_distance = 150
self.required_keeping_side_distance = 180
self.last_speed = 0
self.last_speed_time = 0
def obtainRoverSpeed(self):
self.rover_speed = self.rover.wheel_odos.averageSpeed() / 10
self.rover_speed = 25
def keepHeading(self):
state = self.rover.getRoverState()
# Keeping heading
heading = state.heading.heading
heading_output = -self.heading_pid.process(0, heading)
if -MIN_ANGLE < heading_output < MIN_ANGLE:
distance = 32000
else:
heading_fix_rad = heading_output * math.pi / 180
distance = self.rover_speed / heading_fix_rad
if 0 <= distance < HEADING_MIN_DISTANCE:
distance = HEADING_MIN_DISTANCE
elif -HEADING_MIN_DISTANCE < distance < 0:
distance = -HEADING_MIN_DISTANCE
return distance, heading_output
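    # Worked example of the arc geometry above (hypothetical numbers): with
    # rover_speed 25 and a heading_output of 5 degrees, the turn radius is
    # 25 / (5 * pi / 180) ~= 286, widened to HEADING_MIN_DISTANCE whenever
    # it would come out tighter than that.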
def keepDirection(self, requested_angle, setpoint_distance, current_distance):
state = self.rover.getRoverState()
# Keeping direction
angle_output = self.direction_pid.process(setpoint_distance, current_distance)
angle = 0
if abs(angle_output) < 1:
angle = 0
elif angle_output > 0 and angle_output > self.rover_speed:
angle = math.pi / 4
elif angle_output < 0 and angle_output < -self.rover_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(angle_output / self.rover_speed)
            except ValueError:
                # math.asin raises ValueError when rounding pushes its
                # argument outside [-1, 1]
                self.agent.log_always("Domain error")
if angle > MAX_ANGLE:
angle = MAX_ANGLE
elif angle < -MAX_ANGLE:
angle = -MAX_ANGLE
angle = int(requested_angle + angle * 180 / math.pi)
return angle, angle_output
def calculateSpeed(self, speed_time):
# Defining forward speed
if self.last_speed_time == speed_time:
return self.last_speed
if self.distance_error <= 0:
speed = -self.distance_error
if speed > self.speed:
speed = self.speed
elif speed < MINIMUM_SPEED:
speed = MINIMUM_SPEED
else:
speed = -self.distance_error
if speed > -MINIMUM_SPEED:
speed = -MINIMUM_SPEED
elif speed < -self.speed:
speed = -self.speed
self.last_speed = speed
self.last_speed_time = speed_time
return speed
def start(self):
super(NebulaAction, self).start()
# self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
# self.direction_pid = PID(0.20, 0, 0.005, 1, 0)
# self.heading_pid = PID(0.25, 0.0, 0.01, 1, 0, diff_method=angleDiference)
def end(self):
super(NebulaAction, self).end()
class GoToCornerKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, angle, next_action=None):
super(GoToCornerKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.angle = angle
self.prev_angle = angle - 45
self.next_angle = angle + 45
if self.prev_angle < 0:
self.prev_angle += 360
if self.next_angle >= 360:
self.next_angle -= 360
def hasRadar(self, state):
return state.radar.radar[self.prev_angle] > 1 and state.radar.radar[self.next_angle] > 1 and state.radar.radar[self.angle] > 1
def start(self):
super(GoToCornerKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.prev_angle) + " " + str(self.next_angle) + " " + str(self.angle))
self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
self.direction_pid = PID(0.20, 0, 0.02, 0.4, 0)
self.heading_pid = PID(0.25, 0.0, 0.01, 0.5, 0, diff_method=angleDiference)
self.agent.log_info("Starting Corner with prev_angle={: 3d} angle={: 3d} next_angle={: 3d}".format(self.prev_angle, self.angle, self.next_angle))
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar prev_angle[{0}]={1} angle[{2}]={3} next_angle[{4}]={5}".format(
self.prev_angle, int(state.radar.radar[self.prev_angle]) if state.radar.radar[self.prev_angle] is not None else "-",
self.angle, int(state.radar.radar[self.angle]) if state.radar.radar[self.angle] is not None else "-",
self.next_angle, int(state.radar.radar[self.next_angle]) if state.radar.radar[self.next_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
corner_distance = state.radar.radar[self.angle]
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
self.distance_error = self.distance_pid.process(self.required_corner_distance, corner_distance)
average_side = int((left_side + right_side) / 2)
if left_side > right_side:
ratio = left_side / right_side
else:
ratio = right_side / left_side
if corner_distance < self.required_corner_distance:
self.agent.log_info(
"reached corner distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
if average_side < self.required_side_distance:
self.agent.log_info(
"reached side distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
corner_distance = state.radar.radar[self.angle]
distance, heading_output = self.keepHeading()
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
angle, angle_output = self.keepDirection(self.angle, right_side, left_side)
speed = self.calculateSpeed(state.radar.time)
if corner_distance > MAX_CORNER_DISTANCE:
angle = self.angle
speed = CORNER_CROSS_SPEED
corner_distance = state.radar.radar[self.angle]
self.agent.log_info("rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
# distance = 32000
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Corner[{:3d}]".format(self.angle)
class FollowWallKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, wall_angle, direction_angle, next_action=None):
super(FollowWallKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.wall_angle = wall_angle
self.direction_angle = direction_angle
@staticmethod
def calculateRealDistance(side_distance, side_angle):
if side_distance < 1:
return 0
if side_angle > 180:
side_angle = 360 - side_angle
side_angle = side_angle * math.pi / 180
return math.sin(math.pi / 2 - side_angle) * side_distance
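    # Quick sketch of the projection above (made-up numbers): a side reading
    # of 200 taken while yawed 30 degrees off the wall normal projects to
    # sin(90deg - 30deg) * 200 = cos(30deg) * 200 ~= 173.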
def hasRadar(self, state):
return state.radar.radar[self.wall_angle] > 1 and state.radar.radar[self.direction_angle] > 1
def start(self):
super(FollowWallKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.wall_angle) + " " + str(self.direction_angle))
self.distance_pid = PID(0.85, 0.1, 0.2, 0.8, 0)
self.direction_pid = PID(0.20, 0, 0.01, 0.6, 0)
self.heading_pid = PID(0.25, 0.02, 0.0, 1, 0, diff_method=angleDiference)
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar wall_angle[{0}]={1} direction_angle[{2}]={3}".format(
self.wall_angle, int(state.radar.radar[self.wall_angle]) if state.radar.radar[self.wall_angle] is not None else "-",
self.direction_angle, int(state.radar.radar[self.direction_angle]) if state.radar.radar[self.direction_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
wall_distance = state.radar.radar[self.wall_angle]
front_distance = state.radar.radar[self.direction_angle]
self.distance_error = self.distance_pid.process(self.required_side_distance, front_distance)
if front_distance < self.required_side_distance:
self.agent.log_info("reached distance rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wall_dist={: 5d} heading={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wall_distance),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
distance, heading_output = self.keepHeading()
wall_distance = self.calculateRealDistance(state.radar.radar[self.wall_angle], state.heading.heading)
if angleDiference(self.wall_angle, self.direction_angle) > 0:
angle, angle_output = self.keepDirection(self.direction_angle, wall_distance, self.required_keeping_side_distance)
else:
angle, angle_output = self.keepDirection(self.direction_angle, self.required_keeping_side_distance, wall_distance)
speed = self.calculateSpeed(state.radar.time)
front_distance = state.radar.radar[self.direction_angle]
self.agent.log_info("rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wall_dist={: 5d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wall_distance), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Wall[{0} on {1}]".format(self.direction_angle, self.wall_angle)
class CalculateRouteAction(Action):
def __init__(self, agent, speed, foundColours, next_action):
super(CalculateRouteAction, self).__init__(agent)
self.speed = speed
self.foundColours = foundColours
self.next_action = next_action
self.colour_order = ['red', 'blue', 'yellow', 'green']
log(LOG_LEVEL_INFO, "Colour order " + str(self.colour_order))
self.wait = 0
self.prepared_action = None
def calcualteAction(self, from_angle, to_colour):
to_angle = self.foundColours.found[to_colour]
colour_index = self.colour_order.index(to_colour)
if colour_index < 3:
following_action = self.calcualteAction(to_angle, self.colour_order[colour_index + 1])
else:
following_action = self.next_action
# follow_wall_speed = self.speed
# go_to_corner_speed = self.speed
follow_wall_speed = WALL_SPEED
go_to_corner_speed = CORNER_SPEED
if normaiseAngle(from_angle + 90) == to_angle:
wall_angle = normaiseAngle(from_angle + 45)
direction_angle = normaiseAngle(wall_angle + 90)
# return FollowWallKeepingHeadingAction(self.agent, self.speed, wall_angle, direction_angle, following_action)
return FollowWallKeepingHeadingAction(self.agent, follow_wall_speed, wall_angle, direction_angle, following_action)
elif normaiseAngle(from_angle - 90) == to_angle:
wall_angle = normaiseAngle(from_angle - 45)
direction_angle = normaiseAngle(wall_angle - 90)
# return FollowWallKeepingHeadingAction(self.agent, self.speed, wall_angle, direction_angle, following_action)
return FollowWallKeepingHeadingAction(self.agent, follow_wall_speed, wall_angle, direction_angle, following_action)
else:
# return GoToCornerKeepingHeadingAction(self, self.speed, to_angle, following_action)
return GoToCornerKeepingHeadingAction(self.agent, go_to_corner_speed, to_angle, following_action)
def next(self):
if self.wait == 0:
self.agent.log_info("Calculating route (1) -> Corner " + str(self.foundColours.found['red']))
initial_angle = self.foundColours.found['red']
following_action = self.calcualteAction(initial_angle, 'blue')
i = 1
a = following_action
while a != self.next_action:
i += 1
if isinstance(a, GoToCornerKeepingHeadingAction):
self.agent.log_info("Calculating route (" + str(i) + ") -> Corner " + str(a.angle))
a = a.next_action
else:
self.agent.log_info("Calculating route (" + str(i) + ") -> Follow wall " + str(a.wall_angle) + " to " + str(a.direction_angle))
a = a.next_action
self.prepared_action = GoToCornerKeepingHeadingAction(self.agent, self.speed, initial_angle, following_action)
self.wait = 2
self.rover.command(pyroslib.publish, 0, initial_angle, 32000)
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
else:
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
self.wait -= 1
if self.wait == 0:
return self.prepared_action
return self
def getActionName(self):
return "Calculate"
class StraightWheelsAction(Action):
def __init__(self, agent, next_action):
super(StraightWheelsAction, self).__init__(agent)
self.next_action = next_action
def next(self):
self.rover.command(pyroslib.publish, 0, 0, 3200)
return self.next_action
class NebulaAgent(AgentClass):
def __init__(self):
super(NebulaAgent, self).__init__("nebula")
self.foundColours = CameraData()
def connected(self):
super(NebulaAgent, self).connected()
pyroslib.subscribeBinary("camera/raw", self.handleCameraMain)
pyroslib.subscribeBinary("camera/wheels/raw", self.handleCameraWheels)
pyroslib.subscribeBinary("camera/camera1/raw", self.handleCamera1)
pyroslib.subscribeBinary("camera/camera2/raw", self.handleCamera2)
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
def start(self, data):
if not self.running:
if data[0] == 'nebula':
super(NebulaAgent, self).start(data)
# speed = int(data[1])
speed = 160
speed = 200
calculate_route_action = CalculateRouteAction(self, speed, self.foundColours, self.stop_action)
wait_camera_data_action = WaitCameraData(self, calculate_route_action)
wait_sensor_data_action = WaitSensorData(self, wait_camera_data_action)
# self.nextAction(wait_sensor_data_action)
self.nextAction(wait_camera_data_action)
elif data[0] == 'warmup':
# super(NebulaAgent, self).start(data)
self.nextAction(StraightWheelsAction(self, WaitSensorData(self, WarmupAction(self))))
elif data[0] == 'scan':
super(NebulaAgent, self).start(data)
self.nextAction(WaitCameraData(self, self.stop_action))
elif data[0] == 'combo':
super(NebulaAgent, self).start(data)
combo = data[1]
# go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, self.stop_action)
# follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 0, go_to_corner2_action)
# go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wall_action)
# follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner1_action)
# wait_sensor_data_action = WaitSensorData(self, follow_left_wall_action)
if combo == '1':
# Comb 1
go_to_corner3_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 315, self.stop_action)
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, go_to_corner3_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 45, follow_right_wall_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, go_to_corner2_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '2':
# Comb 2
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 0, self.stop_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner2_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wall_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '3':
# Comb 3
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, self.stop_action)
follow_top_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wall_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wall_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
else:
wait_sensor_data_action = WaitSensorData(self, self.stop_action)
self.nextAction(wait_sensor_data_action)
elif data[0] == 'walls':
super(NebulaAgent, self).start(data)
follow_bottom_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 180, 270, self.stop_action)
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, follow_bottom_wall_action)
follow_top_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wall_action)
wait_sensor_data_action = WaitSensorData(self, follow_left_wall_action)
self.nextAction(wait_sensor_data_action)
def handleCameraData(self, topic, message, source):
# now = time.time()
# delta = now - lastProcessed
# lastProcessed = now
pilImage = self._toPILImage(message)
openCVImage = numpy.array(pilImage)
result, value = self.processImageCV(openCVImage)
self.log_info("For " + str(source) + " got " + ("None" if result is None else str(result)) + " for value " + str(value))
if result is not None:
self.foundColours.setData(result, source)
if not self.foundColours.hasAll():
self.log_info("Found " + self.foundColours.foundAsString() + " but not finished yet as " + self.foundColours.missingColours() + " " + ("are" if len(self.foundColours.missingColours()) > 1 else "is") + " still missing.")
if self.running:
pyroslib.publish(topic + "/fetch", "")
pyroslib.publish("nebula/imagedetails", "working: " + self.foundColours.foundAsString())
else:
self.log_info("So far " + self.foundColours.foundAsString() + " and finishing...")
stopped = True
pyroslib.publish("nebula/imagedetails", "found: " + self.foundColours.foundAsString())
def handleCameraMain(self, topic, message, groups):
self.handleCameraData(topic, message, 225)
def handleCameraWheels(self, topic, message, groups):
self.handleCameraData(topic, message, 45)
def handleCamera1(self, topic, message, groups):
self.handleCameraData(topic, message, 315)
def handleCamera2(self, topic, message, groups):
self.handleCameraData(topic, message, 135)
@staticmethod
def _toPILImage(imageBytes):
pilImage = PIL.Image.frombytes("RGB", size, imageBytes)
return pilImage
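# A minimal sketch (editorial, not in the original source) of the expected
# payload, assuming 'size' is the module-level (width, height) tuple used
# in the camera format messages published above:
#
#   expected_len = size[0] * size[1] * 3  # 3 bytes per RGB pixel
#   assert len(imageBytes) == expected_len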
def processImageCV(self, image):
def findColourNameHSV(hChannel, contour):
mask = numpy.zeros(hChannel.shape[:2], dtype="uint8")
cv2.drawContours(mask, [contour], -1, 255, -1)
mask = cv2.erode(mask, None, iterations=2)
maskAnd = hChannel.copy()
cv2.bitwise_and(hChannel, mask, maskAnd)
pyroslib.publish("nebula/processed", PIL.Image.fromarray(cv2.cvtColor(maskAnd, cv2.COLOR_GRAY2RGB)).tobytes("raw"))
self.log_debug("Published mask ")
hist = cv2.calcHist([hChannel], [0], mask, [255], [0, 255], False)
value = numpy.argmax(hist)
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
import sys
import math
import time
import pprint
import scipy.misc
import traceback
import random
import os
from skimage.transform import resize
class CvFuncs:
#plate control
_is_broad = False
#normal plates
_charHeightMin = 34
_charHeightMax = 58
_n_vertical_deviation = 25
#broad plates
_b_charHeightMin = 34
_b_charHeightMax = 58
_b_vertical_deviation = 30
#common filters
_min_char_width = 5
#the maximum width of the bounding box of detected contours; boxes such as the ones
#encompassing the whole plate are eliminated so they do not skew the weights, cogs and average widths
_box_width_max = 80
_neighbor_gap_max = 15 #how many pixels can the group of character's bounding rects be apart
_xDeviation = 10 #in pixels
_yDeviation = 10 #in pixels
debugEnabled = False #generates crazy amount of intermediate images which are useful for debugging
imageStoreDir = "" #location of images to store at
currentlyProcessedFileName = ""
#data type of records in the list
dtype_letter_rect = [('x',int),('y',int),('w',int),('h',int),('cogx',float),('cogy',float),('weight',int),('area',int)]
brects_unsorted = []
brects_sorted = []
processed_cogx_list = []
processed_cogy_list = []
potential_char_height_avg = 0
y_avg = 0
box_width_avg = 0
max_allowed_char_width = 0
eligible_box_area_avg = 0
_width = 0
_height = 0
average_color = 0
#timings
time_taken_by_skewCorrection = 0
time_taken_by_analyzeRects = 0
time_taken_by_breakupBoxesAndCalcWeights = 0
time_taken_by_eliminateByCog = 0
time_taken_by_eliminateByArea = 0
time_taken_by_determineBlackOrWhite = 0
time_taken_by_findAndAppendContours = 0
time_taken_by_extractLetters = 0
time_taken_by_findRectsNormalCase = 0
time_taken_by_assignNeighborWeights = 0
time_taken_by_eliminateByYDeviation = 0
#images
plate = None #the extracted plate region from the input image
thresh = None # this is the image we extract letters from
masked = None
white_bg = None
def reset(self):
self.brects_unsorted = []
self.brects_sorted = []
self.processed_cogx_list = []
self.potential_char_height_avg = 0
self.y_avg = 0
self.box_width_avg = 0
self.max_allowed_char_width = 0
self.eligible_box_area_avg = 0
self._width = 0
self._height = 0
self.average_color = 0
#timings
self.time_taken_by_skewCorrection = 0
self.time_taken_by_analyzeRects = 0
self.time_taken_by_breakupBoxesAndCalcWeights = 0
self.time_taken_by_eliminateByCog = 0
self.time_taken_by_eliminateByArea = 0
self.time_taken_by_determineBlackOrWhite = 0
self.time_taken_by_findAndAppendContours = 0
self.time_taken_by_extractLetters = 0
self.time_taken_by_findRectsNormalCase = 0
self.time_taken_by_assignNeighborWeights = 0
self.time_taken_by_eliminateByYDeviation = 0
#images
self.plate = None #the extracted plate region from the input image
self.thresh = None # this is the image we extract letters from
self.masked = None
self.white_bg = None
def makeIntermediateFileName(self, originalFilename, auxFileName):
return "{}/{}_{}.jpg".format(self.imageStoreDir, originalFilename, auxFileName)
def randomColor(self):
return (255*random.random(), 255*random.random(), 255*random.random())
def saveRoundImage(self, round, filename, forceSave = False):
"""Utility function for saving images with
highlighted brects_sorted drawn
"""
if not self.debugEnabled and forceSave is False:
return
round_img = self.plate.copy()
i = 0
while i < len(self.brects_sorted):
x,y,w,h,cogx,cogy,wr,area = self.brects_sorted[i]
cv2.rectangle(round_img, (x, y), (x + w, y + h), self.randomColor(), 1)
i = i + 1
#round_img_filename = "{}.round.{}.png".format(filename, round)
round_img_filename = self.makeIntermediateFileName(filename, round)
debugPath = os.path.join('.','debug')
if not os.path.exists(debugPath):
os.makedirs(debugPath)
filePath = os.path.join(debugPath, round_img_filename)
cv2.imwrite(filePath, round_img)
def correct_skew(self, image):
timeStart = time.time()
#copied from http://www.pyimagesearch.com/2017/02/20/text-skew-correction-opencv-python/
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(image)
thresh = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
coords = np.column_stack(np.where(thresh > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
# rotate the image to deskew it
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),
flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
self.time_taken_by_skewCorrection = time.time() - timeStart
return rotated
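# Editorial sketch of the angle convention the branch above relies on
# (pre-4.5 OpenCV, where minAreaRect angles lie in (-90, 0]):
#
#   angle = cv2.minAreaRect(coords)[-1]          # in (-90, 0]
#   deskew = -(90 + angle) if angle < -45 else -angle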
def analyzeRects(self,filename):
"""Round 1
pre process - calculate averages, avg width, avg height etc
Gather statistics on the sorted rects for decision making
Filters out rects which do not fall between the valid character heights min
and max.
Call after all rects have been found
"""
timeStart = time.time()
potential_letters_y_sum =0
potential_letters_count = 0
potential_letters_height_sum =0
box_width_sum =0
box_width_count = 0
average_gap_sum = 0 #TODO: calculate, then exclude boxes whose gap from the rest of the group is too large
i = 0
while i < len(self.brects_sorted):
x,y,w,h,cogx,cogy,wr,area = self.brects_sorted[i]
valid_roi = False
if ((h >= self._charHeightMin and h <= self._charHeightMax) and w >= self._min_char_width):
valid_roi = True
if valid_roi and w <= self._box_width_max:
box_width_sum = box_width_sum + w
box_width_count = box_width_count + 1
potential_letters_y_sum = potential_letters_y_sum + y
potential_letters_height_sum = potential_letters_height_sum + h # (y+h)-y reduces to h
potential_letters_count = potential_letters_count + 1
else:
#print("del {}".format(self.brects_sorted[i]))
self.brects_sorted = np.delete(self.brects_sorted, i)
continue
#process next
i = i+1
#avoid divide by 0 errors..
if potential_letters_count == 0:
potential_letters_count = 1
if box_width_count == 0:
box_width_count = 1
if len(self.brects_sorted) == 0:
return
self.potential_char_height_avg = int(potential_letters_height_sum/potential_letters_count)
self.y_avg = int(self._height / 2)
self.box_width_avg = int(box_width_sum / box_width_count)
# self.max_allowed_char_width = int(self.box_width_avg)
if(self.debugEnabled):
#save round image
self.saveRoundImage('analyze-rects',filename)
print("y_avg [{}] box_width_avg [{}] max_allowed_char_width[{}] potential_char_height_avg[{}]" .format(self.y_avg, self.box_width_avg, self.max_allowed_char_width, self.potential_char_height_avg))
print("Round 1 rects are: {}".format(len(self.brects_sorted)))
pprint.pprint(self.brects_sorted)
self.time_taken_by_analyzeRects = time.time() - timeStart
def eliminateByYDeviation(self, filename):
timeStart = time.time()
v_deviation = self._n_vertical_deviation
# if self._is_broad:
# v_deviation = self._b_vertical_deviation
imgh,imgw = self.thresh.shape
imgh = imgh // 2
i = 0
while i < len(self.brects_sorted):
x,y,w,h,cogx,cogy,wr,area = self.brects_sorted[i]
if self.debugEnabled:
print("y[{}] y_avg[{}] abs(y-y_avg)[{}] v_dev[{}] [{}]".format( y, self.y_avg, abs(y-self.y_avg), v_deviation, self.brects_sorted[i]))
# if (abs(y-self.y_avg) > v_deviation):
if abs(y-imgh) > 30:
#remove the rect as it is not what we are interested in
if self.debugEnabled:
print("del for y_deviation [{}] <--\n".format(self.brects_sorted[i]))
self.brects_sorted = np.delete(self.brects_sorted, i)
continue
#process next
i = i + 1
self.brects_sorted = np.sort(self.brects_sorted, order=['x'])
if self.debugEnabled:
#save round image
self.saveRoundImage('y_deviation',filename)
print("eliminateByYDeviation leaving with [{}] rects".format(len(self.brects_sorted)))
pprint.pprint(self.brects_sorted)
self.time_taken_by_eliminateByYDeviation = time.time() - timeStart
def breakupBoxesAndCalcWeights(self,filename):
"""Round 2
pre-process - break up any wider boxes into smaller ones of average char width
and calculate weights based on how close the neighbors are; for consecutive letters,
the gap between (x+w) of one box and x of the next box must be less than "padding"
"""
timeStart = time.time()
eligible_box_area_sum = 0
eligible_box_count = 0
i = 0
while i < len(self.brects_sorted):
x,y,w,h,cogx,cogy,wr,area = self.brects_sorted[i]
#outright discard boxes > 3 x max_allowed_char_width as that's noise
if (w > 3*self.max_allowed_char_width):
if self.debugEnabled:
print("Round 2 - del for 3*max_allowed_char_width({}) [{}]\n".format(3*self.max_allowed_char_width,self.brects_sorted[i]))
self.brects_sorted = np.delete(self.brects_sorted, i)
continue
if(h<20):
if self.debugEnabled:
print("h<20 [{}]\n".format(self.brects_sorted[i]))
self.brects_sorted = np.delete(self.brects_sorted, i)
continue
if (w > self.max_allowed_char_width):
# if (w > h):
boxes_to_breakup_into = 2
#width_per_box = w / boxes_to_breakup_into
width_per_box = w / 2
#print("w[{}] max_w[{}] parts[{}] - box [{},{},{},{}]".format(w,2, boxes_to_breakup_into, x,y,w,h))
if boxes_to_breakup_into > 1:
#remove this box from brects_sorted
self.brects_sorted = np.delete(self.brects_sorted, i)
for ibox in range(0, boxes_to_breakup_into):
#print( "new region x[{}] y[{}] w[{}] h[{}]\n".format(x+ (ibox*width_per_box), y, width_per_box,h))
nx = x+ (ibox*width_per_box)
ncogx = (nx + (nx+width_per_box))/2
ncogy = (y + (y+h))/2
self.brects_sorted = np.append(
self.brects_sorted,
np.array([
(nx, y, width_per_box,h,ncogx,cogy,0,width_per_box*h)
], dtype=self.dtype_letter_rect)
)
#dont increment index as current was deleted and the next one
#is already in it's place
continue
else: #see below... increment to next element
eligible_box_area_sum = eligible_box_area_sum + (w*h)
eligible_box_count = eligible_box_count + 1
else:
eligible_box_area_sum = eligible_box_area_sum + (w*h)
eligible_box_count = eligible_box_count + 1
#process next
i = i + 1
self.brects_sorted = np.sort(self.brects_sorted, order=['x'])
#avoid divide by 0 errors
if eligible_box_count ==0:
eligible_box_count = eligible_box_count + 1
self.eligible_box_area_avg = eligible_box_area_sum/eligible_box_count
if self.debugEnabled:
#save round image
self.saveRoundImage('newRects',filename)
print("breakupBoxesAndCalcWeights rects are: {}".format(len(self.brects_sorted)))
pprint.pprint(self.brects_sorted)
self.time_taken_by_breakupBoxesAndCalcWeights = time.time() - timeStart
def cog_doElimination(self,filename):
#sort by width so that smaller boxes are kept and larger are eliminated
# self.brects_sorted=np.sort(self.brects_sorted, order=['w'])
i = 0
while i < len(self.brects_sorted):
x,y,w,h,cogx,cogy,wr,area = self.brects_sorted[i]
if self.debugEnabled:
print("Comparing [{}][{}]@[{}]".format(cogx, cogy, self.brects_sorted[i]))
j = i+1
while j < len(self.brects_sorted):
x_j,y_j,w_j,h_j,cogx_j,cogy_j,wr_j,area_j = self.brects_sorted[j]
if self.debugEnabled:
print("\t with [{}][{}]@[{}]".format(cogx_j, cogy_j, self.brects_sorted[j]))
found_gx = False
found_gy = False
if abs(cogx_j-cogx) <= self._xDeviation:
found_gx = True
if abs(cogy_j-cogy) <= self._yDeviation:
found_gy = True
if found_gx and found_gy:
if self.debugEnabled:
print("deleted (j) cog[{}][{}]@[{}] <--\n".format(cogx,cogy, self.brects_sorted[j]))
self.brects_sorted = np.delete(self.brects_sorted, j)
#break
else:
j = j+1
i = i + 1
# #restore ordering to order by x
# self.brects_sorted=np.sort(self.brects_sorted, order=['x'])
if self.debugEnabled:
#save round image
self.saveRoundImage('cog',filename)
print("[cog] round rects are: {}".format(len(self.brects_sorted)))
pprint.pprint(self.brects_sorted)
def eliminateByCog(self, filename):
#print("eliminateByCog entered")
timeStart = time.time()
self.cog_doElimination(filename)
self.time_taken_by_eliminateByCog = self.time_taken_by_eliminateByCog + (time.time() - timeStart)
#print("eliminateByCog exiting")
def doRectsOverlap(self, r1l,r1r,r1t,r1b, r2l, r2r, r2t, r2b):
"""
https://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
Callers pass image coordinates (y grows downward), so the vertical
comparisons are flipped relative to the linked answer.
"""
return (r1l < r2r and r1r > r2l and r1t < r2b and r1b > r2t)
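# Editorial sketch: two boxes overlap only when they overlap on both axes,
# e.g. in image coordinates (left, right, top, bottom):
#   doRectsOverlap(0, 10, 0, 10,  5, 15, 5, 15)  -> True  (boxes overlap)
#   doRectsOverlap(0, 10, 0, 10, 20, 30, 0, 10)  -> False (disjoint in x)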
def eliminateByOverlap(self):
"""
Compares every rect with others and
Discards one of the two rects with larger area
"""
#print("eliminateByOverlap entered")
#we make copy of the brects_sorted as we will be sorting by area
sorted_rects = np.sort(self.brects_sorted, order=['area'])
i = 0
while i < len(sorted_rects)-1:
x1,y1,w1,h1,cogx1,cogy1,wr1,area1 = sorted_rects[i]
x2,y2,w2,h2,cogx2,cogy2,wr2,area2 = sorted_rects[i+1]
#print("eliminateByOverlap entered 2")
#eliminated = False
if self.doRectsOverlap(x1,x1+w1, y1, y1+h1, x2, x2+w2, y2, y2+h2):
# eliminated = True
msg = "Deleting rect at: "
#eliminate the larger of the two rects
if area1 > area2:
sorted_rects = np.delete(sorted_rects, i)
msg = msg + str(i)
else:
sorted_rects = np.delete(sorted_rects, i+1)
msg = msg + str(i+1)
i = i + 1 #process next
if self.debugEnabled:
print(msg)
else:
i = i+1
#restore x sorted array
self.brects_sorted = np.sort(sorted_rects, order=['x'])
#print("eliminateByOverlap exiting")
if self.debugEnabled:
#save round image
self.saveRoundImage('overlap',self.currentlyProcessedFileName)
print("eliminateByOverlap rects are: {}".format(len(self.brects_sorted)))
pprint.pprint(self.brects_sorted)
def filterPreExtract(self):
"""
Removes empty white only boxes
Removes boxes which have a height less that avg(h)-threshold
"""
print("filterPreExtract entered")
#we make copy of the brects_sorted as we will be sorting by area
sorted_rects = np.sort(self.brects_sorted, order=['area'])
#VoidFinder Function to do just about everything
import numpy as np
from astropy.table import Table
import time
from .hole_combine import combine_holes, combine_holes_2
from .voidfinder_functions import mesh_galaxies, \
in_mask, \
not_in_mask, \
in_survey, \
save_maximals, \
mesh_galaxies_dict
#build_mask, \
from .table_functions import add_row, \
subtract_row, \
to_vector, \
to_array, \
table_dtype_cast, \
table_divide
from .volume_cut import volume_cut, check_hole_bounds
from .avsepcalc import av_sep_calc
from .mag_cutoff_function import mag_cut, field_gal_cut
from ._voidfinder import _hole_finder
from .constants import c
from ._voidfinder_cython import check_mask_overlap
################################################################################
DtoR = np.pi/180.
RtoD = 180./np.pi
################################################################################
def filter_galaxies(galaxy_table,
survey_name,
out_directory,
mag_cut=True,
dist_limits=None,
rm_isolated=True,
write_table=True,
sep_neighbor=3,
dist_metric='comoving',
h=1.0,
hole_grid_edge_length=5.0,
magnitude_limit=-20.09,
verbose=0):
"""
A hodge podge of miscellaneous tasks which need to be done to format the data into
something the main find_voids() function can use.
1) Optional magnitude cut
2) Convert from ra-dec-redshift space into xyz space
3) Calculate the hole search grid shape
4) Optional remove isolated galaxies by partitioning them into wall (non-isolated)
and field (isolated) groups
5) Optionally write out the wall and field galaxies to disk
Parameters
==========
galaxy_table : astropy.table of shape (N,?)
variable number of required columns. If doing magnitude cut, must include
'rabsmag' column. If distance metric is 'comoving', must include 'Rgal'
column, otherwise must include 'redshift'. Also must always include 'ra'
and 'dec'
survey_name : str
Name of the galaxy catalog, string value to prepend or append to output names
out_directory : string
Directory path for output files
mag_cut : bool
whether or not to cut on magnitude, removing galaxies fainter than
magnitude_limit
dist_limits : list of length 2
[Minimum distance, maximum distance] of galaxy sample (in units of Mpc/h)
magnitude_limit : float
value at which to perform magnitude cut
rm_isolated : bool
whether or not to perform Nth neighbor distance calculation, and use it
to partition the input galaxies into wall and field galaxies
write_table : bool
use astropy.table.Table.write to write out the wall and field galaxies
to file
sep_neighbor : int, positive
if rm_isolated is True, find the Nth galaxy neighbors based on this value
dist_metric : str
Distance metric to use in calculations. Options are 'comoving'
(default; distance dependent on cosmology) and 'redshift' (distance
independent of cosmology).
h : float
Fractional value of Hubble's constant. Default value is 1 (where
H0 = 100h).
hole_grid_edge_length : float
length in Mpc/h of the edge of one cell of a grid cube for the search grid
verbose : int
values greater than zero indicate to print output
Returns
=======
wall_gals_xyz : numpy.ndarray of shape (K,3)
the galaxies which were designated not to be isolated
field_gals_xyz : numpy.ndarray of shape (L,3)
the galaxies designated as isolated
hole_grid_shape : tuple of 3 integers (i,j,k)
shape of the hole search grid
coords_min : numpy.ndarray of shape (3,)
coordinates of the minimum of the survey used for converting from
xyz space into ijk space
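Examples
========
A minimal, hypothetical call (the catalog file name is illustrative;
the table must carry the columns documented above):
>>> from astropy.table import Table
>>> galaxies = Table.read('galaxy_catalog.fits')
>>> wall_xyz, field_xyz, grid_shape, coords_min = filter_galaxies(
... galaxies, 'survey', './out/', dist_limits=[0., 300.])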
"""
print('Filter Galaxies Start', flush=True)
############################################################################
# PRE-PROCESS DATA
# Filter based on magnitude and convert from redshift to radius if necessary
#---------------------------------------------------------------------------
# Remove faint galaxies
if mag_cut:
galaxy_table = galaxy_table[galaxy_table['rabsmag'] <= magnitude_limit]
# Remove galaxies outside redshift range
if dist_limits is not None:
if dist_metric == 'comoving':
distance_boolean = np.logical_and(galaxy_table['Rgal'] >= dist_limits[0],
galaxy_table['Rgal'] <= dist_limits[1])
else:
H0 = 100*h
distance_boolean = np.logical_and(c*galaxy_table['redshift']/H0 >= dist_limits[0],
c*galaxy_table['redshift']/H0 <= dist_limits[1])
galaxy_table = galaxy_table[distance_boolean]
# Convert galaxy coordinates to Cartesian
coords_xyz = ra_dec_to_xyz(galaxy_table, dist_metric, h)
############################################################################
############################################################################
# Grid shape
#---------------------------------------------------------------------------
hole_grid_shape, coords_min = calculate_grid(coords_xyz,
hole_grid_edge_length)
############################################################################
############################################################################
# Separation
#---------------------------------------------------------------------------
if rm_isolated:
wall_gals_xyz, field_gals_xyz = wall_field_separation(coords_xyz,
sep_neighbor=sep_neighbor,
verbose=verbose)
else:
wall_gals_xyz = coords_xyz
field_gals_xyz = np.array([])
# Author: <NAME> at 24/09/2021 <<EMAIL>>
# Author: <NAME> at 06/08/2021 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
import numpy as np
from numpy.testing import assert_array_almost_equal
from reservoirpy.nodes import Ridge, Reservoir
def test_ridge_init():
node = Ridge(10, ridge=1e-8, transient=100)
data = np.ones((1, 100))
res = node(data)
assert node.Wout.shape == (100, 10)
assert node.bias.shape == (1, 10)
assert node.ridge == 1e-8
assert node.transient == 100
data = np.ones((10000, 100))
res = node.run(data)
assert res.shape == (10000, 10)
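# Editorial sketch (not part of the original tests): the readout these
# tests exercise solves, up to the bias term, the regularized
# least-squares problem Wout = (X^T X + ridge * I)^-1 X^T Y for states X
# of shape (T, N) and targets Y of shape (T, M), e.g.:
#
#   def ridge_solution(X, Y, ridge=1e-8):
#       n = X.shape[1]
#       return np.linalg.solve(X.T @ X + ridge * np.eye(n), X.T @ Y)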
def test_ridge_partial_fit():
node = Ridge(10, ridge=1e-8, transient=10)
X, Y = np.ones((5, 200, 100)), np.ones((5, 200, 10))
res = node.fit(X, Y)
assert node.Wout.shape == (100, 10)
assert_array_almost_equal(node.Wout, np.ones((100, 10)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 17:32:25 2018
@author:
<NAME>
MIT Kavli Institute for Astrophysics and Space Research,
Massachusetts Institute of Technology,
77 Massachusetts Avenue,
Cambridge, MA 02109,
USA
Email: <EMAIL>
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
#::: modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import os, sys
from datetime import datetime
#import warnings
import emcee
try:
import celerite
from celerite import terms
except:
pass
# warnings.warn('Module "celerite" could not be imported. Some functionality might not be available.')
try:
import george
from george import kernels
except:
pass
# warnings.warn('Module "george" could not be imported. Some functionality might not be available.')
import corner
from multiprocessing import Pool, cpu_count
from contextlib import closing
from tqdm import tqdm
#::: lightcurves modules
from . import index_transits, index_eclipses, phase_fold, rebin_err, get_first_epoch
np.random.seed(21)
###############################################################################
#::: call the gp
###############################################################################
def call_gp(params):
log_sigma, log_rho, log_error_scale = params
if GP_CODE=='celerite':
kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
gp = celerite.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr),
gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
return gp
elif GP_CODE=='george':
kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
gp = george.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr),
gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
return gp
else:
raise ValueError("gp_code must be either 'celerite' or 'george'.")
###############################################################################
#::: priors
###############################################################################
def external_log_prior(params):
log_sigma, log_rho, log_error_scale = params
lp = 0
if not (-15 < log_sigma < 15):
lp = -np.inf
if not (-15 < log_rho < 15):
lp = -np.inf
if not (-15 < log_error_scale < 0):
lp = -np.inf
return lp
###############################################################################
#::: set up MCMC log probability function
#::: (has to be top-level for pickle)
###############################################################################
def log_probability(params):
'''
works on xx, yy
'''
# log_sigma, log_rho, log_error_scale = params
try:
gp = call_gp(params)
ll = gp.log_likelihood(yy)
lp = gp.log_prior() + external_log_prior(params)
except:
return -np.inf
if not np.isfinite(lp):
return -np.inf
return ll + lp
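# A minimal emcee usage sketch (editorial; assumed to mirror how gp_decor
# drives the sampler below -- variable names are illustrative):
#
#   ndim = 3  # log_sigma, log_rho, log_error_scale
#   p0 = initial_guess + 1e-8 * np.random.randn(nwalkers, ndim)
#   sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
#   sampler.run_mcmc(p0, total_steps // thin_by, thin_by=thin_by)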
###############################################################################
#::: run
###############################################################################
def gp_decor(x,y,
yerr=None,
ind_in=None, ind_out=None,
period=None, epoch=None, width=None, width_2=None,
secondary_eclipse=False,
systematics_amplitude=None,
systematics_timescale=None,
mean=1.,
nwalkers=50, thin_by=50, burn_steps=2500, total_steps=5000,
bin_width=None,
gp_code='celerite', kernel='Matern32',
method='median_posterior', chunk_size=2000, Nsamples_detr=10, Nsamples_plot=10,
xlabel='x', ylabel='y', ydetr_label='ydetr',
outdir='gp_decor', fname=None, fname_summary=None,
multiprocess=False, multiprocess_cores=None,
figstretch=1, rasterized=True):
'''
Required Input:
---------------
x : array of float
x-values of the data set
y : array of float
y-values of the data set
Optional Input:
---------------
yerr : array of float / float
errorbars on y-values of the data set;
if None, these are estimated as std(y);
this is only needed to set an initial guess for the GP-fit;
white noise is fitted as a jitter term
period : float
period of a potential transit signal
if None, no transit region will be masked
epoch : float
epoch of a potential transit signal
if None, no transit region will be masked
width : float
width of the transit/primary eclipse region that should be masked (should be greater than the signal's width)
if None, no transit region will be masked
width_2 : float
width of the secondary region that should be masked (should be greater than the signal's width)
if None, no transit region will be masked
secondary_eclipse : bool
mask a secondary eclipse
(currently assumes a circular orbit)
systematics_timescale : float (default None)
    the timescale of the systematics
must be in the same units as x
if None, set to 1. (assuming usually x is in days, 1. day is reasonable)
mean : float (default 1.)
mean of the data set
the default is 1., assuming usually y will be normalized flux
nwalkers : int
number of MCMC walkers
thin_by : int
thinning the MCMC chain by how much
burn_steps : int
how many steps to burn in the MCMC
total_steps : int
total MCMC steps (including burn_steps)
bin_width : float (default None)
run the GP on binned data and then evaluate on unbinned data
(significant speed up for george)
currently a bit buggy
gp_code : str (default 'celerite')
'celerite' or 'george'
which GP code to use
method : str (default 'median_posterior')
how to calculate the GP curve that's used for detrending
'mean_curve' : take Nsamples_detr and calculate many curves, detrend by the mean of all of them
'median_posterior' : take the median of the posterior and predict a single curve
chunk_size : int (default 2000)
calculate gp.predict in chunks of the entire light curve (to not crash memory)
Nsamples_detr : float (default 10)
only used if method=='mean_curve'
how many samples used for detrending
Nsamples_plot : float (default 10)
only used if method=='mean_curve'
how many samples used for plotting
xlabel : str
x axis label (for plots)
ylabel : str
y axis label (for plots)
ydetr_label : str
y_detr axis label (for plots)
outdir : str
name of the output directory
fname : str
prefix of the output files (e.g. a planet name)
multiprocess : bool (default False)
run MCMC on many cores
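Example:
---------------
a minimal, hypothetical call on a normalized light curve
(array names are illustrative):
gp_decor(time, flux, yerr=flux_err,
period=3.45, epoch=2456789.1, width=0.2,
outdir='gp_decor_out', fname='planet_b')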
'''
if (gp_code=='celerite') & ('celerite' not in sys.modules):
raise ValueError('You are trying to use "celerite", but it is not installed.')
elif (gp_code=='george') & ('george' not in sys.modules):
raise ValueError('You are trying to use "george", but it is not installed.')
#::: make it luser proof and recalculate the true first epoch
if not any(v is None for v in [period, epoch, width]):
epoch = get_first_epoch(x, epoch, period)
#TODO: philosophical question:
#use median of the posterior to get 1 "median" GP for detrending?
#or use mean (and std) curves of many samples from the GP for detrending?
#it definitely is way faster to simply use the "median"...
#::: this is ugly, I know;
#::: blame the multiprocessing and pickling issues,
#::: which demand global variables for efficiency
global xx
global yy
global yyerr
global err_norm
global GP_CODE
global MEAN
GP_CODE = gp_code
MEAN = mean
#::: outdir
if not os.path.exists(outdir): os.makedirs(outdir)
#::: print function that prints into console and logfile at the same time
now = datetime.now().isoformat()
def logprint(*text):
print(*text)
original = sys.stdout
with open( os.path.join(outdir,fname+'logfile_'+now+'.log'), 'a' ) as f:
sys.stdout = f
print(*text)
sys.stdout = original
#::: fname
if fname is not None:
fname += '_gp_decor_'
else:
fname = 'gp_decor_'
#::: MCMC plot settings
if kernel=='Matern32':
keys = ['gp_log_sigma', 'gp_log_rho', 'log_y_err']
names = [r'gp: $\log{\sigma}$', r'gp: $\log{\rho}$', r'$\log{(y_\mathrm{err})}$']
elif kernel=='SHOT':
keys = ['gp_log_S0', 'gp_log_Q', 'log_omega0', 'log_y_err']
names = [r'gp: $\log{S_0}$', r'gp: $\log{Q}$', r'gp: $\log{\omega_0}$', r'$\log{(y_\mathrm{err})}$']
# celerite.terms.SHOTerm
discard = int(1.*burn_steps/thin_by)
#::: phase-plot settings
dt=1./1000.
ferr_type='meansig'
ferr_style='sem'
sigmaclip=True
logprint('\nStarting...')
#::: guess yerr if not given
if yerr is None:
yerr = np.nanstd(y) * np.ones_like(y)
#::: mask transit if required
#::: if ind_in and ind_out are given, use these
#::: otherwise, check if period, epoch and width are given
if (ind_in is None) and (ind_out is None):
if any(v is None for v in [period, epoch, width]):
ind_in = []
ind_out = slice(None) #mark all data points as out of transit (i.e. no transit masked)
else:
if secondary_eclipse is True:
ind_ecl1, ind_ecl2, ind_out = index_eclipses(x, epoch, period, width, width_2)
ind_in = list(ind_ecl1)+list(ind_ecl2)
else:
ind_in, ind_out = index_transits(x, epoch, period, width)
xx = x[ind_out]
yy = y[ind_out]
yyerr = yerr[ind_out]
#::: binning
if bin_width is not None:
bintime_out, bindata_out, bindata_err_out, _ = rebin_err(xx, yy, ferr=yyerr, dt=bin_width, ferr_type='meansig', sigmaclip=True, ferr_style='sem' )
xx = bintime_out
yy = bindata_out
yyerr = bindata_err_out
#::: save settings
if not os.path.exists(outdir): os.makedirs(outdir)
header = 'period,epoch,width,secondary_eclipse,'+\
'nwalkers,thin_by,burn_steps,total_steps'
X = np.column_stack(( period, epoch, width, secondary_eclipse, nwalkers, thin_by, burn_steps, total_steps ))
np.savetxt( os.path.join(outdir,fname+'settings.csv'), X, header=header, delimiter=',', fmt='%s')
#::: plot the data
fig, ax = plt.subplots(figsize=(6*figstretch,4))
ax.errorbar(x[ind_out], y[ind_out], yerr=yerr[ind_out], fmt=".b", capsize=0, rasterized=rasterized)
ax.errorbar(x[ind_in], y[ind_in], yerr=yerr[ind_in], fmt=".", color='skyblue', capsize=0, rasterized=rasterized)
ax.set( xlabel=xlabel, ylabel=ylabel, title='Original data' )
fig.savefig( os.path.join(outdir,fname+'data.pdf'), bbox_inches='tight')
plt.close(fig)
if bin_width is not None:
fig, ax = plt.subplots(figsize=(6*figstretch,4))
ax.errorbar(xx, yy, yerr=yyerr, fmt=".b", capsize=0, rasterized=rasterized)
ax.set( xlabel=xlabel, ylabel=ylabel, title='Original data (binned)' )
fig.savefig( os.path.join(outdir,fname+'data_binned.pdf'), bbox_inches='tight')
plt.close(fig)
# err
# #::: set up the GP model
## kernel = terms.RealTerm(log_a=1., log_c=1.) + terms.JitterTerm(log_sigma=np.log(yerr))
# kernel = terms.Matern32Term(log_sigma=1., log_rho=1.) + terms.JitterTerm(log_sigma=np.log(yerr))
# gp = celerite.GP(kernel, mean=mean) #log_white_noise=np.log(yerr),
# gp.compute(xx, yerr=yerr)
## logprint("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))
#::: plot grid
t = np.linspace(np.min(x), np.max(x), 2000)
# ###########################################################################
# #::: MLE fit
# ###########################################################################
# logprint 'Running MLE fit...'
#
# #::: define a cost function
# def neg_log_like(params, yy, gp):
# gp.set_parameter_vector(params)
# return -gp.log_likelihood(yy)
#
# def grad_neg_log_like(params, yy, gp):
# gp.set_parameter_vector(params)
# return -gp.grad_log_likelihood(yy)[1]
#
#
# #::: run the MLE fit
# initial_params = gp.get_parameter_vector()
## logprint initial_params
# bounds = gp.get_parameter_bounds()
# soln = minimize(neg_log_like, initial_params, jac=grad_neg_log_like,
# method="L-BFGS-B", bounds=bounds, args=(yy, gp))
# gp.set_parameter_vector(soln.x)
## logprint("Final log-likelihood: {0}".format(-soln.fun))
## logprint soln.x
#
#
# #::: evaluate MLE curve
# mu, var = gp.predict(yy, t, return_var=True)
# std = np.sqrt(var)
#
#
# #::: plot the data and MLE fit
# color = 'r' #"#ff7f0e"
# fig, ax = plt.subplots()
# ax.errorbar(xx, yy, yerr=np.exp(soln.x[2]), fmt="b.", capsize=0)
# ax.errorbar(x[ind_in], y[ind_in], yerr=np.exp(soln.x[2]), fmt=".", color='skyblue', capsize=0)
# ax.plot(t, mu, color='r', zorder=11)
# ax.fill_between(t, mu+std, mu-std, color='r', alpha=0.3, edgecolor="none", zorder=10)
# ax.set( xlabel=xlabel, ylabel=ylabel, title="MLE prediction");
# fig.savefig( os.path.join(outdir,fname+'MLE_fit.jpg'), dpi=100, bbox_inches='tight')
#
# #::: delete that gp instance
# del gp
###########################################################################
#::: MCMC fit
###########################################################################
if multiprocess and not multiprocess_cores:
multiprocess_cores = cpu_count()-1
logprint('\nRunning MCMC fit...')
if multiprocess: logprint('\tRunning on', multiprocess_cores, 'CPUs.')
#::: initial guesses
#::: log(sigma)
if systematics_amplitude is not None:
log_sigma_init = np.log(systematics_amplitude)
else:
log_sigma_init = np.log(np.nanstd(yy))
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 05:16:40 2018
@author: <NAME>
"""
import matplotlib.pyplot as plt # import matplotlib
import numpy as np # import numpy
import scipy as sp # import scipy
from scipy import sparse # import sparse module from scipy
import networkx as nx # import networkx
# code for generating connectivity matrix from a connectivity list, for small or near-full network
def calculate_network_mat(net_list):
len_net = np.amax(net_list)
"""
This module to performs linear analysis of coupled lattices.
Notation is the same as in reference [3]
In case some strange results appear in phase advances or beta functions,
the reading of [2] is encouraged, since the authors discuss subtleties not
addressed here for strong coupled motion.
References:
[1] <NAME>., & <NAME>. (1973). Parametrization of Linear
Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear
Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279
[2] <NAME>., & <NAME>. (1999). Linear analysis of coupled
lattices. Physical Review Special Topics - Accelerators and Beams,
2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001
[3] <NAME>, Some Useful Linear Coupling Approximations.
C-A/AP/#101 Brookhaven Nat. Lab. (July 2003)
"""
import numpy as _np
from mathphys.functions import get_namedtuple as _get_namedtuple
from .. import lattice as _lattice
from .. import tracking as _tracking
from ..utils import interactive as _interactive
from .miscellaneous import OpticsException as _OpticsException
class EdwardsTeng(_np.record):
"""Edwards and Teng decomposition of the transfer matrices.
Notation is the same as in reference [3].
In case some strange results appear in phase advances or beta functions,
the reading of [2] is encouraged, since the authors discuss subtleties not
addressed here for strong coupled motion.
References:
[1] <NAME>., & <NAME>. (1973). Parametrization of Linear
Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear
Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279
[2] <NAME>., & <NAME>. (1999). Linear analysis of coupled
lattices. Physical Review Special Topics - Accelerators and Beams,
2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001
[3] <NAME>, Some Useful Linear Coupling Approximations.
C-A/AP/#101 Brookhaven Nat. Lab. (July 2003)
Contains the decomposed parameters:
spos (array, len(indices)) : longitudinal position [m]
beta1 (array, len(indices)) : beta of first eigen-mode
beta2 (array, len(indices)) : beta of second eigen-mode
alpha1 (array, len(indices)) : alpha of first eigen-mode
alpha2 (array, len(indices)) : alpha of second eigen-mode
gamma1 (array, len(indices)) : gamma of first eigen-mode
gamma2 (array, len(indices)) : gamma of second eigen-mode
mu1 (array, len(indices)): phase advance of the first eigen-mode
mu2 (array, len(indices)): phase advance of the second eigen-mode
W (array, len(indices)x2x2) : matrices W in [3]
"""
DTYPE = '<f8'
ORDER = _get_namedtuple('Order', field_names=[
'spos',
'beta1', 'alpha1', 'mu1',
'beta2', 'alpha2', 'mu2',
'W_11', 'W_12', 'W_21', 'W_22',
'eta1', 'etap1', 'eta2', 'etap2',
'rx', 'px', 'ry', 'py', 'de', 'dl'])
def __setattr__(self, attr, val):
"""."""
if attr == 'co':
self._set_co(val)
else:
super().__setattr__(attr, val)
def __str__(self):
"""."""
rst = ''
rst += 'spos : '+'{0:+10.3e}'.format(self.spos)
fmt = '{0:+10.3e}, {1:+10.3e}'
rst += '\nrx, ry : '+fmt.format(self.rx, self.ry)
rst += '\npx, py : '+fmt.format(self.px, self.py)
rst += '\nde, dl : '+fmt.format(self.de, self.dl)
rst += '\nmu1, mu2 : '+fmt.format(self.mu1, self.mu2)
rst += '\nbeta1, beta2 : '+fmt.format(self.beta1, self.beta2)
rst += '\nalpha1, alpha2: '+fmt.format(self.alpha1, self.alpha2)
rst += '\neta1, eta2 : '+fmt.format(self.eta1, self.eta2)
rst += '\netap1, etap2 : '+fmt.format(self.etap1, self.etap2)
return rst
@property
def co(self):
"""Closed-Orbit in XY plane coordinates.
Returns:
numpy.ndarray (6, ): 6D phase space point around which the matrices
were calculated.
"""
return _np.array([
self.rx, self.px, self.ry, self.py, self.de, self.dl])
@property
def W(self):
"""2D mixing matrix from ref [3].
Returns:
numpy.ndarray (2x2): W matrix from ref [3].
"""
return _np.array([[self.W_11, self.W_12], [self.W_21, self.W_22]])
@W.setter
def W(self, val):
self[EdwardsTeng.ORDER.W_11] = val[0, 0]
self[EdwardsTeng.ORDER.W_12] = val[0, 1]
self[EdwardsTeng.ORDER.W_21] = val[1, 0]
self[EdwardsTeng.ORDER.W_22] = val[1, 1]
@property
def d(self):
"""Parameter d from ref [3], calculated via equation 81.
Returns:
float: d from ref [3].
"""
return _np.sqrt(1 - _np.linalg.det(self.W))
@property
def R(self):
"""4D matrix that transforms from normal modes to XY plane.
Returns:
numpy.ndarray (4x4): R matrix from ref [3].
"""
deyes = self.d * _np.eye(2)
return _np.block([
[deyes, _symplectic_transpose(self.W)],
[-self.W, deyes]])
@property
def Rinv(self):
"""4D matrix that transforms from XY plane to normal modes.
Returns:
numpy.ndarray (4x4): Rinv matrix from ref [3].
"""
deyes = self.d * _np.eye(2)
return _np.block([
[deyes, -_symplectic_transpose(self.W)],
[self.W, deyes]])
def from_normal_modes(self, pos):
"""Transform from normal modes to XY plane.
Args:
pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in
normal modes coordinates.
Returns:
pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in
XY coordinates.
"""
pos = pos.copy()
pos[:4] = self.R @ pos[:4]
return pos
def to_normal_modes(self, pos):
"""Transform from XY plane to normal modes.
Args:
pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in
XY coordinates.
Returns:
pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in
normal mode coordinates.
"""
pos = pos.copy()
pos[:4] = self.Rinv @ pos[:4]
return pos
def make_dict(self):
"""."""
cod = self.co
beta = [self.beta1, self.beta2]
alpha = [self.alpha1, self.alpha2]
eta = [self.eta1, self.eta2]
etap = [self.etap1, self.etap2]
mus = [self.mu1, self.mu2]
return {
'co': cod, 'beta': beta, 'alpha': alpha,
'eta': eta, 'etap': etap, 'mu': mus}
@staticmethod
def make_new(*args, **kwrgs):
"""Build a Twiss object."""
if args:
if isinstance(args[0], dict):
kwrgs = args[0]
twi = EdwardsTengArray(1)
cod = kwrgs.get('co', (0.0,)*6)
twi['rx'], twi['px'], twi['ry'], twi['py'], twi['de'], twi['dl'] = cod
twi['mu1'], twi['mu2'] = kwrgs.get('mu', (0.0, 0.0))
twi['beta1'], twi['beta2'] = kwrgs.get('beta', (0.0, 0.0))
twi['alpha1'], twi['alpha2'] = kwrgs.get('alpha', (0.0, 0.0))
twi['eta1'], twi['eta2'] = kwrgs.get('eta', (0.0, 0.0))
twi['etap1'], twi['etap2'] = kwrgs.get('etap', (0.0, 0.0))
return twi[0]
def _set_co(self, value):
"""."""
try:
leng = len(value)
except TypeError:
leng = 6
value = [value, ]*leng
if leng != 6:
raise ValueError('closed orbit must have 6 elements.')
self[EdwardsTeng.ORDER.rx] = value[0]
self[EdwardsTeng.ORDER.px] = value[1]
self[EdwardsTeng.ORDER.ry] = value[2]
self[EdwardsTeng.ORDER.py] = value[3]
self[EdwardsTeng.ORDER.de] = value[4]
self[EdwardsTeng.ORDER.dl] = value[5]
class EdwardsTengArray(_np.ndarray):
"""Array of Edwards and Teng objects.
Notation is the same as in reference [3]
In case some strange results appear in phase advances or beta functions,
the reading of [2] is encouraged, since the authors discuss subtleties not
addressed here for strong coupled motion.
References:
[1] <NAME>., & <NAME>. (1973). Parametrization of Linear
Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear
Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279
[2] <NAME>., & <NAME>. (1999). Linear analysis of coupled
lattices. Physical Review Special Topics - Accelerators and Beams,
2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001
[3] <NAME>, Some Useful Linear Coupling Approximations.
C-A/AP/#101 Brookhaven Nat. Lab. (July 2003)
Contains the decomposed parameters:
spos (array, len(indices)) : longitudinal position [m]
beta1 (array, len(indices)) : beta of first eigen-mode
beta2 (array, len(indices)) : beta of second eigen-mode
alpha1 (array, len(indices)) : alpha of first eigen-mode
alpha2 (array, len(indices)) : alpha of second eigen-mode
gamma1 (array, len(indices)) : gamma of first eigen-mode
gamma2 (array, len(indices)) : gamma of second eigen-mode
mu1 (array, len(indices)): phase advance of the first eigen-mode
mu2 (array, len(indices)): phase advance of the second eigen-mode
L1 (array, len(indices)x2x2) : matrices L1 in [3]
L2 (array, len(indices)x2x2) : matrices L2 in [3]
W (array, len(indices)x2x2) : matrices W in [3]
d (array, len(indices)): d parameter in [3]
"""
def __eq__(self, other):
"""."""
return _np.all(super().__eq__(other))
def __new__(cls, edteng=None, copy=True):
"""."""
length = 1
if isinstance(edteng, (int, _np.integer)):
length = edteng
edteng = None
elif isinstance(edteng, EdwardsTengArray):
return edteng.copy() if copy else edteng
if edteng is None:
arr = _np.zeros(
(length, len(EdwardsTeng.ORDER)), dtype=EdwardsTeng.DTYPE)
elif isinstance(edteng, _np.ndarray):
arr = edteng.copy() if copy else edteng
elif isinstance(edteng, _np.record):
arr = _np.ndarray(
(edteng.size, len(EdwardsTeng.ORDER)), buffer=edteng.data)
arr = arr.copy() if copy else arr
fmts = [(fmt, EdwardsTeng.DTYPE) for fmt in EdwardsTeng.ORDER._fields]
return super().__new__(
cls, shape=(arr.shape[0], ), dtype=(EdwardsTeng, fmts), buffer=arr)
@property
def spos(self):
"""."""
return self['spos']
@spos.setter
def spos(self, value):
self['spos'] = value
@property
def beta1(self):
"""."""
return self['beta1']
@beta1.setter
def beta1(self, value):
self['beta1'] = value
@property
def alpha1(self):
"""."""
return self['alpha1']
@alpha1.setter
def alpha1(self, value):
self['alpha1'] = value
@property
def gamma1(self):
"""."""
return (1 + self['alpha1']*self['alpha1'])/self['beta1']
@property
def mu1(self):
"""."""
return self['mu1']
@mu1.setter
def mu1(self, value):
self['mu1'] = value
@property
def beta2(self):
"""."""
return self['beta2']
@beta2.setter
def beta2(self, value):
self['beta2'] = value
@property
def alpha2(self):
"""."""
return self['alpha2']
@alpha2.setter
def alpha2(self, value):
self['alpha2'] = value
@property
def gamma2(self):
"""."""
return (1 + self['alpha2']*self['alpha2'])/self['beta2']
@property
def mu2(self):
"""."""
return self['mu2']
@mu2.setter
def mu2(self, value):
self['mu2'] = value
@property
def W_11(self):
"""."""
return self['W_11']
@W_11.setter
def W_11(self, val):
self['W_11'] = val
@property
def W_12(self):
"""."""
return self['W_12']
@W_12.setter
def W_12(self, val):
self['W_12'] = val
@property
def W_21(self):
"""."""
return self['W_21']
@W_21.setter
def W_21(self, val):
self['W_21'] = val
@property
def W_22(self):
"""."""
return self['W_22']
@W_22.setter
def W_22(self, val):
self['W_22'] = val
@property
def W(self):
"""2D mixing matrix from ref [3].
Returns:
numpy.ndarray (Nx2x2): W matrix from ref [3].
"""
mat = _np.zeros((self.W_11.size, 2, 2))
mat[:, 0, 0] = self.W_11
mat[:, 0, 1] = self.W_12
mat[:, 1, 0] = self.W_21
mat[:, 1, 1] = self.W_22
return mat
@W.setter
def W(self, value):
self.W_11 = value[:, 0, 0]
self.W_12 = value[:, 0, 1]
self.W_21 = value[:, 1, 0]
self.W_22 = value[:, 1, 1]
@property
def d(self):
"""Parameter d from ref [3], calculated via equation 81.
Returns:
numpy.ndarray (N, ): d from ref [3].
"""
return _np.sqrt(1-_np.linalg.det(self.W))
@property
def R(self):
"""4D matrix that transforms from normal modes to XY plane.
Returns:
numpy.ndarray (Nx4x4): R matrix from ref [3].
"""
deyes = self.d[:, None, None] * _np.eye(2)
return _np.block([
[deyes, _symplectic_transpose(self.W)],
[-self.W, deyes]])
@property
def Rinv(self):
"""4D matrix that transforms from XY plane to normal modes.
Returns:
numpy.ndarray (Nx4x4): Rinv matrix from ref [3].
"""
deyes = self.d[:, None, None] * _np.eye(2)
return _np.block([
[deyes, -_symplectic_transpose(self.W)],
[self.W, deyes]])
def from_normal_modes(self, pos):
"""Transform from normal modes to XY plane.
Args:
pos (numpy.ndarray): (4, len(self)) or (6, len(self)) positions in
phase space in normal modes coordinates.
Returns:
numpy.ndarray: (4, len(self)) or (6, len(self)) positions in phase
space in XY coordinates.
"""
pos = pos.copy()
pos[:4] = _np.einsum('ijk,ki->ji', self.R, pos[:4])
return pos
def to_normal_modes(self, pos):
"""Transform from XY plane to normal modes.
Args:
pos (numpy.ndarray): (4, len(self)) or (6, len(self)) positions in
phase space in XY coordinates.
Returns:
numpy.ndarray: (4, len(self)) or (6, len(self)) positions in phase
space in normal mode coordinates.
"""
pos = pos.copy()
pos[:4] = _np.einsum('ijk,ki->ji', self.Rinv, pos[:4])
return pos
@property
def eta1(self):
"""."""
return self['eta1']
@eta1.setter
def eta1(self, value):
self['eta1'] = value
@property
def etap1(self):
"""."""
return self['etap1']
@etap1.setter
def etap1(self, value):
self['etap1'] = value
@property
def eta2(self):
"""."""
return self['eta2']
@eta2.setter
def eta2(self, value):
self['eta2'] = value
@property
def etap2(self):
"""."""
return self['etap2']
@etap2.setter
def etap2(self, value):
self['etap2'] = value
@property
def rx(self):
"""."""
return self['rx']
@rx.setter
def rx(self, value):
self['rx'] = value
@property
def px(self):
"""."""
return self['px']
@px.setter
def px(self, value):
self['px'] = value
@property
def ry(self):
"""."""
return self['ry']
@ry.setter
def ry(self, value):
self['ry'] = value
@property
def py(self):
"""."""
return self['py']
@py.setter
def py(self, value):
self['py'] = value
@property
def de(self):
"""."""
return self['de']
@de.setter
def de(self, value):
self['de'] = value
@property
def dl(self):
"""."""
return self['dl']
@dl.setter
def dl(self, value):
self['dl'] = value
@property
def co(self):
"""Trajectory in XY plane coordinates.
Returns:
numpy.ndarray (6, ): 6D phase space trajectory around which the
matrices were calculated.
"""
return _np.array([
self.rx, self.px, self.ry, self.py, self.de, self.dl])
@co.setter
def co(self, value):
"""."""
self.rx, self.px = value[0], value[1]
self.ry, self.py = value[2], value[3]
self.de, self.dl = value[4], value[5]
@staticmethod
def compose(edteng_list):
"""."""
if isinstance(edteng_list, (list, tuple)):
for val in edteng_list:
if not isinstance(val, (EdwardsTeng, EdwardsTengArray)):
raise _OpticsException(
'can only compose lists of Twiss objects.')
else:
raise _OpticsException('can only compose lists of Twiss objects.')
arrs = list()
for val in edteng_list:
arrs.append(_np.ndarray(
(val.size, len(EdwardsTeng.ORDER)), buffer=val.data))
arrs = _np.vstack(arrs)
return EdwardsTengArray(arrs)
@_interactive
def calc_edwards_teng(
accelerator=None, init_edteng=None, indices='open',
energy_offset=None):
"""Perform linear analysis of coupled lattices.
Notation is the same as in reference [3]
In case some strange results appear in phase advances or beta functions,
the reading of [2] is encouraged, since the authors discuss subtleties not
addressed here for strong coupled motion.
References:
[1] <NAME>., & <NAME>. (1973). Parametrization of Linear
Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear
Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279
[2] <NAME>., & <NAME>. (1999). Linear analysis of coupled
lattices. Physical Review Special Topics - Accelerators and Beams,
2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001
[3] <NAME>, Some Useful Linear Coupling Approximations.
C-A/AP/#101 Brookhaven Nat. Lab. (July 2003)
Args:
accelerator (pyaccel.accelerator.Accelerator): lattice model
energy_offset (float, optional): Energy Offset . Defaults to 0.0.
init_edteng (pyaccel.optics.EdwardsTeng, optional): EdwardsTeng
parameters at the start of first element. Defaults to None.
energy_offset (float, optional): float denoting the energy deviation
(used only for periodic solutions). Defaults to None.
indices : may be a ((list, tuple, numpy.ndarray), optional):
list of element indices where closed orbit data is to be
returned or a string:
'open' : return the closed orbit at the entrance of all
elements.
'closed' : equal 'open' plus the orbit at the end of the last
element.
If indices is None data will be returned only at the entrance
of the first element. Defaults to 'open'.
Returns:
pyaccel.optics.EdwardsTengArray : array of decompositions of the
transfer matrices
numpy.ndarray (4x4): transfer matrix of the line/ring
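Example:
A minimal sketch (construction of the Accelerator is assumed; any
pyaccel model with a nonzero harmonic number works):
edteng, m44 = calc_edwards_teng(acc, indices='closed')
tune1 = edteng[-1].mu1 / (2*_np.pi)
tune2 = edteng[-1].mu2 / (2*_np.pi)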
"""
cod = None
if init_edteng is not None:
if energy_offset is not None:
raise _OpticsException(
'energy_offset and init_teng are mutually '
'exclusive options. Add the desired energy deviation to the '
'appropriate position of init_edteng object.')
# as a transport line: uses init_edteng
fixed_point = init_edteng.co
else:
# as a periodic system: try to find periodic solution
if accelerator.harmonic_number == 0:
raise _OpticsException(
'Either harmonic number was not set or calc_edwards_teng was '
'invoked for transport line without initial EdwardsTeng')
cod = _tracking.find_orbit(
accelerator, energy_offset=energy_offset, indices='closed')
fixed_point = cod[0]
m44, cum_mat = _tracking.find_m44(
accelerator, indices='closed', fixed_point=fixed_point)
indices = _tracking._process_indices(accelerator, indices)
edteng = EdwardsTengArray(cum_mat.shape[0])
edteng.spos = _lattice.find_spos(accelerator, indices='closed')
# Calculate initial matrices:
if init_edteng is None:
M = m44[:2, :2]
N = m44[2:4, 2:4]
m = m44[2:4, :2]
n = m44[:2, 2:4]
t = _np.trace(M - N)
m_plus_nbar = m + _symplectic_transpose(n)
det_m_plus_nbar = _np.linalg.det(m_plus_nbar)
u = _np.sign(t) * _np.sqrt(t*t + 4*det_m_plus_nbar)
dsqr = (1 + t/u)/2
W_over_d = -m_plus_nbar/(dsqr*u)
d0 = _np.sqrt(dsqr)
W0 = -m_plus_nbar/(d0*u)
L10 = M - n @ W_over_d
L20 = N + W_over_d @ n
# Get initial betas and alphas. (Based on calc_twiss of trackcpp.)
sin_mu1 = _np.sign(L10[0, 1]) * _np.sqrt(
-L10[0, 1]*L10[1, 0] - (L10[0, 0] - L10[1, 1])**2/4.0)
sin_mu2 = _np.sign(L20[0, 1]) * _np.sqrt(
-L20[0, 1]*L20[1, 0] - (L20[0, 0] - L20[1, 1])**2/4.0)
alpha10 = (L10[0, 0] - L10[1, 1])/2/sin_mu1
alpha20 = (L20[0, 0] - L20[1, 1])/2/sin_mu2
beta10 = L10[0, 1]/sin_mu1
beta20 = L20[0, 1]/sin_mu2
else:
W0 = init_edteng.W
d0 = init_edteng.d
alpha10 = init_edteng.alpha1
alpha20 = init_edteng.alpha2
beta10 = init_edteng.beta1
beta20 = init_edteng.beta2
# #### Determine Twiss Parameters of uncoupled motion #########
# First get the initial transfer matrices decompositions.
# (This method is based on equation 367 of ref[3])
M11 = cum_mat[:, :2, :2]
M12 = cum_mat[:, :2, 2:4]
M21 = cum_mat[:, 2:4, :2]
M22 = cum_mat[:, 2:4, 2:4]
L1 = (d0*M11 - M12@W0) / d0
L2 = (d0*M22 + M21@_symplectic_transpose(W0)) / d0
edteng.W = -(d0*M21 - M22@W0)@_symplectic_transpose(L1)
# Get optical functions along the ring (Based on calc_twiss of trackcpp):
edteng.beta1 = (
(L1[:, 0, 0]*beta10 - L1[:, 0, 1]*alpha10)**2 + L1[:, 0, 1]**2)/beta10
edteng.beta2 = (
(L2[:, 0, 0]*beta20 - L2[:, 0, 1]*alpha20)**2 + L2[:, 0, 1]**2)/beta20
edteng.alpha1 = -(
(L1[:, 0, 0]*beta10 - L1[:, 0, 1]*alpha10) *
(L1[:, 1, 0]*beta10 - L1[:, 1, 1]*alpha10) +
L1[:, 0, 1]*L1[:, 1, 1])/beta10
edteng.alpha2 = -(
(L2[:, 0, 0]*beta20 - L2[:, 0, 1]*alpha20) *
(L2[:, 1, 0]*beta20 - L2[:, 1, 1]*alpha20) +
L2[:, 0, 1]*L2[:, 1, 1])/beta20
mu1 = _np.arctan(L1[:, 0, 1]/(
L1[:, 0, 0]*beta10 - L1[:, 0, 1]*alpha10))
mu2 = _np.arctan(L2[:, 0, 1]/(
L2[:, 0, 0]*beta20 - L2[:, 0, 1]*alpha20))
# unwrap phases
summ = _np.zeros(mu1.size)
_np.cumsum(_np.diff(mu1) < 0, out=summ[1:])
mu1 += summ * _np.pi
_np.cumsum(_np.diff(mu2) < 0, out=summ[1:])
mu2 += summ * _np.pi
edteng.mu1 = mu1
edteng.mu2 = mu2
# ####### Handle dispersion function and orbit #######:
dp = 1e-6
if cod is not None:
coddp = _tracking.find_orbit(
accelerator, energy_offset=fixed_point[4]+dp, indices='closed')
else:
cod, *_ = _tracking.line_pass(
accelerator, fixed_point, indices='closed')
etas_norm = _np.array([
init_edteng.eta1, init_edteng.etap1,
init_edteng.eta2, init_edteng.etap2])
etas = init_edteng.from_normal_modes(etas_norm)
fixed_point[:4] += dp * etas
fixed_point[4] += dp
coddp, *_ = _tracking.line_pass(
accelerator, fixed_point, indices='closed')
eta = (coddp - cod) / dp
eta = edteng.to_normal_modes(eta)
edteng.co = cod
edteng.eta1 = eta[0]
edteng.etap1 = eta[1]
edteng.eta2 = eta[2]
edteng.etap2 = eta[3]
return edteng[indices], m44
def estimate_coupling_parameters(edteng_end):
"""Estimate minimum tune separation and emittance ratio.
The estimative uses Edwards and Teng decomposition of the one turn matrix.
Notation is the same as in reference [3]
Reading of [2] is encouraged, since the authors discuss subtleties not
addressed here for strong coupled motion.
References:
[1] <NAME>., & <NAME>. (1973). Parametrization of Linear
Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear
Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279
[2] <NAME>., & <NAME>. (1999). Linear analysis of coupled
lattices. Physical Review Special Topics - Accelerators and Beams,
2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001
[3] <NAME>, Some Useful Linear Coupling Approximations.
C-A/AP/#101 Brookhaven Nat. Lab. (July 2003)
Args:
edteng_end (pyaccel.optics.EdwardsTengArray): EdwardsTeng parameters
around the ring.
Returns:
min_tunesep (float) : estimative of minimum tune separation,
Based on equation 85-87 of ref [3]:
Assuming we are at the sum resonance, then T = 0.
So we can write:
mu1 = mu0 - minsep/2
mu2 = mu0 + minsep/2
where mu0 = (mu1 + mu2) / 2, and
U = 2*sqrt(det(m+nbar))
However, we know that
U = 2*cos(mu1) - 2*cos(mu2)
U = 2*cos(mu0-minsep/2) - 2*cos(mu0+minsep/2)
U = 4*sin(mu0) * sin(minsep/2)
which yields,
sin(minsep/2) = sqrt(det(m+nbar))/sin(mu0)/2
ratio (numpy.ndarray): estimative of invariant sharing ratio.
Based on equation 216, 217 and 237 of ref [3].
The ratio is not invariant along the ring.
So the whole vector is returned.
An average of this value could be used to estimate the ratio.
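Example:
A minimal sketch chained to calc_edwards_teng (variable names are
illustrative):
edteng, _ = calc_edwards_teng(acc, indices='closed')
min_tunesep, ratio = estimate_coupling_parameters(edteng)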
"""
edt = edteng_end
    # ###### Estimate of emittance ratio #######
# Equations 216 and 217 of ref [3]
D2 = edt.beta2*edt.W_22**2 + 2*edt.alpha2*edt.W_22*edt.W_12
D1 = edt.beta1*edt.W_11**2 - 2*edt.alpha1*edt.W_11*edt.W_12
D2 += edt.gamma2*edt.W_12**2
D1 += edt.gamma1*edt.W_12**2
# Equation 237 of ref [3]
ratio = 1/edt.d**2 * _np.sqrt(D1*D2/edt.beta1/edt.beta2)
# # This second formula is based on equation 258 of ref [3] which is
# # approximately valid for weak coupling:
# dsqr = edt.d ** 2
# ratio = (1-dsqr)/dsqr
# ###### Estimate Minimum tune separation #####
# from equations 85, 86 and 89 of ref [3]:
edt = edt[-1]
dsqr = edt.d * edt.d
    U = 2*(_np.cos(edt.mu1) - _np.cos(edt.mu2))
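    # NOTE: the function body is truncated in the source. What follows is a
    # minimal sketch of the missing tail, assuming it only applies the
    # derivation in the docstring above (sin(minsep/2) =
    # sqrt(det(m+nbar))/sin(mu0)/2, with U = 2*sqrt(det(m+nbar))); how `dsqr`
    # enters the exact original expression cannot be recovered from what is
    # shown here:
    mu0 = (edt.mu1 + edt.mu2) / 2
    min_tunesep = 2*_np.arcsin(_np.abs(U) / (4*_np.sin(mu0))) / (2*_np.pi)
    return min_tunesep, ratio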
# Copyright 2016-2019 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transform_utils"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage.measure import label
from tensorflow.python.platform import test
from tensorflow.python.keras import backend as K
from deepcell.utils import transform_utils
def _get_image(img_h=300, img_w=300):
bias = np.random.rand(img_w, img_h) * 64
variance = np.random.rand(img_w, img_h) * (255 - 64)
img = np.random.rand(img_w, img_h) * variance + bias
return img
def _generate_test_masks():
img_w = img_h = 30
mask_images = []
for _ in range(8):
imarray = np.random.randint(2, size=(img_w, img_h, 1))
mask_images.append(imarray)
return mask_images
class TransformUtilsTest(test.TestCase):
def test_deepcell_transform_2d(self):
# test single edge class
maskstack = np.array([label(i) for i in _generate_test_masks()])
dc_maskstack = transform_utils.deepcell_transform(
maskstack, data_format=None, separate_edge_classes=False)
dc_maskstack_dil = transform_utils.deepcell_transform(
maskstack, dilation_radius=1,
data_format='channels_last',
separate_edge_classes=False)
self.assertEqual(dc_maskstack.shape[-1], 3)
self.assertEqual(dc_maskstack_dil.shape[-1], 3)
self.assertGreater(
dc_maskstack_dil[..., 0].sum() + dc_maskstack_dil[..., 1].sum(),
dc_maskstack[..., 0].sum() + dc_maskstack[..., 1].sum())
# test separate edge classes
maskstack = np.array([label(i) for i in _generate_test_masks()])
dc_maskstack = transform_utils.deepcell_transform(
maskstack, data_format=None, separate_edge_classes=True)
dc_maskstack_dil = transform_utils.deepcell_transform(
maskstack, dilation_radius=1,
data_format='channels_last',
separate_edge_classes=True)
self.assertEqual(dc_maskstack.shape[-1], 4)
self.assertEqual(dc_maskstack_dil.shape[-1], 4)
self.assertGreater(
dc_maskstack_dil[..., 0].sum() + dc_maskstack_dil[..., 1].sum(),
dc_maskstack[..., 0].sum() + dc_maskstack[..., 1].sum())
def test_deepcell_transform_3d(self):
frames = 10
img_list = []
for im in _generate_test_masks():
frame_list = []
for _ in range(frames):
frame_list.append(label(im))
img_stack = np.array(frame_list)
img_list.append(img_stack)
# test single edge class
maskstack = np.vstack(img_list)
batch_count = maskstack.shape[0] // frames
new_shape = (batch_count, frames, *maskstack.shape[1:])
maskstack = np.reshape(maskstack, new_shape)
dc_maskstack = transform_utils.deepcell_transform(
maskstack, data_format=None, separate_edge_classes=False)
dc_maskstack_dil = transform_utils.deepcell_transform(
maskstack, dilation_radius=2,
data_format='channels_last',
separate_edge_classes=False)
self.assertEqual(dc_maskstack.shape[-1], 3)
self.assertEqual(dc_maskstack_dil.shape[-1], 3)
self.assertGreater(
dc_maskstack_dil[..., 0].sum() + dc_maskstack_dil[..., 1].sum(),
dc_maskstack[..., 0].sum() + dc_maskstack[..., 1].sum())
# test separate edge classes
maskstack = np.vstack(img_list)
batch_count = maskstack.shape[0] // frames
new_shape = (batch_count, frames, *maskstack.shape[1:])
maskstack = np.reshape(maskstack, new_shape)
dc_maskstack = transform_utils.deepcell_transform(
maskstack, data_format=None, separate_edge_classes=True)
dc_maskstack_dil = transform_utils.deepcell_transform(
maskstack, dilation_radius=2,
data_format='channels_last',
separate_edge_classes=True)
self.assertEqual(dc_maskstack.shape[-1], 4)
self.assertEqual(dc_maskstack_dil.shape[-1], 4)
self.assertGreater(
dc_maskstack_dil[..., 0].sum() + dc_maskstack_dil[..., 1].sum(),
dc_maskstack[..., 0].sum() + dc_maskstack[..., 1].sum())
def test_erode_edges_2d(self):
for img in _generate_test_masks():
img = label(img)
img = np.squeeze(img)
erode_0 = transform_utils.erode_edges(img, erosion_width=0)
erode_1 = transform_utils.erode_edges(img, erosion_width=1)
erode_2 = transform_utils.erode_edges(img, erosion_width=2)
self.assertEqual(img.shape, erode_0.shape)
self.assertEqual(erode_0.shape, erode_1.shape)
self.assertEqual(erode_1.shape, erode_2.shape)
self.assertAllEqual(erode_0, img)
self.assertGreater(np.sum(erode_0), np.sum(erode_1))
self.assertGreater(np.sum(erode_1), np.sum(erode_2))
# test too few dims
with self.assertRaises(ValueError):
erode_1 = transform_utils.erode_edges(img[0], erosion_width=1)
def test_erode_edges_3d(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
unique = np.squeeze(unique)
erode_0 = transform_utils.erode_edges(unique, erosion_width=0)
erode_1 = transform_utils.erode_edges(unique, erosion_width=1)
erode_2 = transform_utils.erode_edges(unique, erosion_width=2)
self.assertEqual(unique.shape, erode_0.shape)
self.assertEqual(erode_0.shape, erode_1.shape)
self.assertEqual(erode_1.shape, erode_2.shape)
self.assertAllEqual(erode_0, unique)
self.assertGreater(np.sum(erode_0), np.sum(erode_1))
self.assertGreater(np.sum(erode_1), np.sum(erode_2))
# test too many dims
with self.assertRaises(ValueError):
unique = np.expand_dims(unique, axis=-1)
erode_1 = transform_utils.erode_edges(unique, erosion_width=1)
def test_distance_transform_3d(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
K.set_image_data_format('channels_last')
bins = 3
distance = transform_utils.distance_transform_3d(unique, bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.distance_transform_3d(unique, bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
K.set_image_data_format('channels_first')
unique = np.rollaxis(unique, -1, 1)
bins = 3
distance = transform_utils.distance_transform_3d(unique, bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.distance_transform_3d(unique, bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
def test_distance_transform_2d(self):
for img in _generate_test_masks():
K.set_image_data_format('channels_last')
bins = 3
distance = transform_utils.distance_transform_2d(img, bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape)
bins = 4
distance = transform_utils.distance_transform_2d(img, bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape)
K.set_image_data_format('channels_first')
img = np.rollaxis(img, -1, 1)
bins = 3
distance = transform_utils.distance_transform_2d(img, bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
            self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
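            # (truncated in the source; the bins=4 case for channels_first
            # presumably mirrors the channels_last block above)
            bins = 4
            distance = transform_utils.distance_transform_2d(img, bins=bins)
            self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
            self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)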
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility modules for evaluating model from checkpoint.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
from tensorflow_graphics.projects.local_implicit_grid.core import implicit_nets as im
from tensorflow_graphics.projects.local_implicit_grid.core import local_implicit_grid_layer as lig
from tensorflow_graphics.projects.local_implicit_grid.core import model_g2g as g2g
from tensorflow_graphics.projects.local_implicit_grid.core import model_g2v as g2v
tf.logging.set_verbosity(tf.logging.ERROR)
def parse_param_file(param_file):
"""Parse parameter file for parameters."""
with gfile.GFile(param_file, 'r') as fh:
lines = fh.readlines()
d = {}
for l in lines:
l = l.rstrip('\n')
splits = l.split(':')
key = splits[0]
val_ = splits[1].strip()
if not val_:
val = ''
else:
try:
val = ast.literal_eval(val_)
except (ValueError, SyntaxError):
val = str(val_)
d[key] = val
return d
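# Illustration only (hypothetical file contents, not from the source): a
# params file with the lines "codelen: 32" and "net_type: imnet" would parse
# to {'codelen': 32, 'net_type': 'imnet'} -- numbers via ast.literal_eval,
# anything that fails to evaluate kept as a string.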
class RefinerEvaluator(object):
"""Load pretrained refiner and evaluate for a given code.
"""
def __init__(self, ckpt, codelen, dim=3, out_features=1, num_filters=128,
point_batch=20000):
self.ckpt = ckpt
self.codelen = codelen
self.dim = dim
self.out_features = out_features
self.num_filters = num_filters
self.point_batch = point_batch
self.graph = tf.Graph()
self._init_graph()
self.global_step_ = self.global_step.eval(session=self.sess)
def _init_graph(self):
"""Initialize computation graph for tensorflow.
"""
with self.graph.as_default():
self.refiner = im.ImNet(dim=self.dim,
in_features=self.codelen,
out_features=self.out_features,
num_filters=self.num_filters)
self.global_step = tf.get_variable('global_step', shape=[],
dtype=tf.int64)
self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])
lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
[self.point_batch, self.codelen])
code = tf.concat((self.pts_ph, lat), axis=-1) # [pb, 3+c]
vals = self.refiner(code, training=False) # [pb, 1]
self.vals = tf.squeeze(vals, axis=1) # [pb]
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.saver.restore(self.sess, self.ckpt)
def _get_grid_points(self, xmin, xmax, res):
x = np.linspace(xmin, xmax, res)
xyz = np.meshgrid(*tuple([x] * self.dim), indexing='ij')
xyz = np.stack(xyz, axis=-1)
xyz = xyz.reshape([-1, self.dim])
return xyz
def eval_points(self, lat, points):
"""Evaluate network at locations specified by points.
Args:
lat: [self.codelen,] np array, latent code.
points: [#v, self.dim] np array, point locations to evaluate.
Returns:
all_vals: [#v] np array, function values at locations.
"""
npt = points.shape[0]
npb = int(np.ceil(float(npt)/self.point_batch))
all_vals = np.zeros([npt], dtype=np.float32)
for idx in range(npb):
sid = int(idx * self.point_batch)
eid = int(min(npt, sid+self.point_batch))
pts = points[sid:eid]
pad_w = self.point_batch - (eid - sid)
pts = np.pad(pts, ((0, pad_w), (0, 0)), mode='constant')
with self.graph.as_default():
val = self.sess.run(self.vals, feed_dict={self.pts_ph: pts,
self.lat_ph: lat})
all_vals[sid:eid] = val[:(eid-sid)]
return all_vals
def eval_grid(self, lat, xmin=-1.0, xmax=1.0, res=64):
"""Evaluate network on a grid.
Args:
lat: [self.codelen,] np array, latent code.
xmin: float, minimum coordinate value for grid.
xmax: float, maximum coordinate value for grid.
res: int, resolution (per dimension) of grid.
Returns:
grid_val: [res, res, res] np.float32 array, grid of values from query.
"""
grid_points = self._get_grid_points(xmin=xmin, xmax=xmax, res=res)
point_val = self.eval_points(lat, grid_points)
grid_val = point_val.reshape([res, res, res])
return grid_val
class EncoderEvaluator(object):
"""Load pretrained grid encoder and evaluate single crops."""
def __init__(self,
ckpt,
in_grid_res=32,
encoder_nf=32,
codelen=32,
grid_batch=128):
"""Initialization function.
Args:
ckpt: str, path to checkpoint.
in_grid_res: int, resolution of grid to feed to encoder.
encoder_nf: int, number of base filters for encoder.
codelen: int, length of output latent code.
grid_batch: int, batch size of cut-out grid to evaluate at a time.
"""
self.ckpt = ckpt
self.codelen = codelen
self.grid_batch = grid_batch
self.in_grid_res = in_grid_res
self.encoder_nf = encoder_nf
self.graph = tf.Graph()
self._init_graph() # creates self.sess
def _init_graph(self):
"""Initialize computation graph for tensorflow.
"""
with self.graph.as_default():
self.encoder = g2v.GridEncoder(in_grid_res=self.in_grid_res,
num_filters=self.encoder_nf,
codelen=self.codelen,
name='g2v')
self.grid_ph = tf.placeholder(
tf.float32,
shape=[None, self.in_grid_res, self.in_grid_res, self.in_grid_res, 1])
self.lats = self.encoder(self.grid_ph, training=False) # [gb, codelen]
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.saver.restore(self.sess, self.ckpt)
def eval_grid(self, grid):
"""Strided evaluation of full grid into feature grid.
Args:
grid: [batch, gres, gres, gres, 1] input feature grid.
Returns:
codes: [batch, codelen] output feature gird.
"""
# initialize output feature grid
niters = int(np.ceil(grid.shape[0] / self.grid_batch))
codes = []
for idx in range(niters):
sid = idx * self.grid_batch
eid = min(sid+self.grid_batch, grid.shape[0])
c = self.sess.run(self.lats,
feed_dict={self.grid_ph: grid[sid:eid]})
codes.append(c)
codes = np.concatenate(codes, axis=0)
return codes.astype(np.float32)
class FullGridEncoderEvaluator(object):
"""Load pretrained grid encoder and evaluate a full input grid.
Performs windowed encoding and outputs an encoded feature grid.
"""
def __init__(self,
ckpt,
in_grid_res=32,
num_filters=32,
codelen=128,
grid_batch=128,
gres=256,
overlap=True):
"""Initialization function.
Args:
ckpt: str, path to checkpoint.
in_grid_res: int, resolution of grid to feed to encoder.
num_filters: int, number of base filters for encoder.
codelen: int, length of output latent code.
grid_batch: int, batch size of cut-out grid to evaluate at a time.
gres: int, resolution of the full grid.
overlap: bool, whether to do overlapping or non-overlapping cutout
evaluations.
"""
self.ckpt = ckpt
self.codelen = codelen
self.grid_batch = grid_batch
self.in_grid_res = in_grid_res
self.gres = gres
self.num_filters = num_filters
self.graph = tf.Graph()
self._init_graph()
self.global_step_ = self.global_step.eval(session=self.sess)
if overlap:
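      # overlapping windows advance by half the window size, so adjacent
      # crops share half of their voxels; otherwise the windows tile exactly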
ijk = np.arange(0, gres-int(in_grid_res/2), int(in_grid_res/2))
self.out_grid_res = ijk.shape[0]
else:
ijk = np.arange(0, gres, in_grid_res)
self.out_grid_res = ijk.shape[0]
self.ijk = np.meshgrid(ijk, ijk, ijk, indexing='ij')
self.ijk = np.stack(self.ijk, axis=-1).reshape([-1, 3])
def _init_graph(self):
"""Initialize computation graph for tensorflow."""
with self.graph.as_default():
self.encoder = g2v.GridEncoder(
in_grid_res=self.in_grid_res,
num_filters=self.num_filters,
codelen=self.codelen,
name='g2v')
self.global_step = tf.get_variable(
'global_step', shape=[], dtype=tf.int64)
self.grid_ph = tf.placeholder(
tf.float32, shape=[self.gres, self.gres, self.gres])
self.start_ph = tf.placeholder(tf.int32, shape=[self.grid_batch, 3])
self.ingrid = self._batch_slice(self.grid_ph, self.start_ph,
self.in_grid_res, self.grid_batch)
self.ingrid = self.ingrid[..., tf.newaxis]
self.lats = self.encoder(self.ingrid, training=False) # [gb, codelen]
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.saver.restore(self.sess, self.ckpt)
def _batch_slice(self, ary, start_ijk, w, batch_size):
"""Batched slicing of original grid.
Args:
ary: tensor, rank = 3.
start_ijk: [batch_size, 3] tensor, starting index.
w: width of cube to extract.
batch_size: int, batch size.
Returns:
batched_slices: [batch_size, w, w, w] tensor, batched slices of ary.
"""
batch_size = start_ijk.shape[0]
ijk = tf.range(w, dtype=tf.int32)
slice_idx = tf.meshgrid(ijk, ijk, ijk, indexing='ij')
slice_idx = tf.stack(
slice_idx, axis=-1) # [in_grid_res, in_grid_res, in_grid_res, 3]
slice_idx = tf.broadcast_to(slice_idx[tf.newaxis], [batch_size, w, w, w, 3])
offset = tf.broadcast_to(
start_ijk[:, tf.newaxis, tf.newaxis, tf.newaxis, :],
[batch_size, w, w, w, 3])
slice_idx += offset
# [batch_size, in_grid_res, in_grid_res, in_grid_res, 3]
batched_slices = tf.gather_nd(ary, slice_idx)
# [batch_size, in_grid_res, in_grid_res, in_grid_res]
return batched_slices
def eval_grid(self, grid):
"""Strided evaluation of full grid into feature grid.
Args:
grid: [gres, gres, gres] input feature grid.
Returns:
ogrid: [out_grid_res, out_grid_res, out_grid_res, codelen] output feature
gird.
"""
# initialize output feature grid
ogrid = np.zeros([self.ijk.shape[0], self.codelen])
niters = np.ceil(self.ijk.shape[0] / self.grid_batch).astype(np.int)
for idx in range(niters):
sid = idx * self.grid_batch
eid = min(sid + self.grid_batch, self.ijk.shape[0])
start_ijk = self.ijk[sid:eid]
# pad if last iteration does not have a full batch
pad_w = self.grid_batch - start_ijk.shape[0]
start_ijk = np.pad(start_ijk, ((0, pad_w), (0, 0)), mode='constant')
lats = self.sess.run(
self.lats, feed_dict={
self.grid_ph: grid,
self.start_ph: start_ijk
})
ogrid[sid:eid] = lats[:eid - sid]
ogrid = ogrid.reshape(
[self.out_grid_res, self.out_grid_res, self.out_grid_res, self.codelen])
return ogrid.astype(np.float32)
class LIGEvaluator(object):
"""Load pretrained grid refiner and evaluate a feature grid.
"""
def __init__(self,
ckpt,
size=(15, 15, 15),
in_features=32,
out_features=1,
x_location_max=1,
num_filters=32,
min_grid_value=(0., 0., 0.),
max_grid_value=(1., 1., 1.),
net_type='imnet',
method='linear',
point_batch=20000,
scope=''):
"""Initialization function.
Args:
ckpt: str, path to checkpoint.
size: list or tuple of ints, grid dimension in each dimension.
in_features: int, number of input channels.
out_features: int, number of output channels.
x_location_max: float, relative coordinate range for one voxel.
num_filters: int, number of filters for refiner.
min_grid_value: tuple, lower bound of query points.
max_grid_value: tuple, upper bound of query points.
net_type: str, one of occnet/deepsdf.
method: str, one of linear/nn.
point_batch: int, pseudo batch size for evaluating points.
scope: str, scope of imnet layer.
"""
self.dim = 3 # hardcode for dim = 3
self.ckpt = ckpt
self.size = size
self.x_location_max = x_location_max
self.num_filters = num_filters
self.in_features = in_features
self.out_features = out_features
self.net_type = net_type
self.method = method
self.point_batch = point_batch
self.scope = scope
self.min_grid_value = min_grid_value
self.max_grid_value = max_grid_value
self.graph = tf.Graph()
self._init_graph()
def _init_graph(self):
"""Initialize computation graph for tensorflow.
"""
with self.graph.as_default():
self.lig = lig.LocalImplicitGrid(size=self.size,
in_features=self.in_features,
out_features=self.out_features,
num_filters=self.num_filters,
net_type=self.net_type,
method=self.method,
x_location_max=self.x_location_max,
min_grid_value=self.min_grid_value,
max_grid_value=self.max_grid_value,
name='lig')
self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
self.latgrid_ph = tf.placeholder(tf.float32,
shape=[self.size[0],
self.size[1],
self.size[2],
self.in_features])
self.latgrid = self.latgrid_ph[tf.newaxis]
self.points = self.pts_ph[tf.newaxis]
vals = self.lig(self.latgrid, self.points, training=False) # [1,npts,1]
self.vals = tf.squeeze(vals, axis=[0, 2]) # [npts]
self.map_dict = self._get_var_mapping(model=self.lig)
self.saver = tf.train.Saver(self.map_dict)
self.sess = tf.Session()
self.saver.restore(self.sess, self.ckpt)
def _get_grid_points(self, xmin, xmax, res):
    x = np.linspace(xmin, xmax, res)
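    # (truncated in the source; presumably identical to
    # RefinerEvaluator._get_grid_points above)
    xyz = np.meshgrid(*tuple([x] * self.dim), indexing='ij')
    xyz = np.stack(xyz, axis=-1)
    xyz = xyz.reshape([-1, self.dim])
    return xyz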
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Image assessment algorithms. Typical overlap and error computation
measures to evaluate results from other processing units.
"""
import os
import os.path as op
import nibabel as nb
import numpy as np
from .. import config, logging
from ..interfaces.base import (
SimpleInterface,
BaseInterface,
traits,
TraitedSpec,
File,
InputMultiPath,
BaseInterfaceInputSpec,
isdefined,
)
from ..interfaces.nipy.base import NipyBaseInterface
iflogger = logging.getLogger("nipype.interface")
class DistanceInputSpec(BaseInterfaceInputSpec):
volume1 = File(
exists=True, mandatory=True, desc="Has to have the same dimensions as volume2."
)
volume2 = File(
exists=True, mandatory=True, desc="Has to have the same dimensions as volume1."
)
method = traits.Enum(
"eucl_min",
"eucl_cog",
"eucl_mean",
"eucl_wmean",
"eucl_max",
desc='""eucl_min": Euclidean distance between two closest points\
"eucl_cog": mean Euclidian distance between the Center of Gravity\
of volume1 and CoGs of volume2\
"eucl_mean": mean Euclidian minimum distance of all volume2 voxels\
to volume1\
"eucl_wmean": mean Euclidian minimum distance of all volume2 voxels\
to volume1 weighted by their values\
"eucl_max": maximum over minimum Euclidian distances of all volume2\
voxels to volume1 (also known as the Hausdorff distance)',
usedefault=True,
)
mask_volume = File(exists=True, desc="calculate overlap only within this mask.")
class DistanceOutputSpec(TraitedSpec):
distance = traits.Float()
point1 = traits.Array(shape=(3,))
point2 = traits.Array(shape=(3,))
histogram = File()
class Distance(BaseInterface):
"""Calculates distance between two volumes."""
input_spec = DistanceInputSpec
output_spec = DistanceOutputSpec
_hist_filename = "hist.pdf"
def _find_border(self, data):
from scipy.ndimage.morphology import binary_erosion
eroded = binary_erosion(data)
border = np.logical_and(data, np.logical_not(eroded))
return border
def _get_coordinates(self, data, affine):
if len(data.shape) == 4:
data = data[:, :, :, 0]
indices = np.vstack(np.nonzero(data))
indices = np.vstack((indices, np.ones(indices.shape[1])))
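        # appending a row of ones makes the voxel indices homogeneous, so the
        # 4x4 affine maps them directly to world (scanner) coordinates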
coordinates = np.dot(affine, indices)
return coordinates[:3, :]
def _eucl_min(self, nii1, nii2):
from scipy.spatial.distance import cdist, euclidean
origdata1 = np.asanyarray(nii1.dataobj).astype(bool)
border1 = self._find_border(origdata1)
origdata2 = np.asanyarray(nii2.dataobj).astype(bool)
border2 = self._find_border(origdata2)
set1_coordinates = self._get_coordinates(border1, nii1.affine)
set2_coordinates = self._get_coordinates(border2, nii2.affine)
dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
(point1, point2) = np.unravel_index(np.argmin(dist_matrix), dist_matrix.shape)
return (
euclidean(set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]),
set1_coordinates.T[point1, :],
set2_coordinates.T[point2, :],
)
def _eucl_cog(self, nii1, nii2):
from scipy.spatial.distance import cdist
from scipy.ndimage.measurements import center_of_mass, label
origdata1 = np.asanyarray(nii1.dataobj)
origdata1 = (np.rint(origdata1) != 0) & ~np.isnan(origdata1)
cog_t = np.array(center_of_mass(origdata1)).reshape(-1, 1)
cog_t = np.vstack((cog_t, np.array([1])))
cog_t_coor = np.dot(nii1.affine, cog_t)[:3, :]
origdata2 = np.asanyarray(nii2.dataobj)
origdata2 = (np.rint(origdata2) != 0) & ~np.isnan(origdata2)
(labeled_data, n_labels) = label(origdata2)
cogs = np.ones((4, n_labels))
for i in range(n_labels):
cogs[:3, i] = np.array(center_of_mass(origdata2, labeled_data, i + 1))
cogs_coor = np.dot(nii2.affine, cogs)[:3, :]
dist_matrix = cdist(cog_t_coor.T, cogs_coor.T)
return np.mean(dist_matrix)
def _eucl_mean(self, nii1, nii2, weighted=False):
from scipy.spatial.distance import cdist
origdata1 = np.asanyarray(nii1.dataobj).astype(bool)
border1 = self._find_border(origdata1)
origdata2 = np.asanyarray(nii2.dataobj).astype(bool)
set1_coordinates = self._get_coordinates(border1, nii1.affine)
set2_coordinates = self._get_coordinates(origdata2, nii2.affine)
dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
min_dist_matrix = np.amin(dist_matrix, axis=0)
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
plt.figure()
        plt.hist(min_dist_matrix, 50, density=True, facecolor="green")
plt.savefig(self._hist_filename)
plt.clf()
plt.close()
if weighted:
return np.average(min_dist_matrix, weights=nii2.dataobj[origdata2].flat)
else:
return np.mean(min_dist_matrix)
def _eucl_max(self, nii1, nii2):
from scipy.spatial.distance import cdist
origdata1 = np.asanyarray(nii1.dataobj)
origdata1 = (np.rint(origdata1) != 0) & ~np.isnan(origdata1)
origdata2 = np.asanyarray(nii2.dataobj)
origdata2 = (np.rint(origdata2) != 0) & ~np.isnan(origdata2)
if isdefined(self.inputs.mask_volume):
maskdata = np.asanyarray(nb.load(self.inputs.mask_volume).dataobj)
maskdata = (np.rint(maskdata) != 0) & ~np.isnan(maskdata)
origdata1 = np.logical_and(maskdata, origdata1)
origdata2 = np.logical_and(maskdata, origdata2)
if origdata1.max() == 0 or origdata2.max() == 0:
return np.nan
border1 = self._find_border(origdata1)
border2 = self._find_border(origdata2)
set1_coordinates = self._get_coordinates(border1, nii1.affine)
set2_coordinates = self._get_coordinates(border2, nii2.affine)
distances = cdist(set1_coordinates.T, set2_coordinates.T)
mins = np.concatenate((np.amin(distances, axis=0), np.amin(distances, axis=1)))
return np.max(mins)
def _run_interface(self, runtime):
# there is a bug in some scipy ndimage methods that gets tripped by memory mapped objects
nii1 = nb.load(self.inputs.volume1, mmap=False)
nii2 = nb.load(self.inputs.volume2, mmap=False)
if self.inputs.method == "eucl_min":
self._distance, self._point1, self._point2 = self._eucl_min(nii1, nii2)
elif self.inputs.method == "eucl_cog":
self._distance = self._eucl_cog(nii1, nii2)
elif self.inputs.method == "eucl_mean":
self._distance = self._eucl_mean(nii1, nii2)
elif self.inputs.method == "eucl_wmean":
self._distance = self._eucl_mean(nii1, nii2, weighted=True)
elif self.inputs.method == "eucl_max":
self._distance = self._eucl_max(nii1, nii2)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["distance"] = self._distance
if self.inputs.method == "eucl_min":
outputs["point1"] = self._point1
outputs["point2"] = self._point2
elif self.inputs.method in ["eucl_mean", "eucl_wmean"]:
outputs["histogram"] = os.path.abspath(self._hist_filename)
return outputs
class OverlapInputSpec(BaseInterfaceInputSpec):
volume1 = File(
exists=True, mandatory=True, desc="Has to have the same dimensions as volume2."
)
volume2 = File(
exists=True, mandatory=True, desc="Has to have the same dimensions as volume1."
)
mask_volume = File(exists=True, desc="calculate overlap only within this mask.")
bg_overlap = traits.Bool(
False, usedefault=True, mandatory=True, desc="consider zeros as a label"
)
out_file = File("diff.nii", usedefault=True)
weighting = traits.Enum(
"none",
"volume",
"squared_vol",
usedefault=True,
desc=(
"'none': no class-overlap weighting is "
"performed. 'volume': computed class-"
"overlaps are weighted by class volume "
"'squared_vol': computed class-overlaps "
"are weighted by the squared volume of "
"the class"
),
)
vol_units = traits.Enum(
"voxel", "mm", mandatory=True, usedefault=True, desc="units for volumes"
)
class OverlapOutputSpec(TraitedSpec):
jaccard = traits.Float(desc="averaged jaccard index")
dice = traits.Float(desc="averaged dice index")
roi_ji = traits.List(traits.Float(), desc=("the Jaccard index (JI) per ROI"))
roi_di = traits.List(traits.Float(), desc=("the Dice index (DI) per ROI"))
volume_difference = traits.Float(desc=("averaged volume difference"))
roi_voldiff = traits.List(traits.Float(), desc=("volume differences of ROIs"))
labels = traits.List(traits.Int(), desc=("detected labels"))
diff_file = File(exists=True, desc="error map of differences")
class Overlap(BaseInterface):
"""
Calculates Dice and Jaccard's overlap measures between two ROI maps.
The interface is backwards compatible with the former version in
which only binary files were accepted.
The averaged values of overlap indices can be weighted. Volumes
now can be reported in :math:`mm^3`, although they are given in voxels
to keep backwards compatibility.
Example
-------
>>> overlap = Overlap()
>>> overlap.inputs.volume1 = 'cont1.nii'
>>> overlap.inputs.volume2 = 'cont2.nii'
>>> res = overlap.run() # doctest: +SKIP
"""
input_spec = OverlapInputSpec
output_spec = OverlapOutputSpec
def _bool_vec_dissimilarity(self, booldata1, booldata2, method):
from scipy.spatial.distance import dice, jaccard
methods = {"dice": dice, "jaccard": jaccard}
if not (np.any(booldata1) or np.any(booldata2)):
return 0
return 1 - methods[method](booldata1.flat, booldata2.flat)
def _run_interface(self, runtime):
nii1 = nb.load(self.inputs.volume1)
nii2 = nb.load(self.inputs.volume2)
scale = 1.0
if self.inputs.vol_units == "mm":
scale = np.prod(nii1.header.get_zooms()[:3])
data1 = np.asanyarray(nii1.dataobj)
data1[np.logical_or(data1 < 0, np.isnan(data1))] = 0
max1 = int(data1.max())
data1 = data1.astype(np.min_scalar_type(max1))
data2 = np.asanyarray(nii2.dataobj).astype(np.min_scalar_type(max1))
data2[np.logical_or(data1 < 0, np.isnan(data1))] = 0
if isdefined(self.inputs.mask_volume):
maskdata = np.asanyarray(nb.load(self.inputs.mask_volume).dataobj)
maskdata = ~np.logical_or(maskdata == 0, np.isnan(maskdata))
data1[~maskdata] = 0
data2[~maskdata] = 0
res = []
volumes1 = []
volumes2 = []
labels = np.unique(data1[data1 > 0].reshape(-1)).tolist()
if self.inputs.bg_overlap:
labels.insert(0, 0)
for l in labels:
res.append(
self._bool_vec_dissimilarity(data1 == l, data2 == l, method="jaccard")
)
volumes1.append(scale * len(data1[data1 == l]))
volumes2.append(scale * len(data2[data2 == l]))
results = dict(jaccard=[], dice=[])
results["jaccard"] = np.array(res)
results["dice"] = 2.0 * results["jaccard"] / (results["jaccard"] + 1.0)
weights = np.ones((len(volumes1),), dtype=np.float32)
if self.inputs.weighting != "none":
weights = weights / np.array(volumes1)
if self.inputs.weighting == "squared_vol":
weights = weights**2
weights = weights / np.sum(weights)
both_data = np.zeros(data1.shape)
both_data[(data1 - data2) != 0] = 1
nb.save(
nb.Nifti1Image(both_data, nii1.affine, nii1.header), self.inputs.out_file
)
self._labels = labels
self._ove_rois = results
self._vol_rois = (np.array(volumes1) - np.array(volumes2)) / np.array(volumes1)
self._dice = round(np.sum(weights * results["dice"]), 5)
self._jaccard = round(np.sum(weights * results["jaccard"]), 5)
self._volume = np.sum(weights * self._vol_rois)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["labels"] = self._labels
outputs["jaccard"] = self._jaccard
outputs["dice"] = self._dice
outputs["volume_difference"] = self._volume
outputs["roi_ji"] = self._ove_rois["jaccard"].tolist()
outputs["roi_di"] = self._ove_rois["dice"].tolist()
outputs["roi_voldiff"] = self._vol_rois.tolist()
outputs["diff_file"] = os.path.abspath(self.inputs.out_file)
return outputs
class FuzzyOverlapInputSpec(BaseInterfaceInputSpec):
in_ref = InputMultiPath(
File(exists=True),
mandatory=True,
desc="Reference image. Requires the same dimensions as in_tst.",
)
in_tst = InputMultiPath(
File(exists=True),
mandatory=True,
desc="Test image. Requires the same dimensions as in_ref.",
)
in_mask = File(exists=True, desc="calculate overlap only within mask")
weighting = traits.Enum(
"none",
"volume",
"squared_vol",
usedefault=True,
desc=(
"'none': no class-overlap weighting is "
"performed. 'volume': computed class-"
"overlaps are weighted by class volume "
"'squared_vol': computed class-overlaps "
"are weighted by the squared volume of "
"the class"
),
)
out_file = File(
"diff.nii",
desc="alternative name for resulting difference-map",
usedefault=True,
)
class FuzzyOverlapOutputSpec(TraitedSpec):
jaccard = traits.Float(desc="Fuzzy Jaccard Index (fJI), all the classes")
dice = traits.Float(desc="Fuzzy Dice Index (fDI), all the classes")
class_fji = traits.List(
traits.Float(), desc="Array containing the fJIs of each computed class"
)
class_fdi = traits.List(
traits.Float(), desc="Array containing the fDIs of each computed class"
)
class FuzzyOverlap(SimpleInterface):
"""Calculates various overlap measures between two maps, using the fuzzy
definition proposed in: Crum et al., Generalized Overlap Measures for
Evaluation and Validation in Medical Image Analysis, IEEE Trans. Med.
Ima. 25(11),pp 1451-1461, Nov. 2006.
in_ref and in_tst are lists of 2/3D images, each element on the list
containing one volume fraction map of a class in a fuzzy partition
of the domain.
Example
-------
>>> overlap = FuzzyOverlap()
>>> overlap.inputs.in_ref = [ 'ref_class0.nii', 'ref_class1.nii' ]
>>> overlap.inputs.in_tst = [ 'tst_class0.nii', 'tst_class1.nii' ]
>>> overlap.inputs.weighting = 'volume'
>>> res = overlap.run() # doctest: +SKIP
"""
input_spec = FuzzyOverlapInputSpec
output_spec = FuzzyOverlapOutputSpec
def _run_interface(self, runtime):
# Load data
refdata = nb.concat_images(self.inputs.in_ref).dataobj
tstdata = nb.concat_images(self.inputs.in_tst).dataobj
# Data must have same shape
if not refdata.shape == tstdata.shape:
raise RuntimeError(
'Size of "in_tst" %s must match that of "in_ref" %s.'
% (tstdata.shape, refdata.shape)
)
ncomp = refdata.shape[-1]
# Load mask
mask = np.ones_like(refdata, dtype=bool)
if isdefined(self.inputs.in_mask):
mask = np.asanyarray(nb.load(self.inputs.in_mask).dataobj) > 0
mask = np.repeat(mask[..., np.newaxis], ncomp, -1)
assert mask.shape == refdata.shape
# Drop data outside mask
refdata = refdata[mask]
tstdata = tstdata[mask]
if np.any(refdata < 0.0):
iflogger.warning(
'Negative values encountered in "in_ref" input, '
"taking absolute values."
)
refdata = np.abs(refdata)
if np.any(tstdata < 0.0):
iflogger.warning(
'Negative values encountered in "in_tst" input, '
"taking absolute values."
)
tstdata = np.abs(tstdata)
if np.any(refdata > 1.0):
iflogger.warning(
'Values greater than 1.0 found in "in_ref" input, ' "scaling values."
)
refdata /= refdata.max()
if np.any(tstdata > 1.0):
iflogger.warning(
'Values greater than 1.0 found in "in_tst" input, ' "scaling values."
)
tstdata /= tstdata.max()
numerators = np.atleast_2d(np.minimum(refdata, tstdata).reshape((-1, ncomp)))
denominators = np.atleast_2d(np.maximum(refdata, tstdata).reshape((-1, ncomp)))
jaccards = numerators.sum(axis=0) / denominators.sum(axis=0)
# Calculate weights
weights = np.ones_like(jaccards, dtype=float)
if self.inputs.weighting != "none":
            # per-class volumes: reshape the masked voxels to
            # (voxels, classes) before reducing, so each class gets one volume
            volumes = np.sum(
                ((refdata + tstdata) > 0).reshape((-1, ncomp)), axis=0)
            weights = 1.0 / volumes
if self.inputs.weighting == "squared_vol":
weights = weights**2
weights = weights / np.sum(weights)
dices = 2.0 * jaccards / (jaccards + 1.0)
# Fill-in the results object
self._results["jaccard"] = float(weights.dot(jaccards))
self._results["dice"] = float(weights.dot(dices))
self._results["class_fji"] = [float(v) for v in jaccards]
self._results["class_fdi"] = [float(v) for v in dices]
return runtime
class ErrorMapInputSpec(BaseInterfaceInputSpec):
in_ref = File(
exists=True,
mandatory=True,
desc="Reference image. Requires the same dimensions as in_tst.",
)
in_tst = File(
exists=True,
mandatory=True,
desc="Test image. Requires the same dimensions as in_ref.",
)
mask = File(exists=True, desc="calculate overlap only within this mask.")
metric = traits.Enum(
"sqeuclidean",
"euclidean",
desc="error map metric (as implemented in scipy cdist)",
usedefault=True,
mandatory=True,
)
out_map = File(desc="Name for the output file")
class ErrorMapOutputSpec(TraitedSpec):
out_map = File(exists=True, desc="resulting error map")
distance = traits.Float(desc="Average distance between volume 1 and 2")
class ErrorMap(BaseInterface):
"""Calculates the error (distance) map between two input volumes.
Example
-------
>>> errormap = ErrorMap()
>>> errormap.inputs.in_ref = 'cont1.nii'
>>> errormap.inputs.in_tst = 'cont2.nii'
>>> res = errormap.run() # doctest: +SKIP
"""
input_spec = ErrorMapInputSpec
output_spec = ErrorMapOutputSpec
_out_file = ""
def _run_interface(self, runtime):
# Get two numpy data matrices
nii_ref = nb.load(self.inputs.in_ref)
ref_data = np.squeeze(nii_ref.dataobj)
tst_data = np.squeeze(nb.load(self.inputs.in_tst).dataobj)
assert ref_data.ndim == tst_data.ndim
# Load mask
comps = 1
mapshape = ref_data.shape
if ref_data.ndim == 4:
comps = ref_data.shape[-1]
mapshape = ref_data.shape[:-1]
if isdefined(self.inputs.mask):
msk = np.asanyarray(nb.load(self.inputs.mask).dataobj)
if mapshape != msk.shape:
raise RuntimeError(
"Mask should match volume shape, \
mask is %s and volumes are %s"
% (list(msk.shape), list(mapshape))
)
else:
msk = np.ones(shape=mapshape)
        # Flatten both volumes and compute the pixel difference
mskvector = msk.reshape(-1)
msk_idxs = np.where(mskvector == 1)
refvector = ref_data.reshape(-1, comps)[msk_idxs].astype(np.float32)
tstvector = tst_data.reshape(-1, comps)[msk_idxs].astype(np.float32)
diffvector = refvector - tstvector
# Scale the difference
if self.inputs.metric == "sqeuclidean":
errvector = diffvector**2
if comps > 1:
errvector = np.sum(errvector, axis=1)
else:
errvector = np.squeeze(errvector)
elif self.inputs.metric == "euclidean":
            errvector = np.linalg.norm(diffvector, axis=1)
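        # NOTE: the source is truncated here. A hedged sketch of the expected
        # tail (names follow the output spec above; the exact original
        # statements are assumptions): scatter the per-voxel errors back into
        # the volume, save the map, and report the average distance.
        errmap = np.zeros_like(mskvector, dtype=np.float32)
        errmap[msk_idxs] = errvector
        self._distance = float(np.mean(errvector))
        if isdefined(self.inputs.out_map):
            self._out_file = self.inputs.out_map
        else:
            self._out_file = op.abspath("error_map.nii.gz")
        nb.save(nb.Nifti1Image(errmap.reshape(mapshape), nii_ref.affine),
                self._out_file)
        return runtime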
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for `utils_np_test` and `utils_tf_test`.
This provides a base class for tests involving `graphs.GraphsTuple`
containing either numpy or tensorflow data. This base class is populated with
test data and also provides a convenience method for asserting graph equality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
from graph_nets import graphs
from graph_nets import utils_np
import numpy as np
import tensorflow as tf
@contextlib.contextmanager
def assert_new_op_prefixes(test, expected_prefix, assert_some_new_ops=True):
"""Asserts the namescope of tf ops created within the context manager."""
ops_before = [n.name for n in tf.get_default_graph().as_graph_def().node]
yield
ops_after = [n.name for n in tf.get_default_graph().as_graph_def().node]
new_ops = set(ops_after) - set(ops_before)
prefix_length = len(expected_prefix)
if assert_some_new_ops:
test.assertNotEqual(0, len(new_ops))
for op_name in new_ops:
test.assertEqual(expected_prefix, op_name[:prefix_length])
def mask_leading_dimension(tensor):
return tf.placeholder_with_default(tensor,
[None] + tensor.get_shape().as_list()[1:])
class GraphsTest(tf.test.TestCase):
"""A base class for tests that operate on GraphsNP or GraphsTF."""
def populate_test_data(self, max_size):
"""Populates the class fields with data used for the tests.
    This creates a batch of graphs with number of nodes from 1 to `num` and
    number of edges from 0 to `num`, plus an empty graph with no nodes and
    no edges (so that the total number of graphs is 1 + num * (num + 1)).
The nodes states, edges states and global states of the graphs are
created to have different types and shapes.
Those graphs are stored both as dictionaries (in `self.graphs_dicts_in`,
without `n_node` and `n_edge` information, and in `self.graphs_dicts_out`
with these two fields filled), and a corresponding numpy
`graphs.GraphsTuple` is stored in `self.reference_graph`.
Args:
max_size: The maximum number of nodes and edges (inclusive).
"""
filt = lambda x: (x[0] > 0) or (x[1] == 0)
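    # `filt` keeps (n_node, n_edge) pairs with at least one node, plus the
    # single fully empty graph; edges cannot exist without nodes.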
n_node, n_edge = zip(*list(
filter(filt, itertools.product(
range(max_size + 1), range(max_size + 1)))))
graphs_dicts = []
nodes = []
edges = []
receivers = []
senders = []
globals_ = []
def _make_default_state(shape, dtype):
return np.arange(np.prod(shape)).reshape(shape).astype(dtype)
for i, (n_node_, n_edge_) in enumerate(zip(n_node, n_edge)):
n = _make_default_state([n_node_, 7, 11], "f4") + i * 100.
e = _make_default_state([n_edge_, 13, 14], np.float64) + i * 100. + 1000.
r = _make_default_state([n_edge_], np.int32) % n_node[i]
s = (_make_default_state([n_edge_], np.int32) + 1) % n_node[i]
g = _make_default_state([5, 3], "f4") - i * 100. - 1000.
nodes.append(n)
edges.append(e)
receivers.append(r)
senders.append(s)
globals_.append(g)
graphs_dict = dict(nodes=n, edges=e, receivers=r, senders=s, globals=g)
graphs_dicts.append(graphs_dict)
# Graphs dicts without n_node / n_edge (to be used as inputs).
self.graphs_dicts_in = graphs_dicts
# Graphs dicts with n_node / n_node (to be checked against outputs).
self.graphs_dicts_out = []
for dict_ in self.graphs_dicts_in:
completed_dict = dict_.copy()
completed_dict["n_node"] = completed_dict["nodes"].shape[0]
completed_dict["n_edge"] = completed_dict["edges"].shape[0]
self.graphs_dicts_out.append(completed_dict)
# pylint: disable=protected-access
offset = utils_np._compute_stacked_offsets(n_node, n_edge)
# pylint: enable=protected-access
self.reference_graph = graphs.GraphsTuple(**dict(
nodes=np.concatenate(nodes, axis=0),
edges=np.concatenate(edges, axis=0),
receivers=np.concatenate(receivers, axis=0) + offset,
senders=np.concatenate(senders, axis=0) + offset,
globals=np.stack(globals_),
        n_node=np.array(n_node),
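        # (truncated in the source; GraphsTuple also requires n_edge, so the
        # original presumably closed with:)
        n_edge=np.array(n_edge)))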
import pyfits
import chimera
from chimera import config
import numpy as np
from pyraf import iraf
from datetime import datetime, timedelta
MONTHS = {"Jan": 1, "Feb": 2, "March": 3, "April": 4, "May": 5, "June": 6, "July": 7, "Aug": 8, "Sept": 9, "Oct": 10, "Nov": 11, "Dec": 12}
class Aperphot:
def __init__(self, sci_file, coords):
self.sci_file = sci_file
self.coords = coords
# load configuration file
cfg = config.Config()
self.cfg_data = cfg.load()
# Set header keyword parameters
self.setkeywords()
# Set parameters
self.setparams()
def setkeywords(self):
"""
Set FITS image header keyword parameters.
Parameters
----------
Returns
-------
None
"""
header = pyfits.getheader(self.sci_file, ignore_missing_end = True)
self.nx = header["NAXIS1"]
self.ny = header["NAXIS2"]
self.nframes = header["NAXIS3"]
self.exptime = header["EXPTIME"]
self.kintime = header["KINCYCTI"]
self.sn = header["SERIALN"].split("=")[1].strip()
self.amptype = header["AMPTYPE"].split()[0]
self.emgain = header["EMGAIN"]
self.hreadout = header["HREADOUT"].strip()
self.preampg = header["PREAMPG"].strip()
utcstart = header["UTCSTART"]
self.utcstart = self.parser(utcstart)
return
def setparams(self):
"""
Set datapars, centerpars, fitskypars and photpars.
        Parameters
        ----------
Returns
-------
None
"""
# Set parameters for daophot
self.fwhmpsf = self.cfg_data["Phot"]["fwhmpsf"]
self.sigma = self.cfg_data["Phot"]["sigma"]
self.exposure = self.cfg_data["Phot"]["exposure"]
self.calgorithm = self.cfg_data["Phot"]["calgorithm"]
self.cbox = self.cfg_data["Phot"]["cbox"]
self.maxshift = self.cfg_data["Phot"]["maxshift"]
self.salgorithm = self.cfg_data["Phot"]["salgorithm"]
self.annulus = self.cfg_data["Phot"]["annulus"]
self.dannulus = self.cfg_data["Phot"]["dannulus"]
self.apertures = self.cfg_data["Phot"]["apertures"]
self.zmag = self.cfg_data["Phot"]["zmag"]
self.readnoise = float(self.cfg_data["Detector"][self.sn][self.amptype][self.hreadout][self.preampg][1])
self.epadu = float(self.cfg_data["Detector"][self.sn][self.amptype][self.hreadout][self.preampg][0])
if self.amptype == "EMGAIN":
self.readnoise /= self.emgain
self.epadu /= self.emgain
# Set parameters for phot
self.method = "exact"
self.inner_radius = 14
self.outer_radius = 30
return
def setiraf(self):
"""
Set IRAF global parameters and load DAOPHOT package for aperture
photometry.
Parameters
----------
Returns
-------
None
"""
iraf.prcacheOff()
iraf.set(writepars=0)
# Load IRAF packages
iraf.noao(_doprint = 0)
iraf.noao.digiphot(_doprint = 0)
iraf.noao.digiphot.daophot(_doprint = 0)
return
def parser(self, utcstart):
"""
Datetime parser for CHIMERA UTCSTART header keyword.
Parameters
----------
utcstart : string
Datetime for start of frame (in UTC)
Returns
-------
dt : datetime struct
Datetime structure
"""
month, date, year, time = utcstart.split("-")
month = MONTHS[month]
date = int(date)
year = int(year)
hour, minu, sec = time.split(":")
hour = int(hour)
minu = int(minu)
sec, ms = sec.split(".")
sec = int(sec)
ms = int(ms) * 1000
dt = datetime(year, month, date, hour, minu, sec, ms)
return dt
def addtime(self, secs):
"""
Add time in seconds to UTC datetime.
Parameters
----------
secs : float
Time to add to UTC in seconds.
Returns
-------
dt : datetime structure
"""
td = timedelta(0, secs)
return self.utcstart + td
def daocog(self, tolerance = 0.01):
"""
Curve of growth to determine nominal aperture for photometry using DAOPHOT.
Parameters
----------
tolerance : float
Magnitude difference tolerance between different apertures
Returns
-------
aperture : float
Nominal aperture radius for photmetry
"""
# load iraf packages
self.setiraf()
        # Randomly perform curve of growth on 5 frames
framenum = np.random.randint(1, self.nframes, 5)
apertures = np.linspace(2,20,19)
# Iterate through the frames and determine nominal aperture
nom_aper = np.zeros(5, dtype = np.float32)
cnt = 0
for val in framenum:
outfile = self.sci_file.replace(".fits", "." + str(val) + ".cog.phot.1")
iraf.delete(outfile)
self.daophot(val, self.coords, outfile, apertures = "2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20")
mags = iraf.pdump(outfile, "mag", "yes", Stdout = 1)
mags = [mag if mag != 'INDEF' else 30.0 for mag in mags]
mags_arr = np.array(mags[1].split(),dtype = np.float32)
mags_diff = np.diff(mags_arr)
idx = np.where((np.abs(mags_diff) < tolerance) & (np.abs(mags_diff) != 0.0))
if len(idx[0]) != 0:
nom_aper[cnt] = apertures[idx[0][0]]
else:
nom_aper[cnt] = 10.0
cnt += 1
iraf.delete(outfile)
return np.median(nom_aper)
def cog(self, window_size, method, tolerance = 0.01):
"""
Curve of growth to determine nominal aperture for photometry using
astropy photutils.
Parameters
----------
tolerance : float
Magnitude difference tolerance between different apertures
Returns
-------
aperture : float
            Nominal aperture radius for photometry
"""
# Aperture values in pixels
apertures = np.array([2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
naper = apertures.shape[0]
# Randomly perform curve of growth on 5 frames
framenum = np.random.randint(1, self.nframes, 5)
apertures = np.linspace(2,20,19)
# Read input image and star position
image = chimera.fitsread(self.sci_file)
pos = np.loadtxt(self.coords, ndmin = 2)
# Iterate through the frames and determine nominal aperture
nom_aper = np.zeros(5, dtype = np.float32)
cnt = 0
for val in framenum:
mags_arr = np.zeros(len(apertures))
objpos = chimera.recenter(image[val,:,:], pos, window_size, method)
for i in range(naper):
flux = self.phot(image[val,:,:], objpos, aper = apertures[i])
try:
mags_arr[i] = -2.5 * np.log10(flux['flux'])
except:
                mags_arr[i] = -2.5 * np.log10(flux['flux'][1])
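            # NOTE: truncated in the source; presumably this mirrors daocog
            # above -- find the first aperture where consecutive magnitudes
            # agree within the tolerance (assumed reconstruction):
            mags_diff = np.diff(mags_arr)
            idx = np.where((np.abs(mags_diff) < tolerance) &
                           (np.abs(mags_diff) != 0.0))
            if len(idx[0]) != 0:
                nom_aper[cnt] = apertures[idx[0][0]]
            else:
                nom_aper[cnt] = 10.0
            cnt += 1
        return np.median(nom_aper)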
"""Create Figures and Extract Results for CVPR paper.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function
import os
from os.path import join, isdir, isfile
from collections import OrderedDict
import cPickle as pickle
import numpy as np
from scipy.misc import imsave
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import spiker
from spiker.models import utils
from spiker.data import ddd17
def compute_log_curve(log_name, num_runs=4):
"""Compute log curve, provide mean and standard deviation."""
log_collector = []
for run_idx in range(1, num_runs+1):
# prepare log path
log_file = join(spiker.SPIKER_EXPS+"-run-%d" % (run_idx),
log_name, "csv_history.log")
log_dict = utils.parse_csv_log(log_file)
log_collector.append(log_dict)
    def _stack_stats(key):
        # stack the per-run curves (honoring `num_runs` instead of
        # hardcoding four runs) and reduce to mean/std across runs
        stacked = np.vstack(
            [log[key][np.newaxis, ...] for log in log_collector])
        stacked = stacked.astype("float64")
        return np.mean(stacked, axis=0), np.std(stacked, axis=0)
    train_loss_mean, train_loss_std = _stack_stats("loss")
    test_loss_mean, test_loss_std = _stack_stats("val_loss")
    train_mse_mean, train_mse_std = _stack_stats("mean_squared_error")
    test_mse_mean, test_mse_std = _stack_stats("val_mean_squared_error")
trloss = (train_loss_mean, train_loss_std)
teloss = (test_loss_mean, test_loss_std)
trmse = (train_mse_mean, train_mse_std)
temse = (test_mse_mean, test_mse_std)
return trloss, teloss, trmse, temse
def get_best_result(log_dict, mode="regress"):
"""Get result from a list of log files."""
logs = OrderedDict()
sum_score = 0
for log_item in log_dict:
csv_log = utils.parse_csv_log(log_dict[log_item])
if mode == "regress":
logs[log_item] = np.min(csv_log["val_mean_squared_error"])
sum_score += logs[log_item]
elif mode == "class":
logs[log_item] = np.max(csv_log["val_accuracy"])
sum_score += logs[log_item]
elif mode == "binary":
logs[log_item] = np.max(csv_log["val_binary_accuracy"])
sum_score += logs[log_item]
return logs, sum_score
def get_log_file_dict(env="day", mode="full", task="steering",
exp_dir=spiker.SPIKER_EXPS):
"""Get data."""
data_range = 8 if env == "day" else 7
log_file_dict = OrderedDict()
for idx in xrange(data_range):
file_base = task+"-"+env+"-%d-" % (idx+1)+mode
log_file_dict[file_base] = join(exp_dir, file_base,
"csv_history.log")
return log_file_dict
def get_log_file_dict_review(env="day", mode="full", task="steering",
exp_dir=spiker.SPIKER_EXPS):
"""Get data."""
data_range = 7 if env == "day" else 8
log_file_dict = OrderedDict()
for idx in xrange(data_range):
file_base = task+"-"+env+"-%d-" % (idx+1)+mode+"-review"
log_file_dict[file_base] = join(exp_dir, file_base,
"csv_history.log")
return log_file_dict
# option = "get-full-results"
# option = "get-dvs-results"
# option = "get-aps-results"
# option = "get-loss-curves"
# option = "get-results-reproduce"
# option = "get-results-reproduce-steer"
option = "get-steering-results"
# option = "get-steering-results-review"
# option = "get-steering-results-combined"
# option = "attribute-hist"
# option = "get-steer-loss-curves"
# option = "get-results-reproduce-steer-all"
# option = "export-images-for-dataset"
# option = "export-rate"
# option = "align-fps-events"
if option == "get-full-results":
steer_day_logs = get_log_file_dict("day", "full", "steering")
accel_day_logs = get_log_file_dict("day", "full", "accel")
brake_day_logs = get_log_file_dict("day", "full", "brake")
steer_night_logs = get_log_file_dict("night", "full", "steering")
accel_night_logs = get_log_file_dict("night", "full", "accel")
brake_night_logs = get_log_file_dict("night", "full", "brake")
# get results
steer_day_res, steer_day_sum = get_best_result(steer_day_logs)
accel_day_res, accel_day_sum = get_best_result(accel_day_logs)
brake_day_res, brake_day_sum = get_best_result(
brake_day_logs, mode="binary")
steer_night_res, steer_night_sum = get_best_result(steer_night_logs)
accel_night_res, accel_night_sum = get_best_result(accel_night_logs)
brake_night_res, brake_night_sum = get_best_result(
brake_night_logs, mode="binary")
print ("-"*30)
for key in steer_night_res:
print (key, ":", np.sqrt(steer_night_res[key])*180/np.pi)
for key in steer_day_res:
print (key, ":", np.sqrt(steer_day_res[key])*180/np.pi)
print (np.sqrt((steer_day_sum+steer_night_sum)/15)*180/np.pi)
print ("-"*30)
for key in accel_night_res:
print (key, ":", np.sqrt(accel_night_res[key])*100)
for key in accel_day_res:
print (key, ":", np.sqrt(accel_day_res[key])*100)
print (np.sqrt((accel_day_sum+accel_night_sum)/15)*100)
print ("-"*30)
print (brake_night_res)
print (brake_day_res)
print ("-"*30)
print ((brake_day_sum+brake_night_sum)/15)
elif option == "get-steering-results":
sensor_mode = "aps"
# collecting logs
# run 1
day_logs_1 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-1")
night_logs_1 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-1")
# run 2
day_logs_2 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-2")
night_logs_2 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-2")
# run 3
day_logs_3 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-3")
night_logs_3 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-3")
# run 4
day_logs_4 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-4")
night_logs_4 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-4")
# collect results
day_res_1, day_sum_1 = get_best_result(day_logs_1)
night_res_1, night_sum_1 = get_best_result(night_logs_1)
day_res_2, day_sum_2 = get_best_result(day_logs_2)
night_res_2, night_sum_2 = get_best_result(night_logs_2)
day_res_3, day_sum_3 = get_best_result(day_logs_3)
night_res_3, night_sum_3 = get_best_result(night_logs_3)
day_res_4, day_sum_4 = get_best_result(day_logs_4)
night_res_4, night_sum_4 = get_best_result(night_logs_4)
# calculate mean and variance
for key in night_res_1:
temp_res = np.array([night_res_1[key], night_res_2[key],
night_res_3[key],
night_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std())
for key in day_res_1:
temp_res = np.array([day_res_1[key], day_res_2[key], day_res_3[key],
day_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), "std:", temp_res.std(),
"best", temp_res.argmin())
print (temp_res)
avg_error = np.array([day_sum_1+night_sum_1,
day_sum_2+night_sum_2,
day_sum_3+night_sum_3,
day_sum_4+night_sum_4])/15.
avg_error = np.sqrt(avg_error)*180/np.pi
print ("Average Error:", avg_error.mean(), "std:", avg_error.std())
elif option == "get-steering-results-review":
sensor_mode = "full"
# collecting logs
# run 1
day_logs_1 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-1")
night_logs_1 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-1")
# run 2
day_logs_2 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-2")
night_logs_2 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-2")
# run 3
day_logs_3 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-3")
night_logs_3 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-3")
# run 4
day_logs_4 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-4")
night_logs_4 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-4")
# collect results
day_res_1, day_sum_1 = get_best_result(day_logs_1)
night_res_1, night_sum_1 = get_best_result(night_logs_1)
day_res_2, day_sum_2 = get_best_result(day_logs_2)
night_res_2, night_sum_2 = get_best_result(night_logs_2)
day_res_3, day_sum_3 = get_best_result(day_logs_3)
night_res_3, night_sum_3 = get_best_result(night_logs_3)
day_res_4, day_sum_4 = get_best_result(day_logs_4)
night_res_4, night_sum_4 = get_best_result(night_logs_4)
# calculate mean and variance
for key in night_res_1:
temp_res = np.array([night_res_1[key], night_res_2[key],
night_res_3[key],
night_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std())
for key in day_res_1:
temp_res = np.array([day_res_1[key], day_res_2[key], day_res_3[key],
day_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std(),
"best", temp_res.argmin())
# print (temp_res)
avg_error = np.array([day_sum_1+night_sum_1,
day_sum_2+night_sum_2,
day_sum_3+night_sum_3,
day_sum_4+night_sum_4])/15.
avg_error = np.sqrt(avg_error)*180/np.pi
print ("Average Error:", avg_error.mean(), "std:", avg_error.std())
elif option == "get-steering-results-combined":
sensor_mode = "full"
# collecting logs
# run 1
day_logs_1 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-1")
night_logs_1 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-1")
# run 2
day_logs_2 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-2")
night_logs_2 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-2")
# run 3
day_logs_3 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-3")
night_logs_3 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-3")
# run 4
day_logs_4 = get_log_file_dict("day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-4")
night_logs_4 = get_log_file_dict("night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-run-4")
# collect results
day_res_1, day_sum_1 = get_best_result(day_logs_1)
night_res_1, night_sum_1 = get_best_result(night_logs_1)
day_res_2, day_sum_2 = get_best_result(day_logs_2)
night_res_2, night_sum_2 = get_best_result(night_logs_2)
day_res_3, day_sum_3 = get_best_result(day_logs_3)
night_res_3, night_sum_3 = get_best_result(night_logs_3)
day_res_4, day_sum_4 = get_best_result(day_logs_4)
night_res_4, night_sum_4 = get_best_result(night_logs_4)
# calculate mean and variance
for key in night_res_1:
temp_res = np.array([night_res_1[key], night_res_2[key],
night_res_3[key],
night_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std())
for key in day_res_1:
temp_res = np.array([day_res_1[key], day_res_2[key], day_res_3[key],
day_res_4[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std(),
"best", temp_res.argmin())
# collecting logs
# run 1
day_logs_1 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-1")
night_logs_1 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-1")
# run 2
day_logs_2 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-2")
night_logs_2 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-2")
# run 3
day_logs_3 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-3")
night_logs_3 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-3")
# run 4
day_logs_4 = get_log_file_dict_review(
"day", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-4")
night_logs_4 = get_log_file_dict_review(
"night", sensor_mode, "steering",
spiker.SPIKER_EXPS+"-review-4")
# collect results
day_res_1_r, day_sum_1_r = get_best_result(day_logs_1)
night_res_1_r, night_sum_1_r = get_best_result(night_logs_1)
day_res_2_r, day_sum_2_r = get_best_result(day_logs_2)
night_res_2_r, night_sum_2_r = get_best_result(night_logs_2)
day_res_3_r, day_sum_3_r = get_best_result(day_logs_3)
night_res_3_r, night_sum_3_r = get_best_result(night_logs_3)
day_res_4_r, day_sum_4_r = get_best_result(day_logs_4)
night_res_4_r, night_sum_4_r = get_best_result(night_logs_4)
# calculate mean and variance
for key in night_res_1_r:
temp_res = np.array([night_res_1_r[key], night_res_2_r[key],
night_res_3_r[key],
night_res_4_r[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std())
for key in day_res_1_r:
temp_res = np.array([day_res_1_r[key], day_res_2_r[key],
day_res_3_r[key],
day_res_4_r[key]])
temp_res = np.sqrt(temp_res)*180/np.pi
print (key, ":", temp_res.mean(), temp_res.std(),
"best", temp_res.argmin())
avg_error = np.array([day_sum_1+night_sum_1+day_sum_1_r+night_sum_1_r,
day_sum_2+night_sum_2+day_sum_2_r+night_sum_2_r,
day_sum_3+night_sum_3+day_sum_3_r+night_sum_3_r,
day_sum_4+night_sum_4+day_sum_4_r+night_sum_4_r])/30.
avg_error = np.sqrt(avg_error)*180/np.pi
print ("Average Error:", avg_error.mean(), "std:", avg_error.std())
elif option == "get-dvs-results":
steer_day_logs = get_log_file_dict("day", "dvs", "steering")
accel_day_logs = get_log_file_dict("day", "dvs", "accel")
brake_day_logs = get_log_file_dict("day", "dvs", "brake")
steer_night_logs = get_log_file_dict("night", "dvs", "steering")
accel_night_logs = get_log_file_dict("night", "dvs", "accel")
brake_night_logs = get_log_file_dict("night", "dvs", "brake")
# get results
steer_day_res, steer_day_sum = get_best_result(steer_day_logs)
accel_day_res, accel_day_sum = get_best_result(accel_day_logs)
brake_day_res, brake_day_sum = get_best_result(
brake_day_logs, mode="binary")
steer_night_res, steer_night_sum = get_best_result(steer_night_logs)
accel_night_res, accel_night_sum = get_best_result(accel_night_logs)
brake_night_res, brake_night_sum = get_best_result(
brake_night_logs, mode="binary")
print ("-"*30)
for key in steer_night_res:
print (key, ":", np.sqrt(steer_night_res[key])*180/np.pi)
for key in steer_day_res:
print (key, ":", np.sqrt(steer_day_res[key])*180/np.pi)
        print (np.sqrt((steer_day_sum+steer_night_sum)/15)*180/np.pi)
"""
@Origin : main.py by <NAME>
@Contact: <EMAIL>
@Time: 2018/10/13 10:39 PM
modified by {<NAME>, <NAME>}
@Contact: {cat0626, <EMAIL>}
@File: train.py
@Time: 2021.09.29
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
import numpy as np
from torch.utils.data import DataLoader
import sklearn.metrics as metrics
import torch.nn.functional as F
from data import ModelNet40
from model import PointNet, DGCNN
from util import cal_loss
from modelnetc_utils import eval_corrupt_wrapper, ModelNetC
def train_vanilla(args, io):
train_loader = DataLoader(ModelNet40(args, partition='train'), num_workers=8,
batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(args, partition='test'), num_workers=8,
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
if args.model == 'pointnet':
model = PointNet(args).to(device)
elif args.model == 'dgcnn':
model = DGCNN(args).to(device)
else:
raise Exception("Not implemented")
print(str(model))
model = nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
if args.use_sgd:
print("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
print("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
criterion = cal_loss
best_test_acc = 0
for epoch in range(args.epochs):
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
train_pred = []
train_true = []
for data, label in train_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
opt.step()
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
scheduler.step()
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
train_loss*1.0/count,
metrics.accuracy_score(
train_true, train_pred),
metrics.balanced_accuracy_score(
train_true, train_pred))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
loss = criterion(logits, label)
preds = logits.max(dim=1)[1]
count += batch_size
test_loss += loss.item() * batch_size
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
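        # Plausible completion (sketch), mirroring the train block above; the
        # checkpoint path and args.exp_name are assumptions, not from this file.
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
            epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)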
"""
First test at plate detection.
Follow these steps:
1. Turn image greyscale
2. Apply bilateral filter
3. Apply Canny and find contours
4. Get all rectangles from contours
5. Get contours size and sort them
6. Find contours with exactly 4 sides
7. Crop section of image
8. Apply tesseract OCR
"""
import cv2
import numpy as np
import imutils
import pytesseract
from PIL import Image
def auto_canny(image, sigma=0.55):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
image = cv2.imread('plate_dataset/IMG_8123.png')
image = imutils.resize(image, width=1000) # Check if necessary
image = image[100:520, 250:750]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
blackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
add = cv2.add(gray, tophat)
sub = cv2.subtract(add, blackHat)
cv2.imshow("", sub)
cv2.waitKey(0)
ret, thresh = cv2.threshold(sub,30,255,cv2.THRESH_TRIANGLE)
# ret, thresh = cv2.threshold(sub,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, rectKernel)
# perform a series of erosions and dilations to remove
# any small blobs of noise from the thresholded image
thresh = cv2.erode(thresh, None, iterations=5)
thresh = cv2.dilate(thresh, None, iterations=8)
edged = auto_canny(thresh)
cv2.imshow("thresh.png", thresh)
cv2.waitKey(0)
cv2.imshow("", edged)
cv2.waitKey(0)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
number_plate = []
peri = cv2.arcLength(cnts[0], True)
approx = cv2.approxPolyDP(cnts[0], 0.018 * peri, True)
number_plate.append(approx)
cv2.drawContours(image, number_plate, -1, (0,255,0), 3)
gray = cv2.bilateralFilter(gray,11,17,17)
# Masking the part other than the number plate
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,number_plate,0,255,0)
new_image = cv2.bitwise_and(image,image,mask=mask)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
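(bottomx, bottomy) = (np.max(x), np.max(y))
# Steps 7-8 of the docstring (sketch): crop the plate region and run
# Tesseract OCR. The --psm value is an assumption; tune it for your plates.
cropped = gray[topx:bottomx + 1, topy:bottomy + 1]
plate_text = pytesseract.image_to_string(Image.fromarray(cropped), config='--psm 7')
print("Detected plate text:", plate_text)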
'''
episodestats.py
implements statistic that are used in producing employment statistics for the
lifecycle model
'''
import h5py
import numpy as np
import numpy_financial as npf
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from scipy.stats import norm
#import locale
from tabulate import tabulate
import pandas as pd
import scipy.optimize
from tqdm import tqdm_notebook as tqdm
from . empstats import Empstats
from scipy.stats import gaussian_kde
#locale.setlocale(locale.LC_ALL, 'fi_FI')
def modify_offsettext(ax,text):
'''
For y axis
'''
x_pos = 0.0
y_pos = 1.0
horizontalalignment='left'
verticalalignment='bottom'
offset = ax.yaxis.get_offset_text()
#value=offset.get_text()
# value=float(value)
# if value>=1e12:
# text='biljoonaa'
# elif value>1e9:
# text=str(value/1e9)+' miljardia'
# elif value==1e9:
# text=' miljardia'
# elif value>1e6:
# text=str(value/1e6)+' miljoonaa'
# elif value==1e6:
# text='miljoonaa'
# elif value>1e3:
# text=str(value/1e3)+' tuhatta'
# elif value==1e3:
# text='tuhatta'
offset.set_visible(False)
ax.text(x_pos, y_pos, text, transform=ax.transAxes,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment)
class Labels():
def get_labels(self,language='English'):
labels={}
if language=='English':
labels['osuus tilassa x']='Proportion in state {} [%]'
labels['age']='Age [y]'
labels['ratio']='Proportion [%]'
labels['unemp duration']='Length of unemployment [y]'
labels['scaled freq']='Scaled frequency'
labels['probability']='probability'
labels['telp']='Employee pension premium'
labels['sairausvakuutus']='Health insurance'
labels['työttömyysvakuutusmaksu']='Unemployment insurance'
labels['puolison verot']='Partners taxes'
labels['taxes']='Taxes'
labels['asumistuki']='Housing benefit'
labels['toimeentulotuki']='Supplementary benefit'
labels['tyottomyysturva']='Unemployment benefit'
labels['paivahoito']='Daycare'
labels['elake']='Pension'
labels['tyollisyysaste']='Employment rate'
labels['havainto']='Observation'
labels['tyottomyysaste']='Unemployment rate [%]'
labels['tyottomien osuus']='Proportion of unemployed [%]'
labels['tyollisyysaste %']='Employment rate [%]'
labels['ero osuuksissa']='Difference in proportions [%]'
labels['osuus']='proportion'
labels['havainto, naiset']='data, women'
labels['havainto, miehet']='data, men'
            labels['palkkasumma']='Wage sum [euro]'
            labels['Verokiila %']='Tax wedge [%]'
            labels['Työnteko [hlö/htv]']='Employment [persons/FTE]'
            labels['Työnteko [htv]']='Employment [FTE]'
            labels['Työnteko [hlö]']='Employment [persons]'
            labels['Työnteko [miljoonaa hlö/htv]']='Employment [million persons/FTE]'
            labels['Työnteko [miljoonaa htv]']='Employment [million FTE]'
            labels['Työnteko [miljoonaa hlö]']='Employment [million persons]'
            labels['Osatyönteko [%-yks]']='Part-time work [pp]'
            labels['Muut tulot [euroa]']='Other income [euro]'
            labels['Henkilöitä']='Persons'
            labels['Verot [euroa]']='Taxes [euro]'
            labels['Verot [[miljardia euroa]']='Taxes [billion euro]'
            labels['Verokertymä [euroa]']='Tax revenue [euro]'
            labels['Verokertymä [miljardia euroa]']='Tax revenue [billion euro]'
            labels['Muut tarvittavat tulot [euroa]']='Other required income [euro]'
            labels['Muut tarvittavat tulot [miljardia euroa]']='Other required income [billion euro]'
labels['malli']='Life cycle model'
else:
labels['osuus tilassa x']='Osuus tilassa {} [%]'
labels['age']='Ikä [v]'
labels['ratio']='Osuus tilassa [%]'
labels['unemp duration']='työttömyysjakson pituus [v]'
labels['scaled freq']='skaalattu taajuus'
labels['probability']='todennäköisyys'
labels['telp']='TEL-P'
labels['sairausvakuutus']='Sairausvakuutus'
labels['työttömyysvakuutusmaksu']='Työttömyysvakuutusmaksu'
labels['puolison verot']='puolison verot'
labels['taxes']='Verot'
labels['asumistuki']='Asumistuki'
labels['toimeentulotuki']='Toimeentulotuki'
labels['tyottomyysturva']='Työttömyysturva'
labels['paivahoito']='Päivähoito'
labels['elake']='Eläke'
labels['tyollisyysaste']='työllisyysaste'
labels['havainto']='havainto'
labels['tyottomyysaste']='Työttömyysaste [%]'
labels['tyottomien osuus']='Työttömien osuus väestöstö [%]'
labels['tyollisyysaste %']='Työllisyysaste [%]'
labels['ero osuuksissa']='Ero osuuksissa [%]'
labels['osuus']='Osuus'
labels['havainto, naiset']='havainto, naiset'
labels['havainto, miehet']='havainto, miehet'
labels['palkkasumma']='Palkkasumma [euroa]'
labels['Verokiila %']='Verokiila [%]'
labels['Työnteko [hlö/htv]']='Työnteko [hlö/htv]'
labels['Työnteko [htv]']='Työnteko [htv]'
labels['Työnteko [hlö]']='Työnteko [hlö]'
labels['Työnteko [miljoonaa hlö/htv]']='Työnteko [miljoonaa hlö/htv]'
labels['Työnteko [miljoonaa htv]']='Työnteko [miljoonaa htv]'
labels['Työnteko [miljoonaa hlö]']='Työnteko [miljoonaa hlö]'
labels['Osatyönteko [%-yks]']='Osa-aikatyössä [%-yks]'
labels['Muut tulot [euroa]']='Muut tulot [euroa]'
labels['Henkilöitä']='Henkilöitä'
labels['Verot [euroa]']='Verot [euroa]'
labels['Verot [[miljardia euroa]']='Verot [[miljardia euroa]'
labels['Verokertymä [euroa]']='Verokertymä [euroa]'
labels['Verokertymä [miljardia euroa]']='Verokertymä [miljardia euroa]'
labels['Muut tarvittavat tulot [euroa]']='Muut tarvittavat tulot [euroa]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Muut tarvittavat tulot [miljardia euroa]'
labels['malli']='elinkaarimalli'
return labels
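# Example usage (sketch):
#   lab = Labels()
#   labels = lab.get_labels(language='English')
#   labels['age']   # -> 'Age [y]'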
class EpisodeStats():
def __init__(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year=2018,version=3,params=None,gamma=0.92,lang='English'):
self.version=version
self.gamma=gamma
self.params=params
self.lab=Labels()
self.reset(timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,params=params,lang=lang)
print('version',version)
def reset(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,version=None,params=None,lang=None,dynprog=False):
self.min_age=min_age
self.max_age=max_age
self.min_retirementage=min_retirementage
self.minimal=minimal
if params is not None:
self.params=params
if lang is None:
self.language='English'
else:
self.language=lang
if version is not None:
self.version=version
self.setup_labels()
self.n_employment=n_emps
self.n_time=n_time
        self.timestep=timestep # 0.25 = 3-month step
        self.inv_timestep=int(np.round(1/self.timestep)) # must be an integer
self.n_pop=n_pop
self.year=year
self.env=env
self.reaalinen_palkkojenkasvu=0.016
self.palkkakerroin=(0.8*1+0.2*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.elakeindeksi=(0.2*1+0.8*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.dynprog=dynprog
if self.minimal:
self.version=0
if self.version in set([0,101]):
self.n_groups=1
else:
self.n_groups=6
self.empstats=Empstats(year=self.year,max_age=self.max_age,n_groups=self.n_groups,timestep=self.timestep,n_time=self.n_time,
min_age=self.min_age)
self.init_variables()
def init_variables(self):
n_emps=self.n_employment
self.empstate=np.zeros((self.n_time,n_emps))
self.gempstate=np.zeros((self.n_time,n_emps,self.n_groups))
self.deceiced=np.zeros((self.n_time,1))
self.alive=np.zeros((self.n_time,1))
self.galive=np.zeros((self.n_time,self.n_groups))
self.rewstate=np.zeros((self.n_time,n_emps))
self.poprewstate=np.zeros((self.n_time,self.n_pop))
self.salaries_emp=np.zeros((self.n_time,n_emps))
#self.salaries=np.zeros((self.n_time,self.n_pop))
self.actions=np.zeros((self.n_time,self.n_pop))
self.popempstate=np.zeros((self.n_time,self.n_pop))
self.popunemprightleft=np.zeros((self.n_time,self.n_pop))
self.popunemprightused=np.zeros((self.n_time,self.n_pop))
self.tyoll_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.unemp_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.siirtyneet=np.zeros((self.n_time,n_emps))
self.siirtyneet_det=np.zeros((self.n_time,n_emps,n_emps))
self.pysyneet=np.zeros((self.n_time,n_emps))
self.aveV=np.zeros((self.n_time,self.n_pop))
self.time_in_state=np.zeros((self.n_time,n_emps))
self.stat_tyoura=np.zeros((self.n_time,n_emps))
self.stat_toe=np.zeros((self.n_time,n_emps))
self.stat_pension=np.zeros((self.n_time,n_emps))
self.stat_paidpension=np.zeros((self.n_time,n_emps))
self.out_of_work=np.zeros((self.n_time,n_emps))
self.stat_unemp_len=np.zeros((self.n_time,self.n_pop))
self.stat_wage_reduction=np.zeros((self.n_time,n_emps))
self.stat_wage_reduction_g=np.zeros((self.n_time,n_emps,self.n_groups))
self.infostats_group=np.zeros((self.n_pop,1))
self.infostats_taxes=np.zeros((self.n_time,1))
self.infostats_wagetaxes=np.zeros((self.n_time,1))
self.infostats_taxes_distrib=np.zeros((self.n_time,n_emps))
self.infostats_etuustulo=np.zeros((self.n_time,1))
self.infostats_etuustulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_perustulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo_eielakkeella=np.zeros((self.n_time,1))
self.infostats_palkkatulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_palkkatulo_eielakkeella_group=np.zeros((self.n_time,1))
self.infostats_ansiopvraha=np.zeros((self.n_time,1))
self.infostats_ansiopvraha_group=np.zeros((self.n_time,self.n_groups))
self.infostats_asumistuki=np.zeros((self.n_time,1))
self.infostats_asumistuki_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero=np.zeros((self.n_time,1))
self.infostats_valtionvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_kunnallisvero=np.zeros((self.n_time,1))
self.infostats_kunnallisvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_kunnallisvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_ptel=np.zeros((self.n_time,1))
self.infostats_tyotvakmaksu=np.zeros((self.n_time,1))
self.infostats_tyoelake=np.zeros((self.n_time,1))
self.infostats_kokoelake=np.zeros((self.n_time,1))
self.infostats_opintotuki=np.zeros((self.n_time,1))
self.infostats_isyyspaivaraha=np.zeros((self.n_time,1))
self.infostats_aitiyspaivaraha=np.zeros((self.n_time,1))
self.infostats_kotihoidontuki=np.zeros((self.n_time,1))
self.infostats_sairauspaivaraha=np.zeros((self.n_time,1))
self.infostats_toimeentulotuki=np.zeros((self.n_time,1))
self.infostats_tulot_netto=np.zeros((self.n_time,1))
self.infostats_pinkslip=np.zeros((self.n_time,n_emps))
self.infostats_pop_pinkslip=np.zeros((self.n_time,self.n_pop))
self.infostats_chilren18_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren7_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren18=np.zeros((self.n_time,1))
self.infostats_chilren7=np.zeros((self.n_time,1))
self.infostats_tyelpremium=np.zeros((self.n_time,self.n_pop))
self.infostats_paid_tyel_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_sairausvakuutus=np.zeros((self.n_time))
self.infostats_pvhoitomaksu=np.zeros((self.n_time,self.n_pop))
self.infostats_ylevero=np.zeros((self.n_time,1))
self.infostats_ylevero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_irr=np.zeros((self.n_pop,1))
self.infostats_npv0=np.zeros((self.n_pop,1))
self.infostats_mother_in_workforce=np.zeros((self.n_time,1))
self.infostats_children_under3=np.zeros((self.n_time,self.n_pop))
self.infostats_children_under7=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis_acc=np.zeros((self.n_time,self.n_pop))
self.infostats_toe=np.zeros((self.n_time,self.n_pop))
self.infostats_ove=np.zeros((self.n_time,n_emps))
self.infostats_kassanjasen=np.zeros((self.n_time))
self.infostats_poptulot_netto=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_wage=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_equivalent_income=np.zeros(self.n_time)
self.infostats_alv=np.zeros(self.n_time)
self.infostats_puoliso=np.zeros(self.n_time)
self.pop_predrew=np.zeros((self.n_time,self.n_pop))
if self.version==101:
self.infostats_savings=np.zeros((self.n_time,self.n_pop))
self.sav_actions=np.zeros((self.n_time,self.n_pop))
def add(self,n,act,r,state,newstate,q=None,debug=False,plot=False,aveV=None,pred_r=None):
if self.version==0:
emp,_,_,a,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
puoliso=0
elif self.version==1:
# v1
emp,_,_,_,a,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,oof,bu,wr,p=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==2:
# v2
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==3:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58,ove,jasen=self.env.state_decode(newstate)
puoliso=0
elif self.version==4:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,\
c3,c7,c18,unemp_left,aa,toe58,ove,jasen,puoliso,puoliso_tyossa,puoliso_palkka=self.env.state_decode(newstate)
elif self.version==101:
emp,_,_,a,_,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage,savings=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
t=int(np.round((a2-self.min_age)*self.inv_timestep))#-1
if a2>a and newemp>=0: # new state is not reset (age2>age)
if a2>self.min_retirementage and newemp==3 and self.version in set([1,2,3,4]):
newemp=2
if self.version in set([1,2,3,4]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.poprewstate[t,n]=r
self.actions[t,n]=act
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
if tis<=0.25 and newemp==5:
self.infostats_mother_in_workforce[t]+=1
self.infostats_pinkslip[t,newemp]+=pink
self.infostats_pop_pinkslip[t,n]=pink
self.gempstate[t,newemp,g]+=1
self.stat_wage_reduction[t,newemp]+=wr
self.stat_wage_reduction_g[t,newemp,g]+=wr
self.galive[t,g]+=1
self.stat_tyoura[t,newemp]+=ura
self.stat_toe[t,newemp]+=toe
self.stat_pension[t,newemp]+=newpen
self.stat_paidpension[t,newemp]+=paidpens
self.stat_unemp_len[t,n]=tis
self.popunemprightleft[t,n]=-self.env.unempright_left(newemp,tis,bu,a2,ura)
self.popunemprightused[t,n]=bu
self.infostats_group[n]=int(g)
self.infostats_unempwagebasis[t,n]=uw
self.infostats_unempwagebasis_acc[t,n]=uwr
self.infostats_toe[t,n]=toe
self.infostats_ove[t,newemp]+=ove
self.infostats_kassanjasen[t]+=jasen
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
self.infostats_puoliso[t]+=puoliso
if q is not None:
#print(newsal,q['palkkatulot'])
self.infostats_taxes[t]+=q['verot']*self.timestep*12
self.infostats_wagetaxes[t]+=q['verot_ilman_etuuksia']*self.timestep*12
self.infostats_taxes_distrib[t,newemp]+=q['verot']*self.timestep*12
self.infostats_etuustulo[t]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_etuustulo_group[t,g]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_perustulo[t]+=q['perustulo']*self.timestep*12
self.infostats_palkkatulo[t]+=q['palkkatulot']*self.timestep*12
self.infostats_palkkatulo_eielakkeella[t]+=q['palkkatulot_eielakkeella']*self.timestep*12
self.infostats_ansiopvraha[t]+=q['ansiopvraha']*self.timestep*12
self.infostats_asumistuki[t]+=q['asumistuki']*self.timestep*12
self.infostats_valtionvero[t]+=q['valtionvero']*self.timestep*12
self.infostats_valtionvero_distrib[t,newemp]+=q['valtionvero']*self.timestep*12
self.infostats_kunnallisvero[t]+=q['kunnallisvero']*self.timestep*12
self.infostats_kunnallisvero_distrib[t,newemp]+=q['kunnallisvero']*self.timestep*12
self.infostats_ptel[t]+=q['ptel']*self.timestep*12
self.infostats_tyotvakmaksu[t]+=q['tyotvakmaksu']*self.timestep*12
self.infostats_tyoelake[t]+=q['elake_maksussa']*self.timestep*12
self.infostats_kokoelake[t]+=q['kokoelake']*self.timestep*12
self.infostats_opintotuki[t]+=q['opintotuki']*self.timestep*12
self.infostats_isyyspaivaraha[t]+=q['isyyspaivaraha']*self.timestep*12
self.infostats_aitiyspaivaraha[t]+=q['aitiyspaivaraha']*self.timestep*12
self.infostats_kotihoidontuki[t]+=q['kotihoidontuki']*self.timestep*12
self.infostats_sairauspaivaraha[t]+=q['sairauspaivaraha']*self.timestep*12
self.infostats_toimeentulotuki[t]+=q['toimtuki']*self.timestep*12
self.infostats_tulot_netto[t]+=q['kateen']*self.timestep*12
self.infostats_tyelpremium[t,n]=q['tyel_kokomaksu']*self.timestep*12
self.infostats_paid_tyel_pension[t,n]=q['puhdas_tyoelake']*self.timestep*12
self.infostats_sairausvakuutus[t]+=q['sairausvakuutus']*self.timestep*12
self.infostats_pvhoitomaksu[t,n]=q['pvhoito']*self.timestep*12
self.infostats_ylevero[t]+=q['ylevero']*self.timestep*12
self.infostats_ylevero_distrib[t,newemp]=q['ylevero']*self.timestep*12
self.infostats_poptulot_netto[t,n]=q['kateen']*self.timestep*12
self.infostats_children_under3[t,n]=c3
self.infostats_children_under7[t,n]=c7
self.infostats_npv0[n]=q['multiplier']
self.infostats_equivalent_income[t]+=q['eq']
if 'alv' in q:
self.infostats_alv[t]+=q['alv']
#self.infostats_kassanjasen[t]+=1
elif self.version in set([0,101]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.infostats_tulot_netto[t]+=q['netto'] # already at annual level
self.infostats_poptulot_netto[t,n]=q['netto']
self.poprewstate[t,n]=r
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
self.infostats_equivalent_income[t]+=q['eq']
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
if self.dynprog and pred_r is not None:
self.pop_predrew[t,n]=pred_r
if self.version==101:
self.infostats_savings[t,n]=savings
self.actions[t,n]=act[0]
self.sav_actions[t,n]=act[1]
else:
self.actions[t,n]=act
# if self.version in set([1,2,3]):
# self.gempstate[t,newemp,g]+=1
# self.stat_wage_reduction[t,newemp]+=wr
# self.galive[t,g]+=1
# self.stat_tyoura[t,newemp]+=ura
# self.stat_toe[t,newemp]+=toe
# self.stat_pension[t,newemp]+=newpen
# self.stat_paidpension[t,newemp]+=paidpens
# self.stat_unemp_len[t,n]=tis
# self.popunemprightleft[t,n]=0
# self.popunemprightused[t,n]=0
if aveV is not None:
self.aveV[t,n]=aveV
if not emp==newemp:
self.siirtyneet[t,emp]+=1
self.siirtyneet_det[t,emp,newemp]+=1
else:
self.pysyneet[t,emp]+=1
elif newemp<0:
self.deceiced[t]+=1
def scale_error(self,x,target=None,averaged=False):
return (target-self.comp_scaled_consumption(x,averaged=averaged))
def comp_employed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=np.squeeze(self.gempstate[:,:,g])
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
else:
            # employed = those in full-time work, part-time work, retirement+work, retirement+part-time work
            # those on paternity leave are excluded, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
return tyoll_osuus,htv_osuus
def comp_employed_aggregate(self,emp=None,start=20,end=63.5,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
else:
            # employed = those in full-time work, part-time work, retirement+work, retirement+part-time work
            # those on paternity leave are excluded, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])/nn
htv_osuus=self.comp_state_stats(htv_osuus,start=start,end=end,ratio=True)
tyoll_osuus=self.comp_state_stats(tyoll_osuus,start=start,end=end,ratio=True)
return tyoll_osuus,htv_osuus
def comp_group_ps(self):
return self.comp_palkkasumma(grouped=True)
def comp_palkkasumma(self,start=19,end=68,grouped=False,scale_time=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
if grouped:
            scalex=demog2/self.n_pop*scale
ps=np.zeros((self.n_time,6))
ps_norw=np.zeros((self.n_time,6))
a_ps=np.zeros(6)
a_ps_norw=np.zeros(6)
for k in range(self.n_pop):
g=int(self.infostats_group[k,0])
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,g]+=self.infostats_pop_wage[t,k]
ps_norw[t,g]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
                        ps[t,g]+=self.infostats_pop_wage[t,k]
for g in range(6):
a_ps[g]=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage,g])
a_ps_norw[g]=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage,g])
else:
            scalex=demog2/self.n_pop*scale
ps=np.zeros((self.n_time,1))
ps_norw=np.zeros((self.n_time,1))
for k in range(self.n_pop):
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,0]+=self.infostats_pop_wage[t,k]
ps_norw[t,0]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,0]+=self.infostats_pop_wage[t,k]
a_ps=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage])
a_ps_norw=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage])
return a_ps,a_ps_norw
def comp_stats_agegroup(self,border=[19,35,50]):
n_groups=len(border)
low=border.copy()
high=border.copy()
high[0:n_groups-1]=border[1:n_groups]
high[-1]=65
employed=np.zeros(n_groups)
unemployed=np.zeros(n_groups)
ahtv=np.zeros(n_groups)
parttimeratio=np.zeros(n_groups)
unempratio=np.zeros(n_groups)
empratio=np.zeros(n_groups)
i_ps=np.zeros(n_groups)
i_ps_norw=np.zeros(n_groups)
for n in range(n_groups):
l=low[n]
h=high[n]
htv,tyollvaikutus,tyollaste,tyotosuus,tyottomat,osatyollaste=\
self.comp_tyollisyys_stats(self.empstate,scale_time=True,start=l,end=h,agegroups=True)
ps,ps_norw=self.comp_palkkasumma(start=l,end=h)
print(f'l {l} h {h}\nhtv {htv}\ntyollaste {tyollaste}\ntyotosuus {tyotosuus}\ntyottomat {tyottomat}\nosatyollaste {osatyollaste}\nps {ps}')
employed[n]=tyollvaikutus
ahtv[n]=htv
unemployed[n]=tyottomat
unempratio[n]=tyotosuus
empratio[n]=tyollaste
parttimeratio[n]=osatyollaste
i_ps[n]=ps
i_ps_norw[n]=ps_norw
return employed,ahtv,unemployed,parttimeratio,i_ps,i_ps_norw,unempratio,empratio
def comp_unemployed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
else:
            # employed = those in full-time work, part-time work, retirement+work, retirement+part-time work
            # those on paternity leave are excluded, even though the leave lasts under 3 months
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])[:,None]
#tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
return tyot_osuus
def comp_unemployed_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
else:
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])/nn
#print(f'tyot_osuus {tyot_osuus}')
unemp=self.comp_state_stats(tyot_osuus,start=start,end=end,ratio=True)
return unemp
def comp_parttime_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
'''
        Head counts (NOT full-time equivalents, htv!)
'''
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if not self.minimal:
tyossa=(emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])/nn
osatyossa=(emp[:,10]+emp[:,8])/nn
else:
tyossa=emp[:,1]/nn
osatyossa=0*tyossa
osatyo_osuus=osatyossa/tyossa
osatyo_osuus=self.comp_state_stats(osatyo_osuus,start=start,end=end,ratio=True)
kokotyo_osuus=1-osatyo_osuus
return kokotyo_osuus,osatyo_osuus
def comp_parttime_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
kokotyo_osuus=(emp[:,1])/nn
osatyo_osuus=(emp[:,3])/nn
else:
if grouped:
for g in range(6):
kokotyo_osuus=(emp[:,1,g]+emp[:,9,g])/nn
osatyo_osuus=(emp[:,8,g]+emp[:,10,g])/nn
else:
kokotyo_osuus=(emp[:,1]+emp[:,9])/nn
osatyo_osuus=(emp[:,8]+emp[:,10])/nn
osatyo_osuus=np.reshape(osatyo_osuus,(osatyo_osuus.shape[0],1))
kokotyo_osuus=np.reshape(kokotyo_osuus,(osatyo_osuus.shape[0],1))
return kokotyo_osuus,osatyo_osuus
def comp_employed_ratio(self,emp):
tyoll_osuus,htv_osuus=self.comp_employed_ratio_by_age(emp)
tyot_osuus=self.comp_unemployed_ratio_by_age(emp)
kokotyo_osuus,osatyo_osuus=self.comp_parttime_ratio_by_age(emp)
return tyoll_osuus,htv_osuus,tyot_osuus,kokotyo_osuus,osatyo_osuus
def comp_unemployed_detailed(self,emp):
if self.minimal:
ansiosid_osuus=emp[:,0]/np.sum(emp,1)
tm_osuus=ansiosid_osuus*0
else:
            # employed = those in full-time work, part-time work, retirement+work, retirement+part-time work
            # those on paternity leave are excluded, even though the leave lasts under 3 months
ansiosid_osuus=(emp[:,0]+emp[:,4])/np.sum(emp,1)
tm_osuus=(emp[:,13])/np.sum(emp,1)
return ansiosid_osuus,tm_osuus
def comp_tyollisyys_stats(self,emp,scale_time=True,start=19,end=68,full=False,tyot_stats=False,agg=False,shapes=False,only_groups=False,g=0,agegroups=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
scalex=demog2[min_cage:max_cage]/self.n_pop*scale
if only_groups:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
else:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
htv=np.sum(scalex*htvosuus[min_cage:max_cage])
tyollvaikutus=np.sum(scalex*tyollosuus[min_cage:max_cage])
tyottomat=np.sum(scalex*tyot_osuus[min_cage:max_cage])
osatyollvaikutus=np.sum(scalex*osatyo_osuus[min_cage:max_cage])
kokotyollvaikutus=np.sum(scalex*kokotyo_osuus[min_cage:max_cage])
haj=np.mean(np.std(tyollosuus[min_cage:max_cage]))
tyollaste=tyollvaikutus/(np.sum(scalex)*self.n_pop)
osatyollaste=osatyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
kokotyollaste=kokotyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
if tyot_stats:
if agg:
#d2=np.squeeze(demog2)
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
                return tyolliset_osuus,tyottomat_osuus
else:
d2=np.squeeze(demog2)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
elif full:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus,osatyollvaikutus,kokotyollvaikutus,osatyollaste,kokotyollaste
elif agegroups:
tyot_osuus=self.comp_unemployed_aggregate(start=start,end=end)
return htv,tyollvaikutus,tyollaste,tyot_osuus,tyottomat,osatyollaste
else:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus
def comp_employment_stats(self,scale_time=True,returns=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(self.min_age)
max_cage=self.map_age(self.max_age)+1
        scalex=np.squeeze(demog2/self.n_pop*scale)
d=np.squeeze(demog2[min_cage:max_cage])
self.ratiostates=self.empstate/self.alive
self.demogstates=(self.empstate.T*scalex).T
if self.minimal>0:
            self.stats_employed=self.demogstates[:,1]+self.demogstates[:,3]
self.stats_parttime=self.demogstates[:,3]
self.stats_unemployed=self.demogstates[:,0]
self.stats_all=np.sum(self.demogstates,1)
else:
            self.stats_employed=self.demogstates[:,1]+self.demogstates[:,10]+self.demogstates[:,8]+self.demogstates[:,9]
self.stats_parttime=self.demogstates[:,10]+self.demogstates[:,8]
self.stats_unemployed=self.demogstates[:,0]+self.demogstates[:,4]+self.demogstates[:,13]
self.stats_all=np.sum(self.demogstates,1)
if returns:
return self.stats_employed,self.stats_parttime,self.stats_unemployed
# def test_emp(self):
# g_emp=0
# g_htv=0
# g_10=0
# g_1=0
# g_8=0
# g_9=0
# g_x=0
# scalex=1
#
# demog2=self.empstats.get_demog()
# scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#
#
# for g in range(6):
# q=self.comp_participants(grouped=True,g=g)
# #g_1+=np.sum(self.gempstate[:,1,g])
# #g_10+=np.sum(self.gempstate[:,10,g])
# #g_8+=np.sum(self.gempstate[:,8,g])
# #g_9+=np.sum(self.gempstate[:,9,g])
# g_emp+=q['palkansaajia']
# g_htv+=q['htv']
# g_x+=np.sum((self.gempstate[:,1,g]+self.gempstate[:,10,g])*scalex)
#
# q=self.comp_participants()
# s_1=np.sum(self.empstate[:,1])
# s_10=np.sum(self.empstate[:,10])
# s_8=np.sum(self.empstate[:,8])
# s_9=np.sum(self.empstate[:,9])
# s_x=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
# emp=q['palkansaajia']
# htv=q['htv']
#
# print(f'htv {htv} vs g_htv {g_htv}')
# print(f'emp {emp} vs g_emp {g_emp}')
# print(f's_x {s_x} vs g_x {g_x}')
# #print(f's_1 {s_1} vs g_1 {g_1}')
# #print(f's_10 {s_10} vs g_10 {g_10}')
# #print(f's_8 {s_8} vs g_8 {g_8}')
# #print(f's_9 {s_9} vs g_9 {g_9}')
def comp_participants(self,scale=True,include_retwork=True,grouped=False,g=0):
'''
        <NAME> count (lkm)
        scalex assumes equal numbers of women and men. This could be refined.
'''
demog2=self.empstats.get_demog()
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#print('version',self.version)
q={}
if self.version in set([1,2,3,4]):
if grouped:
#print('group=',g)
emp=np.squeeze(self.gempstate[:,:,g])
q['yhteensä']=np.sum(np.sum(emp,axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10]+0.5*emp[:,8]+emp[:,9])*scalex)
else:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((emp[:,0]+emp[:,4])*scalex)
q['tmtuella']=np.sum(emp[:,13]*scalex)
q['isyysvapaalla']=np.sum(emp[:,6]*scalex)
q['kotihoidontuella']=np.sum(emp[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(emp[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10]+self.empstate[:,8]+self.empstate[:,9])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10]+0.5*self.empstate[:,8]+self.empstate[:,9])*scalex)
else:
                q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
                q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10])*scalex)
import numpy as np
class FDBuffer(object):
"""A block of possibly-zero frequency-domain samples of a given size.
Attributes:
        is_zero (bool): Are all samples in this block zero? If False, buffer is guaranteed to be allocated (not None).
buffer (None or array): Complex samples.
"""
def __init__(self, block_size):
self.block_size = block_size
self.is_zero = True
self.buffer = None
def alloc_buffer(self):
if self.buffer is None:
            self.buffer = np.zeros(self.block_size + 1, dtype=complex)  # np.complex was removed in NumPy >= 1.20
def clear(self):
self.is_zero = True
if self.buffer is not None:
self.buffer.fill(0)
def __iadd__(self, other):
if not other.is_zero:
self.alloc_buffer()
self.buffer += other.buffer
self.is_zero = False
return self
@classmethod
def from_td(cls, block_size, td):
b = cls(block_size)
if np.any(td):
b.is_zero = False
b.buffer = np.fft.rfft(td, block_size * 2)
return b
def to_td(self, td):
if not self.is_zero:
td[:] = np.fft.irfft(self.buffer)[:self.block_size]
def FDBuffers_to_td(buffers):
"""Turn a list of frequency-domain buffers into an array of time-domain
samples of the same shape."""
td = np.zeros((len(buffers), buffers[0].block_size))
for buf, td_channel in zip(buffers, td):
buf.to_td(td_channel)
return td
def fma(x, a, b):
"""Implement x += a * b for FDBuffer arguments"""
if (not a.is_zero) and (not b.is_zero):
x.alloc_buffer()
x.buffer += a.buffer * b.buffer
x.is_zero = False
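def _fdbuffer_demo(block_size=8):
    # Minimal sketch (not part of the original module): shows the intended
    # FDBuffer lifecycle -- time domain in, frequency-domain accumulate,
    # time domain out -- using only the functions defined above.
    rng = np.random.default_rng(0)
    x = rng.standard_normal(block_size)
    buf = FDBuffer.from_td(block_size, x)
    td = np.zeros(block_size)
    buf.to_td(td)
    assert np.allclose(td, x)  # zero-padded rfft/irfft round-trips exactly
    acc = FDBuffer(block_size)
    fma(acc, buf, buf)  # acc += buf * buf, one complex multiply per bin
    assert not acc.is_zero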
class MatrixBlockConvolver(object):
"""Apply a matrix of time-domain filters.
This can be more efficient than using OverlapSaveConvolver if some input or
output channels are reused, as we only need to FFT each input and output
channel once.
Parameters:
block_size (int): time domain block size for input and output blocks
n_in (int): number of input channels
n_out (int): number of output channels
filters (list): Single-channel filters to apply. Each element is a
3-tuple containing the input channel number, output channel number,
and a single channel filter.
"""
class FDConvolverChannel(object):
"""A single channel of concolution in the frequency domain."""
def __init__(self, block_size, f):
self.block_size = block_size
self.filter_blocks_fd = []
self.blocks_fd = []
for start in range(0, len(f), self.block_size):
end = min(len(f), start + self.block_size)
self.filter_blocks_fd.append(
FDBuffer.from_td(self.block_size, f[start:end]))
self.blocks_fd.append(FDBuffer(self.block_size))
def filter_block(self, in_block_fd):
# clear the returned block from the previous frame
self.blocks_fd[-1].clear()
for filter_block, block in zip(self.filter_blocks_fd,
self.blocks_fd):
fma(block, filter_block, in_block_fd)
self.blocks_fd.append(self.blocks_fd.pop(0))
return self.blocks_fd[-1]
def __init__(self, block_size, n_in, n_out, filters):
self.block_size = block_size
self.filters = [(in_ch, out_ch,
self.FDConvolverChannel(block_size, filter))
for in_ch, out_ch, filter in filters]
        self.input_block = np.zeros((n_in, block_size * 2))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################################################
# inversion of ETAS parameters
#
# as described by Mizrahi et al., 2021
# <NAME>, <NAME>, <NAME>;
# The Effect of Declustering on the Size Distribution of Mainshocks.
# Seismological Research Letters 2021; doi: https://doi.org/10.1785/0220200231
##############################################################################
from scipy.optimize import minimize
from scipy.special import gamma as gamma_func, gammaln, gammaincc, exp1
import pandas as pd
import numpy as np
import geopandas as gpd
import datetime as dt
import json
import os
import pprint
from functools import partial
import pyproj
from shapely.geometry import Polygon
import shapely.ops as ops
from mc_b_est import round_half_up, estimate_beta_tinti
def coppersmith(mag, typ):
# result is in km
# typ is one of the following:
# 1: strike slip fault
# 2: reverse fault
# 3: normal fault
# 4: oblique fault
if typ == 1:
# surface rupture length
SRL = np.power(10, (0.74 * mag - 3.55))
# subsurface rupture length
SSRL = np.power(10, (0.62 * mag - 2.57))
# rupture width
RW = np.power(10, (0.27 * mag - 0.76))
# rupture area
RA = np.power(10, (0.9 * mag - 3.42))
# average slip
AD = np.power(10, (0.9 * mag - 6.32))
elif typ == 2:
# surface rupture length
SRL = np.power(10, (0.63 * mag - 2.86))
# subsurface rupture length
SSRL = np.power(10, (0.58 * mag - 2.42))
# rupture width
RW = np.power(10, (0.41 * mag - 1.61))
# rupture area
RA = np.power(10, (0.98 * mag - 3.99))
# average slip
AD = np.power(10, (0.08 * mag - 0.74))
elif typ == 3:
# surface rupture length
SRL = np.power(10, (0.5 * mag - 2.01))
# subsurface rupture length
SSRL = np.power(10, (0.5 * mag - 1.88))
# rupture width
RW = np.power(10, (0.35 * mag - 1.14))
# rupture area
RA = np.power(10, (0.82 * mag - 2.87))
# average slip
AD = np.power(10, (0.63 * mag - 4.45))
elif typ == 4:
# surface rupture length
SRL = np.power(10, (0.69 * mag - 3.22))
# subsurface rupture length
SSRL = np.power(10, (0.59 * mag - 2.44))
# rupture width
RW = np.power(10, (0.32 * mag - 1.01))
# rupture area
RA = np.power(10, (0.91 * mag - 3.49))
# average slip
AD = np.power(10, (0.69 * mag - 4.80))
return {
'SRL': SRL,
'SSRL': SSRL,
'RW': RW,
'RA': RA,
'AD': AD
}
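# Example (sketch): for an M 6.5 strike-slip event (typ=1) the subsurface
# rupture length is coppersmith(6.5, 1)['SSRL'] = 10**(0.62*6.5 - 2.57),
# i.e. about 29 km; prepare_catalog() below uses this (times a multiplier)
# to bound the distance out to which potential aftershocks are searched.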
def rectangle_surface(lat1, lat2, lon1, lon2):
l = [[lat1, lon1],
[lat2, lon1],
[lat2, lon2],
[lat1, lon2]]
polygon = Polygon(l)
geom_area = ops.transform(
partial(
pyproj.transform,
pyproj.Proj('EPSG:4326'),
pyproj.Proj(
proj='aea',
                lat_1=polygon.bounds[0],
                lat_2=polygon.bounds[2])),
polygon)
return geom_area.area / 1e6
def polygon_surface(polygon):
geom_area = ops.transform(
partial(
pyproj.transform,
pyproj.Proj('EPSG:4326'),
pyproj.Proj(
proj='aea',
lat_1=polygon.bounds[0],
lat_2=polygon.bounds[2])),
polygon)
return geom_area.area / 1e6
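# Example (sketch): both helpers return areas in km^2, so a 1 deg x 1 deg
# cell at the equator, rectangle_surface(0.0, 1.0, 0.0, 1.0), should come
# out near 111 km * 111 km ~= 1.2e4 km^2.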
def hav(theta):
return np.square(np.sin(theta / 2))
def haversine(lat_rad_1, lat_rad_2, lon_rad_1, lon_rad_2, earth_radius=6.3781e3):
# to calculate distance on a sphere
d = 2 * earth_radius * np.arcsin(
np.sqrt(
hav(lat_rad_1 - lat_rad_2)
+ np.cos(lat_rad_1)
* np.cos(lat_rad_2)
* hav(lon_rad_1 - lon_rad_2)
)
)
return d
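# Example (sketch): inputs are latitudes/longitudes in radians; with the
# default earth radius the result is in km, e.g. Zurich (47.38N, 8.54E) to
# Rome (41.90N, 12.50E):
#   haversine(np.radians(47.38), np.radians(41.90),
#             np.radians(8.54), np.radians(12.50))   # ~= 690 km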
def branching_ratio(theta, beta):
log10_mu, log10_k0, a, log10_c, omega, log10_tau, log10_d, gamma, rho = theta
k0 = np.power(10, log10_k0)
c = np.power(10, log10_c)
d = np.power(10, log10_d)
tau = np.power(10, log10_tau)
eta = beta * k0 * np.pi * np.power(d, -rho) * np.power(tau, -omega) * np.exp(c / tau) * upper_gamma_ext(-omega,c / tau) / (rho * (-a + beta + gamma * rho))
return eta
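# Example (sketch): with a fitted parameter vector theta_hat and
# beta = b * ln(10) from the magnitude distribution,
#   branching_ratio(theta_hat, beta)
# gives the expected number of direct aftershocks per event; values below 1
# indicate a subcritical (stationary) ETAS process.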
def to_days(timediff):
return timediff / dt.timedelta(days=1)
def upper_gamma_ext(a, x):
if a > 0:
return gammaincc(a, x) * gamma_func(a)
elif a == 0:
return exp1(x)
else:
return (upper_gamma_ext(a + 1, x) - np.power(x, a)*np.exp(-x)) / a
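# Sanity check (sketch): for a > 0 this reduces to the standard upper
# incomplete gamma, upper_gamma_ext(a, x) == gammaincc(a, x) * gamma_func(a),
# while for a <= 0 it applies the recurrence
#   Gamma(a, x) = (Gamma(a+1, x) - x**a * exp(-x)) / a.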
def parameter_array2dict(theta):
return dict(zip(
['log10_mu', 'log10_k0', 'a', 'log10_c', 'omega', 'log10_tau', 'log10_d', 'gamma', 'rho'],
theta
))
def parameter_dict2array(parameters):
order = ['log10_mu', 'log10_k0', 'a', 'log10_c', 'omega', 'log10_tau', 'log10_d', 'gamma', 'rho']
return np.array([
parameters[key] for key in order
])
def set_initial_values(ranges=None):
if ranges is None:
log10_mu_range = (-10, 0)
log10_k0_range = (-4, 0)
a_range = (0.01, 5.)
log10_c_range = (-8, 0)
omega_range = (-0.99, 1)
log10_tau_range = (0.01, 5)
log10_d_range = (-4, 1)
gamma_range = (0.01, 5.)
rho_range = (0.01, 5.)
else:
log10_mu_range, log10_k0_range, a_range, log10_c_range, omega_range, log10_tau_range, log10_d_range, gamma_range, rho_range = ranges
log10_mu = np.random.uniform(*log10_mu_range)
log10_k0 = np.random.uniform(*log10_k0_range)
a = np.random.uniform(*a_range)
log10_c = np.random.uniform(*log10_c_range)
omega = np.random.uniform(*omega_range)
log10_tau = np.random.uniform(*log10_tau_range)
log10_d = np.random.uniform(*log10_d_range)
gamma = np.random.uniform(*gamma_range)
rho = np.random.uniform(*rho_range)
return [
log10_mu,
log10_k0,
a,
log10_c,
omega,
log10_tau,
log10_d,
gamma,
rho
]
def prepare_catalog(data, mc, coppersmith_multiplier, timewindow_start, timewindow_end, earth_radius,
delta_m=0):
# precalculates distances in time and space between events that are potentially related to each other
calc_start = dt.datetime.now()
# only use data above completeness magnitude
if delta_m > 0:
data["magnitude"] = round_half_up(data["magnitude"] / delta_m) * delta_m
relevant = data.query("magnitude >= @mc").copy()
relevant.sort_values(by='time', inplace=True)
# all entries can be sources, but targets only after timewindow start
targets = relevant.query("time>=@timewindow_start").copy()
# calculate some source stuff
relevant["distance_range_squared"] = np.square(
coppersmith(relevant["magnitude"], 4)["SSRL"] * coppersmith_multiplier
)
relevant["source_to_end_time_distance"] = to_days(timewindow_end - relevant["time"])
relevant["pos_source_to_start_time_distance"] = to_days(timewindow_start - relevant["time"]).apply(
lambda x: max(x, 0)
)
# translate target lat, lon to radians for spherical distance calculation
targets['target_lat_rad'] = np.radians(targets['latitude'])
targets['target_lon_rad'] = np.radians(targets['longitude'])
targets["target_time"] = targets["time"]
targets["target_id"] = targets.index
targets["target_time"] = targets["time"]
# columns that are needed later
targets["source_id"] = 'i'
targets["source_magnitude"] = 0.0
targets["time_distance"] = 0.0
targets["spatial_distance_squared"] = 0.0
targets["source_to_end_time_distance"] = 0.0
targets["pos_source_to_start_time_distance"] = 0.0
targets = targets.sort_values(by="time")
# define index and columns that are later going to be needed
if pd.__version__ >= '0.24.0':
index = pd.MultiIndex(
levels=[[], []],
names=["source_id", "target_id"],
codes=[[], []]
)
else:
index = pd.MultiIndex(
levels=[[], []],
names=["source_id", "target_id"],
labels=[[], []]
)
columns = [
"target_time",
"source_magnitude",
"spatial_distance_squared",
"time_distance",
"source_to_end_time_distance",
"pos_source_to_start_time_distance"
]
res_df = pd.DataFrame(index=index, columns=columns)
df_list = []
print(' number of sources:', len(relevant.index))
print(' number of targets:', len(targets.index))
for source in relevant.itertuples():
stime = source.time
# filter potential targets
if source.time < timewindow_start:
potential_targets = targets.copy()
else:
potential_targets = targets.query(
"time>@stime"
).copy()
targets = potential_targets.copy()
if potential_targets.shape[0] == 0:
continue
# get values of source event
slatrad = np.radians(source.latitude)
slonrad = np.radians(source.longitude)
drs = source.distance_range_squared
# get source id and info of target events
potential_targets["source_id"] = source.Index
potential_targets["source_magnitude"] = source.magnitude
# calculate space and time distance from source to target event
potential_targets["time_distance"] = to_days(potential_targets["target_time"] - stime)
potential_targets["spatial_distance_squared"] = np.square(
haversine(
slatrad,
potential_targets['target_lat_rad'],
slonrad,
potential_targets['target_lon_rad'],
earth_radius
)
)
# filter for only small enough distances
potential_targets.query("spatial_distance_squared <= @drs", inplace=True)
# calculate time distance from source event to timewindow boundaries for integration later
potential_targets["source_to_end_time_distance"] = source.source_to_end_time_distance
potential_targets["pos_source_to_start_time_distance"] = source.pos_source_to_start_time_distance
# append to resulting dataframe
df_list.append(potential_targets)
res_df = pd.concat(df_list)[["source_id", "target_id"] + columns].reset_index().set_index(
["source_id", "target_id"])
print(' took', (dt.datetime.now() - calc_start), 'to prepare the distances\n')
return res_df
def triggering_kernel(metrics, params):
# given time distance in days and squared space distance in square km and magnitude of target event,
# calculate the (not normalized) likelihood, that source event triggered target event
time_distance, spatial_distance_squared, m = metrics
theta, mc = params
log10_mu, log10_k0, a, log10_c, omega, log10_tau, log10_d, gamma, rho = theta
mu = np.power(10, log10_mu)
k0 = np.power(10, log10_k0)
c = np.power(10, log10_c)
    tau = np.power(10, log10_tau)
    d = np.power(10, log10_d)
import numpy as np
import matplotlib.pyplot as plt
import argparse
from planet import tools
import functools
import os
import sys
from scipy import stats
from collections import OrderedDict
# name = 'hard_negative'
# name = 'contra_traj12'
# name = 'contra_step'
name = 'log_likeli'
name = 'planning'
OUT_DIR = 'out_cem/' + name
PALETTE = 10 * (
'#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628',
'#f781bf', '#888888', '#a6cee3', '#b2df8a', '#cab2d6', '#fb9a99',
'#fdbf6f')
'''
python -m planet.scripts.plot_cem --logdir logload/
'''
def upper_tri_masking(A):
m = A.shape[0]
r = np.arange(m)
mask = r[:, None] < r
return A[mask]
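# Example (sketch): strictly upper-triangular entries, row by row.
#   A = np.arange(9).reshape(3, 3)
#   upper_tri_masking(A)   # -> array([1, 2, 5])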
def stratify_by_iter(trajs, part_s=3):
num_parts = int(np.ceil(10 / part_s))
num_iters = trajs.shape[0]
itr = np.arange(num_iters) % 10
batch = {}
    prev = np.zeros_like(itr)
from .transients import ihpss
from .onset import OnsetDetector, ODF
from .consensus import ConsensusBeatTracker
from .params import DEFAULTS
import numpy
def align_beats_onsets(beats, onsets, thresh):
i = 0
j = 0
aligned_beats = []
while i < len(onsets) and j < len(beats):
curr_onset = onsets[i]
curr_beat = beats[j]
        # assumed completion: treat an onset and a beat within thresh as the
        # same event; otherwise advance whichever pointer lags behind
        if np.abs(curr_onset - curr_beat) <= thresh:
            aligned_beats.append(curr_onset)
            i += 1
            j += 1
        elif curr_onset < curr_beat:
            i += 1
        else:
            j += 1
    return aligned_beats
# coding=UTF-8
from manimlib.imports import *
import numpy as np
numbers = [21, 99, 49, 11, 66, 5, 78, 86]
class Sort(Scene):
def construct(self):
        # Show the intro text
        text1 = Text("Merge Sort\n\nA divide-and-conquer method:\nfirst split into many subsequences,\nthen sort each subsequence,\nfinally merge them into one sorted sequence", color=WHITE, font="黑体")
text1.scale(1.5)
text1.move_to(np.array([0.0, 0.0, 0.0]))
self.play(ShowCreation(text1))
self.wait(2)
self.play(Uncreate(text1))
        # Level 1
group1 = VGroup()
for i in range(8):
group1.add(Square(side_length=1))
if i > 0: group1[i].next_to(group1[i-1], RIGHT, 0)
group1.move_to(np.array([0.0, 3.0, 0.0]))
self.play(FadeIn(group1))
        # Numbers
elements = []
for i in range(len(numbers)):
elements.append(Integer(numbers[i]))
elements[i].move_to(np.array([-3.5 + i * 1.0, 3.0, 0.0]))
self.play(ShowCreation(elements[i]))
        # Level 2
        arrow1to2_1 = Arrow(start=np.array([-0.5, 2.5, 0.0]))
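# The scene above animates the idea in the intro text. For reference, a minimal
# plain-Python merge sort (illustrative only, not part of the original scene):
def merge_sort(seq):
    if len(seq) <= 1:
        return seq
    mid = len(seq) // 2
    left = merge_sort(seq[:mid])
    right = merge_sort(seq[mid:])
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]
# e.g. merge_sort(numbers) -> [5, 11, 21, 49, 66, 78, 86, 99]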
import numpy as np
import matplotlib.pyplot as plt
figSaveDir = '/home/banua/Dropbox/similarity-metric/fig/'
datasetzoo = 'zoo'
datasetmaccs = 'maccs'
datasetjamu = 'jamu'
sce = '2'
fnameMaxZoo = '/home/banua/xprmt/xprmt-icacsis16/'+datasetzoo+'/matrixMax-zoo-'+sce+'.csv'
fnameMaxMaccs = '/home/banua/xprmt/xprmt-icacsis16/'+datasetmaccs+'/matrixMax-maccs-'+sce+'.csv'
fnameMaxJamu = '/home/banua/xprmt/xprmt-icacsis16/'+datasetjamu+'/matrixMax-jamu-'+sce+'.csv'
x = np.arange(101)
maxZoo = np.loadtxt(fnameMaxZoo, delimiter='\t')
maxzoostd = [np.std(maxZoo[i, :]) for i in range(0, maxZoo.shape[0])]
maxZoo = [np.average(maxZoo[i, :]) for i in range(0, maxZoo.shape[0])]
maxMaccs = np.loadtxt(fnameMaxMaccs, delimiter='\t')
maxMaccsstd = [np.std(maxMaccs[i, :]) for i in range(0, maxMaccs.shape[0])]
maxMaccs = [np.average(maxMaccs[i, :]) for i in range(0, maxMaccs.shape[0])]
maxJamu = np.loadtxt(fnameMaxJamu, delimiter='\t')
maxJamustd = [np.std(maxJamu[i, :]) for i in range(0, maxJamu.shape[0])]
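# Assumed continuation, by symmetry with the zoo and maccs blocks above
# (the source truncates mid-line):
maxJamu = [np.average(maxJamu[i, :]) for i in range(0, maxJamu.shape[0])]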
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import utility_functions as utilfunc
import sys
import config
# Import from support function repo
import dispatch_functions as dFuncs
import tariff_functions as tFuncs
import decorators
np.seterr(divide='ignore', invalid='ignore')
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_size_and_financial_performance(agent):
"""
This function accepts the characteristics of a single agent and
evaluates the financial performance of a set of solar+storage
system sizes. The system size with the highest NPV is selected.
Parameters
----------
agent : pandas.Series
Single agent (row) from an agent dataframe.
Returns
-------
pandas.Series
Agent with system size, business model and corresponding financial performance.
"""
#=========================================================================#
# Setup
#=========================================================================#
try:
in_cols = list(agent.index)
if config.VERBOSE:
logger.info(' ')
logger.info("\tRunning system size calculations for: {}, {}, {}".format(agent['state'], agent['tariff_class'], agent['sector_abbr']))
logger.info('real_discount: {}'.format(agent['discount_rate']))
logger.info('loan_rate: {}'.format(agent['loan_rate']))
logger.info('down_payment: {}'.format(agent['down_payment']))
# Set resolution of dispatcher
d_inc_n_est = 10
DP_inc_est = 12
d_inc_n_acc = 20
DP_inc_acc = 12
# Extract load profile
load_profile = np.array(agent['consumption_hourly'])
agent.loc['timesteps_per_year'] = 1
        # Extract PV capacity-factor profile
pv_cf_profile = np.array(agent['solar_cf_profile']) / 1e3
agent['naep'] = float(np.sum(pv_cf_profile))
# Create battery object
batt = dFuncs.Battery()
batt_ratio = 3.0
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# Create export tariff object
if agent['nem_system_size_limit_kw'] != 0:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
original_bill, original_results = tFuncs.bill_calculator(load_profile, tariff, export_tariff)
if config.VERBOSE:
logger.info('original_bill: {}'.format(original_bill))
agent['first_year_elec_bill_without_system'] = original_bill * agent['elec_price_multiplier']
if config.VERBOSE:
logger.info('multiplied original bill: {}'.format(agent['first_year_elec_bill_without_system']))
if agent['first_year_elec_bill_without_system'] == 0:
agent['first_year_elec_bill_without_system']=1.0
agent['first_year_elec_cents_per_kwh_without_system'] = agent['first_year_elec_bill_without_system'] / agent['load_per_customer_in_bin_kwh']
#=========================================================================#
# Estimate bill savings revenue from a set of solar+storage system sizes
#=========================================================================#
max_size_load = agent.loc['load_per_customer_in_bin_kwh']/agent.loc['naep']
max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['developable_buildings_pct'] * agent.loc['pv_power_density_w_per_sqft']/1000.0
agent.loc['max_pv_size'] = min([max_size_load, max_size_roof, agent.loc['nem_system_size_limit_kw']])
if config.VERBOSE:
logger.info('max_size_load: {}'.format(max_size_load))
logger.info('max_size_roof: {}'.format(max_size_roof))
dynamic_sizing = True #False
if dynamic_sizing:
pv_sizes = np.arange(0, 1.1, 0.1) * agent.loc['max_pv_size']
else:
# Size the PV system depending on NEM availability, either to 95% of load w/NEM, or 50% w/o NEM. In both cases, roof size is a constraint.
if export_tariff.full_retail_nem==True:
pv_sizes = np.array([min(max_size_load * 0.95, max_size_roof)])
else:
pv_sizes = np.array([min(max_size_load * 0.5, max_size_roof)])
batt_powers = np.zeros(1)
# Calculate the estimation parameters for each PV size
est_params_df = pd.DataFrame(index=pv_sizes)
est_params_df['estimator_params'] = 'temp'
for pv_size in pv_sizes:
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
est_params_df.at[pv_size, 'estimator_params'] = dFuncs.calc_estimator_params(load_and_pv_profile, tariff, export_tariff, batt.eta_charge, batt.eta_discharge)
# Create df with all combinations of solar+storage sizes
system_df = pd.DataFrame(dFuncs.cartesian([pv_sizes, batt_powers]), columns=['pv', 'batt_kw'])
system_df['est_bills'] = None
pv_kwh_by_year = np.array([sum(x) for x in np.split(np.array(pv_cf_profile), agent.loc['timesteps_per_year'])])
pv_kwh_by_year = np.concatenate([(pv_kwh_by_year - ( pv_kwh_by_year * agent.loc['pv_deg'] * i)) for i in range(1, agent.loc['economic_lifetime']+1)])
system_df['kwh_by_timestep'] = system_df['pv'].apply(lambda x: x * pv_kwh_by_year)
n_sys = len(system_df)
for i in system_df.index:
pv_size = system_df['pv'][i].copy()
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
# for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
                sell_all = np.sum(pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh'])
system_df.loc[i, 'est_bills'] = original_bill - sell_all
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
if pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
if pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for agents with no compensation mechanism: set sell rate to 0 and calculate bill with net load profile
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# Calculate bill savings cash flow
# elec_price_multiplier is the scalar increase in the cost of electricity since 2016, when the tariffs were curated
# elec_price_escalator is this agent's assumption about how the price of electricity will change in the future.
avg_est_bill_savings = (original_bill - np.array(system_df['est_bills'])).reshape([n_sys, 1]) * agent['elec_price_multiplier']
est_bill_savings = np.zeros([n_sys, agent['economic_lifetime']+1])
est_bill_savings[:,1:] = avg_est_bill_savings
escalator = (np.zeros(agent['economic_lifetime']+1) + agent['elec_price_escalator'] + 1)**list(range(agent['economic_lifetime']+1))
degradation = (np.zeros(agent['economic_lifetime']+1) + 1 - agent['pv_deg'])**list(range(agent['economic_lifetime']+1))
est_bill_savings = est_bill_savings * escalator * degradation
system_df['est_bill_savings'] = est_bill_savings[:, 1]
# simple representation of 70% minimum of batt charging from PV in order to
# qualify for the ITC. Here, if batt kW is greater than 25% of PV kW, no ITC.
batt_chg_frac = np.where(system_df['pv'] >= system_df['batt_kw']*4.0, 1.0, 0)
#=========================================================================#
# Determine financial performance of each system size
#=========================================================================#
if 'investment_incentive_pct' in agent.index:
if agent['investment_incentive_year_cutoff'] >= agent['year']:
investment_incentives = np.full(system_df.shape[0], agent['investment_incentive_pct'])
else:
investment_incentives = np.zeros(system_df.shape[0])
else:
investment_incentives = np.zeros(system_df.shape[0])
if 'capacity_incentive' in agent.index:
raise NotImplementedError
else:
capacity_based_incentives = np.zeros(system_df.shape[0])
if 'production_incentive' in agent.index:
raise NotImplementedError
else:
production_based_incentives = np.tile(np.array([0]*agent.loc['economic_lifetime']), (system_df.shape[0],1))
if 'cash_incentives' in agent.index:
raise NotImplementedError
else:
cash_incentives = np.array([0]*system_df.shape[0])
cf_results_est = cashflow_constructor(bill_savings=est_bill_savings,
pv_size=np.array(system_df['pv']), pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=np.array(system_df['batt_kw'])*batt_ratio, batt_power=np.array(system_df['batt_kw']),
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent.loc['batt_om_per_kw'], batt_om_per_kwh=agent.loc['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent['tax_rate'], state_tax_rate=0, real_d=agent['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
system_df['npv'] = cf_results_est['npv']
#=========================================================================#
# Select system size and business model for this agent
#=========================================================================#
index_of_best_fin_perform_ho = system_df['npv'].idxmax()
opt_pv_size = system_df['pv'][index_of_best_fin_perform_ho].copy()
opt_batt_power = system_df['batt_kw'][index_of_best_fin_perform_ho].copy()
opt_batt_cap = opt_batt_power*batt_ratio
batt.set_cap_and_power(opt_batt_cap, opt_batt_power)
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_all = np.sum(opt_pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh'])
opt_bill = original_bill - sell_all
# package into "dummy" dispatch results dictionary
accurate_results = {'bill_under_dispatch' : opt_bill, 'batt_dispatch_profile' : np.zeros(len(load_profile))}
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# add system size class
system_size_breaks = [0.0, 2.5, 5.0, 10.0, 20.0, 50.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 1500.0, 3000.0]
#=========================================================================#
# Determine dispatch trajectory for chosen system size
#=========================================================================#
opt_bill = accurate_results['bill_under_dispatch'] #+ one_time_charge
agent.loc['first_year_elec_bill_with_system'] = opt_bill * agent.loc['elec_price_multiplier']
agent.loc['first_year_elec_bill_savings'] = agent.loc['first_year_elec_bill_without_system'] - agent.loc['first_year_elec_bill_with_system']
agent.loc['first_year_elec_bill_savings_frac'] = agent.loc['first_year_elec_bill_savings'] / agent.loc['first_year_elec_bill_without_system']
opt_bill_savings = np.zeros([1, agent.loc['economic_lifetime'] + 1])
opt_bill_savings[:, 1:] = (original_bill - opt_bill)
opt_bill_savings = opt_bill_savings * agent.loc['elec_price_multiplier'] * escalator * degradation
# If the batt kW is less than 25% of the PV kW, apply the ITC
if opt_pv_size >= opt_batt_power*4:
batt_chg_frac = 1.0
else:
batt_chg_frac = 0.0
cash_incentives = np.array([cash_incentives[index_of_best_fin_perform_ho]])
investment_incentives = np.array([investment_incentives[index_of_best_fin_perform_ho]])
capacity_based_incentives = np.array([capacity_based_incentives[index_of_best_fin_perform_ho]])
production_based_incentives = np.array(production_based_incentives[index_of_best_fin_perform_ho])
cf_results_opt = cashflow_constructor(bill_savings=opt_bill_savings,
pv_size=opt_pv_size, pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=opt_batt_cap, batt_power=opt_batt_power,
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent['batt_om_per_kw'], batt_om_per_kwh=agent['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent.loc['tax_rate'], state_tax_rate=0, real_d=agent.loc['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
#=========================================================================#
# Package results
#=========================================================================#
agent['pv_kw'] = opt_pv_size
agent['batt_kw'] = opt_batt_power
agent['batt_kwh'] = opt_batt_cap
agent['npv'] = cf_results_opt['npv'][0]
agent['cash_flow'] = cf_results_opt['cf'][0]
agent['batt_dispatch_profile'] = accurate_results['batt_dispatch_profile']
agent['bill_savings'] = opt_bill_savings
agent['aep'] = agent['pv_kw'] * agent['naep']
agent['cf'] = agent['naep']/8760
agent['system_size_factors'] = np.where(agent['pv_kw'] == 0, 0, pd.cut([agent['pv_kw']], system_size_breaks))[0]
agent['export_tariff_results'] = original_results
out_cols = list(agent.index)
new_cols = [i for i in out_cols if i not in in_cols] + ['agent_id']
agent = agent.loc[agent.index.isin(new_cols)]
except Exception as e:
logger.info(' ')
logger.info('--------------------------------------------')
logger.info("failed in calc_system_size_and_financial_performance")
logger.info(('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e))
logger.info('agent that failed')
logger.info(agent)
logger.info('--------------------------------------------')
agent.to_pickle('agent_that_failed.pkl')
return agent
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_financial_performance(dataframe):
"""
Function to calculate the payback period and join it on the agent dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Agent dataframe
Returns
-------
pandas.DataFrame
Agent dataframe with `payback_period` joined on dataframe
"""
# dataframe = dataframe.reset_index()
    cfs = np.vstack(dataframe['cash_flow']).astype(float)
# calculate payback period
tech_lifetime = np.shape(cfs)[1] - 1
payback = calc_payback_vectorized(cfs, tech_lifetime)
# calculate time to double
ttd = calc_ttd(cfs)
metric_value = np.where(dataframe['sector_abbr']=='res', payback, ttd)
dataframe['metric_value'] = metric_value
dataframe = dataframe.set_index('agent_id')
return dataframe
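# `calc_payback_vectorized` is not defined in this excerpt; a minimal sketch of
# the usual approach (year in which cumulative cash flow first turns
# non-negative, linearly interpolated within that year, capped like calc_ttd):
def calc_payback_vectorized(cfs, tech_lifetime):
    cum = np.cumsum(cfs, axis=1)
    payback = np.full(cum.shape[0], 30.1)
    for k in range(cum.shape[0]):
        cross = np.argmax(cum[k] >= 0)
        if cum[k, cross] >= 0:
            if cross == 0:
                payback[k] = 0.0
            else:
                prev = cum[k, cross - 1]
                payback[k] = (cross - 1) + (-prev) / (cum[k, cross] - prev)
    return np.round(payback, 1)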
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_max_market_share(dataframe, max_market_share_df):
"""
Calculates the maximum marketshare available for each agent.
Parameters
----------
dataframe : pandas.DataFrame
Attributes
----------
metric_value : float
max_market_share_df : pandas.DataFrame
Set by :meth:`settings.ScenarioSettings.get_max_marketshare`.
Returns
-------
pandas.DataFrame
Input DataFrame with `max_market_share` and `metric` columns joined on.
"""
in_cols = list(dataframe.columns)
dataframe = dataframe.reset_index()
dataframe['business_model'] = 'host_owned'
dataframe['metric'] = 'payback_period'
# Convert metric value to integer as a primary key, then bound within max market share ranges
max_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.max()
min_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.min()
max_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.max()
min_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.min()
    # copy the metric values to a new column to store an edited version
metric_value_bounded = dataframe['metric_value'].values.copy()
# where the metric value exceeds the corresponding max market curve bounds, set the value to the corresponding bound
metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] < min_payback))] = min_payback
metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] > max_payback))] = max_payback
metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] < min_mbs))] = min_mbs
metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] > max_mbs))] = max_mbs
dataframe['metric_value_bounded'] = metric_value_bounded
# scale and round to nearest int
dataframe['metric_value_as_factor'] = [int(round(i,1) * 100) for i in dataframe['metric_value_bounded']]
# add a scaled key to the max_market_share dataframe too
max_market_share_df['metric_value_as_factor'] = [int(round(float(i), 1) * 100) for i in max_market_share_df['metric_value']]
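    # Worked example of the integer key (values illustrative): a payback of
    # 7.23 years becomes int(round(7.23, 1) * 100) = 720; the lookup table key
    # is built the same way, so the merge below matches on whole integers
    # rather than on fragile floats.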
# Join the max_market_share table and dataframe in order to select the ultimate mms based on the metric value.
dataframe = pd.merge(dataframe, max_market_share_df[['sector_abbr', 'max_market_share','metric_value_as_factor', 'metric', 'business_model']], how = 'left', on = ['sector_abbr','metric_value_as_factor','metric', 'business_model'])
# Derate the maximum market share for commercial and industrial customers in leased buildings by (2/3)
# based on the owner occupancy status (1 = owner-occupied, 2 = leased)
dataframe['max_market_share'] = np.where(dataframe['owner_occupancy_status'] == 2, dataframe['max_market_share']/3,dataframe['max_market_share'])
# out_cols = in_cols + ['max_market_share', 'metric']
out_cols = in_cols + ['max_market_share', 'metric_value_as_factor', 'metric', 'metric_value_bounded']
return dataframe[out_cols]
def calc_ttd(cfs):
"""
Calculate time to double investment based on the MIRR.
This is used for the commercial and industrial sectors.
Parameters
----------
cfs : numpy.ndarray
Project cash flows ($/yr).
Returns
-------
ttd : numpy.ndarray
Time to double investment (years).
"""
irrs = virr(cfs, precision = 0.005, rmin = 0, rmax1 = 0.3, rmax2 = 0.5)
# suppress errors due to irrs of nan
with np.errstate(invalid = 'ignore'):
irrs = np.where(irrs<=0,1e-6,irrs)
ttd = np.log(2) / np.log(1 + irrs)
ttd[ttd <= 0] = 0
ttd[ttd > 30] = 30.1
# also deal with ttd of nan by setting to max payback period (this should only occur when cashflows = 0)
if not np.all(np.isnan(ttd) == np.all(cfs == 0, axis = 1)):
raise Exception("np.nan found in ttd for non-zero cashflows")
ttd[np.isnan(ttd)] = 30.1
return ttd.round(decimals = 1) # must be rounded to nearest 0.1 to join with max_market_share
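# `virr` (vectorized IRR) is not shown in this excerpt; a minimal sketch that
# scans candidate rates on a grid and returns the first NPV sign change per
# row (the rmax1/rmax2 two-stage refinement is collapsed into one fine scan):
def virr(cfs, precision=0.005, rmin=0.0, rmax1=0.3, rmax2=0.5):
    rates = np.arange(rmin, rmax2 + precision, precision)
    years = np.arange(cfs.shape[1])
    disc = (1.0 + rates)[:, None] ** -years[None, :]  # (n_rates, n_years)
    npvs = cfs @ disc.T                               # (n_agents, n_rates)
    irrs = np.full(cfs.shape[0], np.nan)
    for k in range(cfs.shape[0]):
        crossings = np.where(np.diff(np.sign(npvs[k])) != 0)[0]
        if crossings.size:
            irrs[k] = rates[crossings[0]]
    return irrs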
#%%
def cashflow_constructor(bill_savings,
pv_size, pv_price, pv_om,
batt_cap, batt_power,
batt_cost_per_kw, batt_cost_per_kwh,
batt_om_per_kw, batt_om_per_kwh,
batt_chg_frac,
sector, itc, deprec_sched,
fed_tax_rate, state_tax_rate, real_d,
analysis_years, inflation,
down_payment_fraction, loan_rate, loan_term,
cash_incentives=np.array([0]), ibi=np.array([0]), cbi=np.array([0]), pbi=np.array([[0]]), print_statements=False):
"""
Calculate the system cash flows based on the capex, opex, bill savings, incentives, tax implications, and other factors
Parameters
----------
bill_savings : "numpy.ndarray"
Annual bill savings ($/yr) from system adoption from 1st year through system lifetime
pv_size : "numpy.float64"
system capacity selected by agent (kW)
pv_price : "float"
system capex ($/kW)
pv_om : "float"
        system operation and maintenance cost ($/kW)
batt_cap : "numpy.float64"
energy capacity of battery selected (kWh)
batt_power : "numpy.float64"
demand capacity of battery selected (kW)
batt_cost_per_kw : "float"
capex of battery per kW installed ($/kW)
batt_cost_per_kwh : "float"
capex of battery per kWh installed ($/kWh)
batt_om_per_kw : "float"
opex of battery per kW installed ($/kW-yr)
    batt_om_per_kwh : "float"
        opex of battery per kWh installed ($/kWh-yr)
batt_chg_frac : "int"
fraction of the battery's energy that it gets from a co-hosted PV system. Used for ITC calculation.
sector : "str"
agent sector
itc : "float"
fraction of capex offset by federal investment tax credit
deprec_sched : "list"
fraction of capex eligible for tax-based depreciation
fed_tax_rate : "float"
average tax rate as fraction from federal taxes
state_tax_rate : "int"
average tax rate as fraction from state taxes
real_d : "float"
        annual discount rate in real terms
analysis_years : "int"
number of years to use in economic analysis
inflation : "float"
annual average inflation rate as fraction e.g. 0.025
down_payment_fraction : "int"
fraction of capex used as system down payment
    loan_rate : "float"
real interest rate for debt payments
loan_term : "int"
number of years for loan term
cash_incentives : "numpy.ndarray"
array describing eligible cash-based incentives e.g. $
ibi : "numpy.ndarray"
array describing eligible investment-based incentives e.g. 0.2
cbi : "numpy.ndarray"
array describing eligible one-time capacity-based incentives e.g. $/kW
pbi : "numpy.ndarray"
array describing eligible ongoing performance-based incentives e.g $/kWh-yr
Returns
-------
    cf : 'dtype'
Annual cash flows of project investment ($/yr)
cf_discounted : 'dtype'
Annual discounted cash flows of project investment ($/yr)
npv : 'dtype'
Net present value ($) of project investment using WACC
bill_savings : 'dtype'
Nominal cash flow of the annual bill savings over the lifetime of the system
after_tax_bill_savings : 'dtype'
Effective after-tax bill savings (electricity costs are tax-deductible for commercial entities)
pv_cost : 'dtype'
Capex of system in ($)
batt_cost : 'dtype'
Capex of battery in ($)
installed_cost : 'dtype'
Combined capex of system + battery
    up_front_cost : 'dtype'
Capex in 0th year as down payment
batt_om_cf : 'dtype'
Annual cashflows of battery opex
operating_expenses : 'dtype'
Combined annual opex of system + battery ($/yr)
pv_itc_value : 'dtype'
Absolute value of investment tax credit for system ($)
batt_itc_value : 'dtype'
Absolute value of investment tax credit for battery ($)
itc_value : 'dtype'
Absolute value of investment tax credit for combined system + battery ($)
deprec_basis : 'dtype'
Absolute value of depreciable basis of system ($)
deprec_deductions : 'dtype'
Annual amount of depreciable capital in given year ($)
initial_debt : 'dtype'
Amount of debt for loan ($)
annual_principal_and_interest_payment : 'dtype'
Annual amount of debt service payment, principal + interest ($)
debt_balance : 'dtype'
Annual amount of debt remaining in given year ($)
interest_payments : 'dtype'
Annual amount of interest payment in given year ($)
principal_and_interest_payments : 'dtype'
Array of annual principal and interest payments ($)
total_taxable_income : 'dtype'
        Amount of state income from incentives eligible for taxes
state_deductions : 'dtype'
Reduction to state taxable income from interest, operating expenses, or bill savings depending on sector
total_taxable_state_income_less_deductions : 'dtype'
Total taxable state income less any applicable deductions
state_income_taxes : 'dtype'
Amount of state income tax i.e. net taxable income by tax rate
fed_deductions : 'dtype'
Reduction to federal taxable income from interest, operating expenses, or bill savings depending on sector
total_taxable_fed_income_less_deductions : 'dtype'
Total taxable federal income less any applicable deductions
fed_income_taxes : 'dtype'
Amount of federal income tax i.e. net taxable income by tax rate
interest_payments_tax_savings : 'dtype'
Amount of tax savings from deductions of interest payments
operating_expenses_tax_savings : 'dtype'
Amount of tax savings from deductions of operating expenses
deprec_deductions_tax_savings : 'dtype'
Amount of tax savings from deductions of capital depreciation
elec_OM_deduction_decrease_tax_liability : 'dtype'
Amount of tax savings from deductions of electricity costs as deductible business expense
Todo
----
1) Sales tax basis and rate
2) note that sales tax goes into depreciable basis
    3) Property taxes (res can deduct from income taxes, I think)
4) insurance
5) add pre-tax cash flow
6) add residential mortgage option
7) add carbon tax revenue
8) More exhaustive checking. I have confirmed basic formulations against SAM, but there are many permutations that haven't been checked.
9) make incentives reduce depreciable basis
10) add a flag for high incentive levels
11) battery price schedule, for replacements
12) improve inverter replacement
13) improve battery replacement
14) add inflation adjustment for replacement prices
15) improve deprec schedule handling
16) Make financing unique to each agent
17) Make battery replacements depreciation an input, with default of 7 year MACRS
18) Have a better way to deal with capacity vs effective capacity and battery costs
19) Make it so it can accept different loan terms
"""
#################### Massage inputs ########################################
# If given just a single value for an agent-specific variable, repeat that
# variable for each agent. This assumes that the variable is intended to be
# applied to each agent.
if np.size(np.shape(bill_savings)) == 1:
shape = (1, analysis_years + 1)
else:
shape = (np.shape(bill_savings)[0], analysis_years + 1)
n_agents = shape[0]
if np.size(sector) != n_agents or n_agents == 1:
sector = np.repeat(sector, n_agents)
if np.size(fed_tax_rate) != n_agents or n_agents == 1:
fed_tax_rate = np.repeat(fed_tax_rate, n_agents)
if np.size(state_tax_rate) != n_agents or n_agents == 1:
state_tax_rate = np.repeat(state_tax_rate, n_agents)
if np.size(itc) != n_agents or n_agents == 1:
itc = np.repeat(itc, n_agents)
if np.size(pv_size) != n_agents or n_agents == 1:
pv_size = np.repeat(pv_size, n_agents)
if np.size(pv_price) != n_agents or n_agents == 1:
pv_price = np.repeat(pv_price, n_agents)
if np.size(pv_om) != n_agents or n_agents == 1:
pv_om = np.repeat(pv_om, n_agents)
if np.size(batt_cap) != n_agents or n_agents == 1:
batt_cap = np.repeat(batt_cap, n_agents)
if np.size(batt_power) != n_agents or n_agents == 1:
batt_power = np.repeat(batt_power, n_agents)
if np.size(batt_cost_per_kw) != n_agents or n_agents == 1:
batt_cost_per_kw = np.repeat(batt_cost_per_kw, n_agents)
if np.size(batt_cost_per_kwh) != n_agents or n_agents == 1:
        batt_cost_per_kwh = np.repeat(batt_cost_per_kwh, n_agents)
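    # The source truncates mid-broadcast; the remaining scalar inputs
    # (batt_om_per_kw, batt_om_per_kwh, batt_chg_frac, real_d, inflation, ...)
    # are presumably expanded to per-agent arrays with the same np.repeat
    # pattern used above.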
import sys
import scipy.ndimage
import os.path
import HebbLearn as hl
import numpy as np
import matplotlib.pyplot as plt
try:
import h5py
except:
print('h5py cannot be loaded - may cause error')
pass
fl = hl.NonlinearGHA()
num_textures = 688
if os.path.isfile('textures.npy'):
print('==> Load previously saved textures data')
textures = np.load('textures.npy')
else:
print('==> Loading data')
textures = np.zeros((512,512,num_textures))
for i in range(num_textures):
fn = '/home/rabadi/data/textures/' + str(i) + '.jpg'
try:
textures[:,:,i] = scipy.ndimage.imread(fn, flatten=True)/255
except:
            print('dimensionality mismatch - fixing')
tmp = scipy.ndimage.imread(fn, flatten=True)/255
if (np.shape(tmp)[0] < 512):
tmp = np.concatenate((tmp, np.random.rand(512-np.shape(tmp)[0],np.shape(tmp)[1])), axis=0)
if (np.shape(tmp)[1] < 512):
tmp = np.concatenate((tmp, np.random.rand(512, 512-np.shape(tmp)[1])), axis=1)
textures[:,:,i] = tmp
np.save('textures.npy',textures)
random = np.random.rand(512,512,np.shape(textures)[2])
random = random/np.max(random) # make sure all normalized
print('==> mean centering data')
pop_mean = np.mean(np.concatenate((random,textures),axis=2))
random = random - pop_mean
textures = textures - pop_mean
pop_std = np.std(np.concatenate((random,textures),axis=2))
random = random/pop_std
textures = textures/pop_std
#plt.imshow(textures[:,:,0], cmap=plt.get_cmap('gray'))
#plt.show()
if len(sys.argv)>1:
filter_size = int(sys.argv[1])
step_size = int(sys.argv[2])
out_dimension = int(sys.argv[3])
LR = float(sys.argv[4])
n_samples = int(sys.argv[5])
else:
filter_size = 512
step_size = 512
out_dimension = 1
LR = 1
n_samples = 500
nonlinearity = hl.LINEAR
LR=0
#print('==> Training')
#random_k = fl.Train(random[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
#textures_k = fl.Train(textures[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
#np.save('textures-k.npy',textures_k)
#output = fl.ImageReconstruction(textures[:,:,0], textures_k, filter_size, step_size, nonlinearity)
#plt.imshow(output, cmap=plt.get_cmap('gray'))
#plt.show()
print('==> Classification performance')
tex_vex = np.reshape(textures, (512*512,num_textures), order='F').T
rand_vex = np.reshape(random, (512*512,num_textures), order='F').T
diff_mean = (np.mean(rand_vex[:n_samples,:], axis=0) - np.mean(tex_vex[:n_samples,:], axis=0))
test = np.concatenate((tex_vex[500:600,:], rand_vex[500:600,:]), axis=0)
y = np.ones((200,1))
y[:100]=-1
shuff = np.random.permutation(200)
test = test[shuff,:]
y = y[shuff]
corr = 0
print('==> Training')
k_tex = fl.Train(textures[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
k_rand = fl.Train(random[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
tex_pop = np.zeros((512,512))
rand_pop = np.zeros((512,512))
for i in range(n_samples):
    tex_pop = tex_pop + fl.ImageReconstruction(textures[:, :, i], np.reshape(k_tex, (1, 262144, 1)), filter_size, step_size, nonlinearity)
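    # Assumed continuation (the source truncates mid-line): accumulate the
    # matching reconstruction for the random images, then average both stacks.
    rand_pop = rand_pop + fl.ImageReconstruction(random[:, :, i], np.reshape(k_rand, (1, 262144, 1)), filter_size, step_size, nonlinearity)
tex_pop = tex_pop / n_samples
rand_pop = rand_pop / n_samples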